/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm_types.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#include <asm/invpcid.h>
#include <asm/processor-flags.h>
#include <asm/pgtable.h>

void __flush_tlb_all(void);

#define TLB_FLUSH_ALL			-1UL
#define TLB_GENERATION_INVALID		0

void cr4_update_irqsoff(unsigned long set, unsigned long clear);
unsigned long cr4_read_shadow(void);

/* Set in this cpu's CR4. */
static inline void cr4_set_bits_irqsoff(unsigned long mask)
{
	cr4_update_irqsoff(mask, 0);
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits_irqsoff(unsigned long mask)
{
	cr4_update_irqsoff(0, mask);
}

/* Set in this cpu's CR4. */
static inline void cr4_set_bits(unsigned long mask)
{
	unsigned long flags;

	local_irq_save(flags);
	cr4_set_bits_irqsoff(mask);
	local_irq_restore(flags);
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits(unsigned long mask)
{
	unsigned long flags;

	local_irq_save(flags);
	cr4_clear_bits_irqsoff(mask);
	local_irq_restore(flags);
}

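/*
 * Illustrative sketch, not part of this header: a hypothetical caller that
 * wants a CR4 feature bit enabled on the local CPU goes through
 * cr4_set_bits(), which keeps the hardware register and the per-CPU CR4
 * shadow in sync under the brief IRQ-off window. The helper name below is
 * made up for illustration only.
 */
static inline void example_enable_global_pages(void)
{
	/* Set X86_CR4_PGE in this CPU's CR4 (and in its shadow copy). */
	cr4_set_bits(X86_CR4_PGE);
}
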
#ifdef CONFIG_ADDRESS_MASKING
DECLARE_PER_CPU(u64, tlbstate_untag_mask);

static inline u64 current_untag_mask(void)
{
	return this_cpu_read(tlbstate_untag_mask);
}
#endif

/*
 * 6 because 6 should be plenty and struct tlb_state will fit in two cache
 * lines.
 */
#define TLB_NR_DYN_ASIDS	6

struct tlb_context {
	u64 ctx_id;
	u64 tlb_gen;
};

struct tlb_state {
	/*
	 * cpu_tlbstate.loaded_mm should match CR3 whenever interrupts
	 * are on.  This means that it may not match current->active_mm,
	 * which will contain the previous user mm when we're in lazy TLB
	 * mode even if we've already switched back to swapper_pg_dir.
	 *
	 * During switch_mm_irqs_off(), loaded_mm will be set to
	 * LOADED_MM_SWITCHING during the brief interrupts-off window
	 * when CR3 and loaded_mm would otherwise be inconsistent.  This
	 * is for nmi_uaccess_okay()'s benefit.
	 */
	struct mm_struct *loaded_mm;

#define LOADED_MM_SWITCHING ((struct mm_struct *)1UL)

	/* Last user mm for optimizing IBPB */
	union {
		struct mm_struct	*last_user_mm;
		unsigned long		last_user_mm_spec;
	};

	u16 loaded_mm_asid;
	u16 next_asid;

	/*
	 * If set we changed the page tables in such a way that we
	 * needed an invalidation of all contexts (aka. PCIDs / ASIDs).
	 * This tells us to go invalidate all the non-loaded ctxs[]
	 * on the next context switch.
	 *
	 * The current ctx was kept up-to-date as it ran and does not
	 * need to be invalidated.
	 */
	bool invalidate_other;

#ifdef CONFIG_ADDRESS_MASKING
	/*
	 * Active LAM mode.
	 *
	 * X86_CR3_LAM_U57/U48 shifted right by X86_CR3_LAM_U57_BIT or 0 if LAM
	 * disabled.
	 */
	u8 lam;
#endif

	/*
	 * Mask that contains TLB_NR_DYN_ASIDS+1 bits to indicate
	 * the corresponding user PCID needs a flush next time we
	 * switch to it; see SWITCH_TO_USER_CR3.
	 */
	unsigned short user_pcid_flush_mask;

	/*
	 * Access to this CR4 shadow and to H/W CR4 is protected by
	 * disabling interrupts when modifying either one.
	 */
	unsigned long cr4;

	/*
	 * This is a list of all contexts that might exist in the TLB.
	 * There is one per ASID that we use, and the ASID (what the
	 * CPU calls PCID) is the index into ctxs[].
	 *
	 * For each context, ctx_id indicates which mm the TLB's user
	 * entries came from.  As an invariant, the TLB will never
	 * contain entries that are out-of-date with respect to the
	 * tlb_gen that mm had reached in the list.
	 *
	 * To be clear, this means that it's legal for the TLB code to
	 * flush the TLB without updating tlb_gen.  This can happen
	 * (for now, at least) due to paravirt remote flushes.
	 *
	 * NB: context 0 is a bit special, since it's also used by
	 * various bits of init code.  This is fine -- code that
	 * isn't aware of PCID will end up harmlessly flushing
	 * context 0.
	 */
	struct tlb_context ctxs[TLB_NR_DYN_ASIDS];
};
DECLARE_PER_CPU_ALIGNED(struct tlb_state, cpu_tlbstate);

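/*
 * Illustrative sketch, not part of the kernel API: the per-CPU TLB state is
 * accessed with this_cpu_read()/this_cpu_write(). The hypothetical helper
 * below shows how code running with preemption disabled could look up which
 * mm the local CPU's TLB entries currently belong to.
 */
static inline struct mm_struct *example_loaded_mm(void)
{
	return this_cpu_read(cpu_tlbstate.loaded_mm);
}
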
struct tlb_state_shared {
	/*
	 * We can be in one of several states:
	 *
	 *  - Actively using an mm.  Our CPU's bit will be set in
	 *    mm_cpumask(loaded_mm) and is_lazy == false;
	 *
	 *  - Not using a real mm.  loaded_mm == &init_mm.  Our CPU's bit
	 *    will not be set in mm_cpumask(&init_mm) and is_lazy == false.
	 *
	 *  - Lazily using a real mm.  loaded_mm != &init_mm, our bit
	 *    is set in mm_cpumask(loaded_mm), but is_lazy == true.
	 *    We're heuristically guessing that the CR3 load we
	 *    skipped more than makes up for the overhead added by
	 *    lazy mode.
	 */
	bool is_lazy;
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state_shared, cpu_tlbstate_shared);

bool nmi_uaccess_okay(void);
#define nmi_uaccess_okay nmi_uaccess_okay

/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}

extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

extern void initialize_tlbstate_and_flush(void);

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_multi(cpumask, info) flushes TLBs on multiple cpus
 *
 * ..but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */
struct flush_tlb_info {
	/*
	 * We support several kinds of flushes.
	 *
	 * - Fully flush a single mm.  .mm will be set, .end will be
	 *   TLB_FLUSH_ALL, and .new_tlb_gen will be the tlb_gen to
	 *   which the IPI sender is trying to catch us up.
	 *
	 * - Partially flush a single mm.  .mm will be set, .start and
	 *   .end will indicate the range, and .new_tlb_gen will be set
	 *   such that the changes between generation .new_tlb_gen-1 and
	 *   .new_tlb_gen are entirely contained in the indicated range.
	 *
	 * - Fully flush all mms whose tlb_gens have been updated.  .mm
	 *   will be NULL, .end will be TLB_FLUSH_ALL, and .new_tlb_gen
	 *   will be zero.
	 */
	struct mm_struct	*mm;
	unsigned long		start;
	unsigned long		end;
	u64			new_tlb_gen;
	unsigned int		initiating_cpu;
	u8			stride_shift;
	u8			freed_tables;
};

void flush_tlb_local(void);
void flush_tlb_one_user(unsigned long addr);
void flush_tlb_one_kernel(unsigned long addr);
void flush_tlb_multi(const struct cpumask *cpumask,
		     const struct flush_tlb_info *info);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif

#define flush_tlb_mm(mm)						\
		flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL, true)

#define flush_tlb_range(vma, start, end)				\
	flush_tlb_mm_range((vma)->vm_mm, start, end,			\
			   ((vma)->vm_flags & VM_HUGETLB)		\
				? huge_page_shift(hstate_vma(vma))	\
				: PAGE_SHIFT, false)

extern void flush_tlb_all(void);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned int stride_shift,
				bool freed_tables);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
{
	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, PAGE_SHIFT, false);
}

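/*
 * Illustrative sketch (hypothetical helper, not used by the kernel):
 * stride_shift tells flush_tlb_mm_range() how far apart the entries being
 * invalidated are, so a range mapped with 2MB pages can be flushed with one
 * operation per PMD rather than one per 4k page. freed_tables == false says
 * the caller did not free any page-table pages.
 */
static inline void example_flush_huge_range(struct mm_struct *mm,
					    unsigned long start,
					    unsigned long end)
{
	flush_tlb_mm_range(mm, start, end, PMD_SHIFT, false);
}
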
static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
{
	/*
	 * Bump the generation count.  This also serves as a full barrier
	 * that synchronizes with switch_mm(): callers are required to order
	 * their read of mm_cpumask after their writes to the paging
	 * structures.
	 */
	return atomic64_inc_return(&mm->context.tlb_gen);
}

static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
					struct mm_struct *mm)
{
	inc_mm_tlb_gen(mm);
	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}

extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);

static inline bool pte_flags_need_flush(unsigned long oldflags,
					unsigned long newflags,
					bool ignore_access)
{
	/*
	 * Flags that require a flush when cleared but not when they are set.
	 * Only include flags that would not trigger spurious page-faults.
	 * Non-present entries are not cached. Hardware would set the
	 * dirty/access bit if needed without a fault.
	 */
	const pteval_t flush_on_clear = _PAGE_DIRTY | _PAGE_PRESENT |
					_PAGE_ACCESSED;
	const pteval_t software_flags = _PAGE_SOFTW1 | _PAGE_SOFTW2 |
					_PAGE_SOFTW3 | _PAGE_SOFTW4;
	const pteval_t flush_on_change = _PAGE_RW | _PAGE_USER | _PAGE_PWT |
			  _PAGE_PCD | _PAGE_PSE | _PAGE_GLOBAL | _PAGE_PAT |
			  _PAGE_PAT_LARGE | _PAGE_PKEY_BIT0 | _PAGE_PKEY_BIT1 |
			  _PAGE_PKEY_BIT2 | _PAGE_PKEY_BIT3 | _PAGE_NX;
	unsigned long diff = oldflags ^ newflags;

	BUILD_BUG_ON(flush_on_clear & software_flags);
	BUILD_BUG_ON(flush_on_clear & flush_on_change);
	BUILD_BUG_ON(flush_on_change & software_flags);

	/* Ignore software flags */
	diff &= ~software_flags;

	if (ignore_access)
		diff &= ~_PAGE_ACCESSED;

	/*
	 * Were any of the 'flush_on_clear' flags cleared between 'oldflags'
	 * and 'newflags'?
	 */
	if (diff & oldflags & flush_on_clear)
		return true;

	/* Flush on modified flags. */
	if (diff & flush_on_change)
		return true;

	/* Ensure there are no flags that were left behind */
	if (IS_ENABLED(CONFIG_DEBUG_VM) &&
	    (diff & ~(flush_on_clear | software_flags | flush_on_change))) {
		VM_WARN_ON_ONCE(1);
		return true;
	}

	return false;
}

/*
 * pte_needs_flush() checks whether permissions were demoted and a flush is
 * required. It should only be used for userspace PTEs.
 */
static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
{
	/* !PRESENT -> * ; no need for flush */
	if (!(pte_flags(oldpte) & _PAGE_PRESENT))
		return false;

	/* PFN changed ; needs flush */
	if (pte_pfn(oldpte) != pte_pfn(newpte))
		return true;

	/*
	 * check PTE flags; ignore access-bit; see comment in
	 * ptep_clear_flush_young().
	 */
	return pte_flags_need_flush(pte_flags(oldpte), pte_flags(newpte),
				    true);
}
#define pte_needs_flush pte_needs_flush

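/*
 * Illustrative sketch (hypothetical helper): write-protecting a present,
 * writable PTE clears _PAGE_RW, which is a 'flush_on_change' flag, so
 * pte_needs_flush() reports true. Merely setting _PAGE_DIRTY or
 * _PAGE_ACCESSED would not, since the CPU can set those bits itself without
 * faulting.
 */
static inline bool example_wrprotect_needs_flush(pte_t pte)
{
	return pte_needs_flush(pte, pte_wrprotect(pte));
}
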
/*
 * huge_pmd_needs_flush() checks whether permissions were demoted and a flush
 * is required. It should only be used for userspace huge PMDs.
 */
static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
{
	/* !PRESENT -> * ; no need for flush */
	if (!(pmd_flags(oldpmd) & _PAGE_PRESENT))
		return false;

	/* PFN changed ; needs flush */
	if (pmd_pfn(oldpmd) != pmd_pfn(newpmd))
		return true;

	/*
	 * check PMD flags; do not ignore access-bit; see
	 * pmdp_clear_flush_young().
	 */
	return pte_flags_need_flush(pmd_flags(oldpmd), pmd_flags(newpmd),
				    false);
}
#define huge_pmd_needs_flush huge_pmd_needs_flush

#ifdef CONFIG_ADDRESS_MASKING
static inline u64 tlbstate_lam_cr3_mask(void)
{
	u64 lam = this_cpu_read(cpu_tlbstate.lam);

	return lam << X86_CR3_LAM_U57_BIT;
}

static inline void set_tlbstate_lam_mode(struct mm_struct *mm)
{
	this_cpu_write(cpu_tlbstate.lam,
		       mm->context.lam_cr3_mask >> X86_CR3_LAM_U57_BIT);
	this_cpu_write(tlbstate_untag_mask, mm->context.untag_mask);
}

#else

static inline u64 tlbstate_lam_cr3_mask(void)
{
	return 0;
}

static inline void set_tlbstate_lam_mode(struct mm_struct *mm)
{
}
#endif

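/*
 * Illustrative sketch (hypothetical helper; 'pgd_pa' is assumed to be the
 * physical address of the page-table root, PCID handling omitted): the mask
 * returned by tlbstate_lam_cr3_mask() is OR'ed into the value written to CR3
 * when switching mms, which is how the per-CPU LAM mode reaches the hardware.
 */
static inline unsigned long example_build_cr3(unsigned long pgd_pa)
{
	return pgd_pa | tlbstate_lam_cr3_mask();
}
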
/*
 * Toggling CR4.PGE and then restoring the original value flushes the entire
 * TLB on the local CPU, including global entries.
 */
static inline void __native_tlb_flush_global(unsigned long cr4)
{
	native_write_cr4(cr4 ^ X86_CR4_PGE);
	native_write_cr4(cr4);
}
#endif /* _ASM_X86_TLBFLUSH_H */