arch/x86/include/asm/tlbflush.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#include <asm/smp.h>
#include <asm/invpcid.h>
#include <asm/pti.h>
#include <asm/processor-flags.h>

struct flush_tlb_info;

void __flush_tlb_all(void);
void flush_tlb_local(void);
void flush_tlb_one_user(unsigned long addr);
void flush_tlb_one_kernel(unsigned long addr);
void flush_tlb_others(const struct cpumask *cpumask,
		      const struct flush_tlb_info *info);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif

/*
 * 6 because 6 should be plenty and struct tlb_state will fit in two cache
 * lines.
 */
#define TLB_NR_DYN_ASIDS	6

struct tlb_context {
	u64 ctx_id;
	u64 tlb_gen;
};
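
/*
 * Illustrative note: each tlb_context records, for one hardware ASID,
 * which mm the TLB last held entries for (ctx_id) and how current those
 * entries are (tlb_gen, compared against mm->context.tlb_gen). The
 * switch code can then distinguish "same mm, just catch up on flushes"
 * from "different mm, full flush needed", roughly:
 *
 *	if (this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) ==
 *	    next->context.ctx_id)
 *		;	// reuse the ASID; flush only if tlb_gen is stale
 */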

struct tlb_state {
	/*
	 * cpu_tlbstate.loaded_mm should match CR3 whenever interrupts
	 * are on. This means that it may not match current->active_mm,
	 * which will contain the previous user mm when we're in lazy TLB
	 * mode even if we've already switched back to swapper_pg_dir.
	 *
	 * During switch_mm_irqs_off(), loaded_mm will be set to
	 * LOADED_MM_SWITCHING during the brief interrupts-off window
	 * when CR3 and loaded_mm would otherwise be inconsistent. This
	 * is for nmi_uaccess_okay()'s benefit.
	 */
	struct mm_struct *loaded_mm;

#define LOADED_MM_SWITCHING ((struct mm_struct *)1UL)

	/* Last user mm for optimizing IBPB */
	union {
		struct mm_struct *last_user_mm;
		unsigned long last_user_mm_ibpb;
	};

	u16 loaded_mm_asid;
	u16 next_asid;

	/*
	 * We can be in one of several states:
	 *
	 *  - Actively using an mm. Our CPU's bit will be set in
	 *    mm_cpumask(loaded_mm) and is_lazy == false.
	 *
	 *  - Not using a real mm. loaded_mm == &init_mm. Our CPU's bit
	 *    will not be set in mm_cpumask(&init_mm) and is_lazy == false.
	 *
	 *  - Lazily using a real mm. loaded_mm != &init_mm, our bit
	 *    is set in mm_cpumask(loaded_mm), but is_lazy == true.
	 *    We're heuristically guessing that the CR3 load we
	 *    skipped more than makes up for the overhead added by
	 *    lazy mode.
	 */
	bool is_lazy;

	/*
	 * If set, we changed the page tables in a way that required
	 * an invalidation of all contexts (aka. PCIDs / ASIDs).
	 * This tells us to go invalidate all the non-loaded ctxs[]
	 * on the next context switch.
	 *
	 * The current ctx was kept up-to-date as it ran and does not
	 * need to be invalidated.
	 */
	bool invalidate_other;

	/*
	 * Mask that contains TLB_NR_DYN_ASIDS+1 bits to indicate
	 * the corresponding user PCID needs a flush next time we
	 * switch to it; see SWITCH_TO_USER_CR3.
	 */
	unsigned short user_pcid_flush_mask;

	/*
	 * Access to this CR4 shadow and to H/W CR4 is protected by
	 * disabling interrupts when modifying either one.
	 */
	unsigned long cr4;

	/*
	 * This is a list of all contexts that might exist in the TLB.
	 * There is one per ASID that we use, and the ASID (what the
	 * CPU calls PCID) is the index into ctxs[].
	 *
	 * For each context, ctx_id indicates which mm the TLB's user
	 * entries came from. As an invariant, the TLB will never
	 * contain entries that are out-of-date with respect to the
	 * tlb_gen recorded in the list.
	 *
	 * To be clear, this means that it's legal for the TLB code to
	 * flush the TLB without updating tlb_gen. This can happen
	 * (for now, at least) due to paravirt remote flushes.
	 *
	 * NB: context 0 is a bit special, since it's also used by
	 * various bits of init code. This is fine -- code that
	 * isn't aware of PCID will end up harmlessly flushing
	 * context 0.
	 */
	struct tlb_context ctxs[TLB_NR_DYN_ASIDS];
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);

bool nmi_uaccess_okay(void);
#define nmi_uaccess_okay nmi_uaccess_okay

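/*
 * Sketch of the idea behind nmi_uaccess_okay() (the real implementation
 * lives in arch/x86/mm/tlb.c; this is illustrative only): an NMI may
 * touch user memory only if loaded_mm is a real, settled mm that
 * matches what CR3 points at:
 *
 *	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
 *
 *	if (loaded_mm == LOADED_MM_SWITCHING)
 *		return false;	// CR3 and loaded_mm may disagree right now
 *	return loaded_mm == current->mm;
 */
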
void cr4_update_irqsoff(unsigned long set, unsigned long clear);
unsigned long cr4_read_shadow(void);

/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}

/* Set bits in this CPU's CR4. */
static inline void cr4_set_bits_irqsoff(unsigned long mask)
{
	cr4_update_irqsoff(mask, 0);
}

/* Clear bits in this CPU's CR4. */
static inline void cr4_clear_bits_irqsoff(unsigned long mask)
{
	cr4_update_irqsoff(0, mask);
}

/* Set bits in this CPU's CR4. */
static inline void cr4_set_bits(unsigned long mask)
{
	unsigned long flags;

	local_irq_save(flags);
	cr4_set_bits_irqsoff(mask);
	local_irq_restore(flags);
}

/* Clear bits in this CPU's CR4. */
static inline void cr4_clear_bits(unsigned long mask)
{
	unsigned long flags;

	local_irq_save(flags);
	cr4_clear_bits_irqsoff(mask);
	local_irq_restore(flags);
}

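/*
 * Usage sketch (illustrative): a caller enabling user-space RDPMC on
 * this CPU sets CR4.PCE, and clears it again when done; the _irqsoff
 * variants are for callers that already run with interrupts disabled:
 *
 *	cr4_set_bits(X86_CR4_PCE);
 *	...
 *	cr4_clear_bits(X86_CR4_PCE);
 */
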
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

extern void initialize_tlbstate_and_flush(void);

#define TLB_FLUSH_ALL	-1UL

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, info) flushes TLBs on other cpus
 *
 * ...but the i386 has somewhat limited TLB flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */
struct flush_tlb_info {
	/*
	 * We support several kinds of flushes.
	 *
	 *  - Fully flush a single mm. .mm will be set, .end will be
	 *    TLB_FLUSH_ALL, and .new_tlb_gen will be the tlb_gen to
	 *    which the IPI sender is trying to catch us up.
	 *
	 *  - Partially flush a single mm. .mm will be set, .start and
	 *    .end will indicate the range, and .new_tlb_gen will be set
	 *    such that the changes between generation .new_tlb_gen-1 and
	 *    .new_tlb_gen are entirely contained in the indicated range.
	 *
	 *  - Fully flush all mms whose tlb_gens have been updated. .mm
	 *    will be NULL, .end will be TLB_FLUSH_ALL, and .new_tlb_gen
	 *    will be zero.
	 */
	struct mm_struct *mm;
	unsigned long start;
	unsigned long end;
	u64 new_tlb_gen;
	unsigned int stride_shift;
	bool freed_tables;
};

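/*
 * Illustrative only: the flush_tlb_*() entry points below construct
 * the flush_tlb_info themselves; callers do not normally fill one in
 * by hand. A partial flush of a single mm would look roughly like:
 *
 *	struct flush_tlb_info info = {
 *		.mm		= mm,
 *		.start		= start,
 *		.end		= end,
 *		.new_tlb_gen	= inc_mm_tlb_gen(mm),
 *		.stride_shift	= PAGE_SHIFT,
 *		.freed_tables	= false,
 *	};
 *	flush_tlb_others(mm_cpumask(mm), &info);
 */
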
#define flush_tlb_mm(mm)						\
		flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL, true)

#define flush_tlb_range(vma, start, end)				\
	flush_tlb_mm_range((vma)->vm_mm, start, end,			\
			   ((vma)->vm_flags & VM_HUGETLB)		\
				? huge_page_shift(hstate_vma(vma))	\
				: PAGE_SHIFT, false)

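/*
 * Usage sketch (illustrative): after clearing PTEs in [start, end)
 * under a VMA, a caller flushes the range with:
 *
 *	flush_tlb_range(vma, start, end);
 *
 * For hugetlb VMAs the macro passes the huge page shift as the flush
 * stride, so the flush steps through the range one huge page at a
 * time rather than one base page at a time.
 */
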
extern void flush_tlb_all(void);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned int stride_shift,
				bool freed_tables);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
{
	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, PAGE_SHIFT, false);
}

static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
{
	/*
	 * Bump the generation count. This also serves as a full barrier
	 * that synchronizes with switch_mm(): callers are required to order
	 * their read of mm_cpumask after their writes to the paging
	 * structures.
	 */
	return atomic64_inc_return(&mm->context.tlb_gen);
}

static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
					struct mm_struct *mm)
{
	inc_mm_tlb_gen(mm);
	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}

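/*
 * Usage sketch (illustrative): the batched unmap path in mm/rmap.c
 * clears PTEs, records which CPUs may need a flush, and flushes once
 * at the end. The atomic in inc_mm_tlb_gen() provides the full
 * barrier that orders the PTE writes before the mm_cpumask() read:
 *
 *	ptep_get_and_clear(mm, addr, ptep);
 *	arch_tlbbatch_add_mm(&batch->arch, mm);
 *	...
 *	arch_tlbbatch_flush(&batch->arch);
 */
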
extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);

#endif /* _ASM_X86_TLBFLUSH_H */