#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#include <asm/smp.h>

static inline void __invpcid(unsigned long pcid, unsigned long addr,
			     unsigned long type)
{
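	/*
	 * Descriptor layout, per the Intel SDM (noted here for reference):
	 * the PCID goes in bits 11:0 of the low quadword (bits 63:12 are
	 * reserved and must be zero), the linear address in the high one.
	 */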
	struct { u64 d[2]; } desc = { { pcid, addr } };

	/*
	 * The memory clobber is because the whole point is to invalidate
	 * stale TLB entries and, especially if we're flushing global
	 * mappings, we don't want the compiler to reorder any subsequent
	 * memory accesses before the TLB flush.
	 *
	 * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and
	 * invpcid (%rcx), %rax in long mode.
	 */
	asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
		      : : "m" (desc), "a" (type), "c" (&desc) : "memory");
}

#define INVPCID_TYPE_INDIV_ADDR		0
#define INVPCID_TYPE_SINGLE_CTXT	1
#define INVPCID_TYPE_ALL_INCL_GLOBAL	2
#define INVPCID_TYPE_ALL_NON_GLOBAL	3

/* Flush one mapping: the given address in the given PCID, not including globals. */
static inline void invpcid_flush_one(unsigned long pcid,
				     unsigned long addr)
{
	__invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
}

/* Flush all mappings for a given PCID, not including globals. */
static inline void invpcid_flush_single_context(unsigned long pcid)
{
	__invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
}

/* Flush all mappings, including globals, for all PCIDs. */
static inline void invpcid_flush_all(void)
{
	__invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
}

/* Flush all mappings for all PCIDs except globals. */
static inline void invpcid_flush_all_nonglobals(void)
{
	__invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
}

static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
{
	u64 new_tlb_gen;

	/*
	 * Bump the generation count.  This also serves as a full barrier
	 * that synchronizes with switch_mm(): callers are required to order
	 * their read of mm_cpumask after their writes to the paging
	 * structures.
	 */
	smp_mb__before_atomic();
	new_tlb_gen = atomic64_inc_return(&mm->context.tlb_gen);
	smp_mb__after_atomic();

	return new_tlb_gen;
}
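
/*
 * Illustrative sketch (not part of the original header): a flusher relies
 * on the barrier implied above to publish its page-table update before it
 * samples mm_cpumask(), roughly:
 *
 *	set_pte_at(mm, addr, ptep, pte);	// write paging structures
 *	new_gen = inc_mm_tlb_gen(mm);		// full barrier + bump gen
 *	// ...only now read mm_cpumask(mm) and send flush IPIs...
 */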

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif

struct tlb_state {
	/*
	 * cpu_tlbstate.loaded_mm should match CR3 whenever interrupts
	 * are on.  This means that it may not match current->active_mm,
	 * which will contain the previous user mm when we're in lazy TLB
	 * mode even if we've already switched back to swapper_pg_dir.
	 */
	struct mm_struct *loaded_mm;
	int state;

	/*
	 * Access to this CR4 shadow and to H/W CR4 is protected by
	 * disabling interrupts when modifying either one.
	 */
	unsigned long cr4;
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);

/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}

/* Set in this cpu's CR4. */
static inline void cr4_set_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 | mask) != cr4) {
		cr4 |= mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 & ~mask) != cr4) {
		cr4 &= ~mask;
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}

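/* Toggle (XOR) the given mask in this cpu's CR4. */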
static inline void cr4_toggle_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	cr4 ^= mask;
	this_cpu_write(cpu_tlbstate.cr4, cr4);
	__write_cr4(cr4);
}

/* Read the CR4 shadow. */
static inline unsigned long cr4_read_shadow(void)
{
	return this_cpu_read(cpu_tlbstate.cr4);
}

/*
 * Save some of the cr4 feature set we're using (e.g. Pentium 4MB
 * enable and PPro Global page enable), so that any CPUs that boot
 * up after us can get the correct flags.  This should only be used
 * during boot on the boot cpu.
 */
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
	mmu_cr4_features |= mask;
	if (trampoline_cr4_features)
		*trampoline_cr4_features = mmu_cr4_features;
	cr4_set_bits(mask);
}

static inline void __native_flush_tlb(void)
{
	/*
	 * If current->mm == NULL then we borrow a mm which may change
	 * during a task switch and therefore we must not be preempted
	 * while we write CR3 back:
	 */
	preempt_disable();
	native_write_cr3(__native_read_cr3());
	preempt_enable();
}

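/*
 * Toggling CR4.PGE below is what actually does the flush: architecturally,
 * a CR4 write that changes PGE invalidates all TLB entries, including
 * global ones.
 */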
static inline void __native_flush_tlb_global_irq_disabled(void)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	/* clear PGE */
	native_write_cr4(cr4 & ~X86_CR4_PGE);
	/* write old PGE again and flush TLBs */
	native_write_cr4(cr4);
}

static inline void __native_flush_tlb_global(void)
{
	unsigned long flags;

	if (static_cpu_has(X86_FEATURE_INVPCID)) {
		/*
		 * Using INVPCID is considerably faster than a pair of writes
		 * to CR4 sandwiched inside an IRQ flag save/restore.
		 */
		invpcid_flush_all();
		return;
	}

	/*
	 * Read-modify-write to CR4 - protect it from preemption and
	 * from interrupts.  (Use the raw variant because this code can
	 * be called from deep inside debugging code.)
	 */
	raw_local_irq_save(flags);

	__native_flush_tlb_global_irq_disabled();

	raw_local_irq_restore(flags);
}

static inline void __native_flush_tlb_single(unsigned long addr)
{
	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}

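/*
 * Prefer the global variant when PGE is supported: a plain CR3 write does
 * not invalidate global mappings, so kernel entries would survive it.
 */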
static inline void __flush_tlb_all(void)
{
	if (boot_cpu_has(X86_FEATURE_PGE))
		__flush_tlb_global();
	else
		__flush_tlb();
}

static inline void __flush_tlb_one(unsigned long addr)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
	__flush_tlb_single(addr);
}

#define TLB_FLUSH_ALL	-1UL

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, info) flushes TLBs on other cpus
 *
 * ..but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */
struct flush_tlb_info {
	struct mm_struct *mm;
	unsigned long start;
	unsigned long end;
};

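/*
 * Illustrative only: a caller wanting a remote flush fills in a
 * flush_tlb_info and passes it along with the target CPU mask, e.g.:
 *
 *	struct flush_tlb_info info = {
 *		.mm	= mm,
 *		.start	= start,
 *		.end	= end,
 *	};
 *	flush_tlb_others(mm_cpumask(mm), &info);
 */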
#define local_flush_tlb() __flush_tlb()

#define flush_tlb_mm(mm)	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)

#define flush_tlb_range(vma, start, end)	\
		flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)

extern void flush_tlb_all(void);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
{
	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, VM_NONE);
}

void native_flush_tlb_others(const struct cpumask *cpumask,
			     const struct flush_tlb_info *info);

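/*
 * Values for cpu_tlbstate.state: the loaded mm is in active use (OK), or
 * this CPU is running in lazy TLB mode (LAZY).
 */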
#define TLBSTATE_OK	1
#define TLBSTATE_LAZY	2

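/*
 * Queue an mm for a deferred (batched) TLB flush: bump its generation so
 * stale entries are detectable, and accumulate its CPUs into the batch mask.
 */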
static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
					struct mm_struct *mm)
{
	inc_mm_tlb_gen(mm);
	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}

extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);

#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, info)	\
	native_flush_tlb_others(mask, info)
#endif

#endif /* _ASM_X86_TLBFLUSH_H */