/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#include <asm/smp.h>
#include <asm/invpcid.h>
#include <asm/pti.h>
#include <asm/processor-flags.h>

/*
 * The x86 feature is called PCID (Process Context IDentifier). It is similar
 * to what is traditionally called ASID on the RISC processors.
 *
 * We don't use the traditional ASID implementation, where each process/mm gets
 * its own ASID and flush/restart when we run out of ASID space.
 *
 * Instead we have a small per-cpu array of ASIDs and cache the last few mm's
 * that came by on this CPU, allowing cheaper switch_mm between processes on
 * this CPU.
 *
 * We end up with different spaces for different things. To avoid confusion we
 * use different names for each of them:
 *
 * ASID  - [0, TLB_NR_DYN_ASIDS-1]
 *         the canonical identifier for an mm
 *
 * kPCID - [1, TLB_NR_DYN_ASIDS]
 *         the value we write into the PCID part of CR3; corresponds to the
 *         ASID+1, because PCID 0 is special.
 *
 * uPCID - [2048 + 1, 2048 + TLB_NR_DYN_ASIDS]
 *         for KPTI each mm has two address spaces and thus needs two
 *         PCID values, but we can still do with a single ASID denomination
 *         for each mm. Corresponds to kPCID + 2048.
 */
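
/*
 * A worked illustration of the three name spaces above (the numbers
 * assume the TLB_NR_DYN_ASIDS = 6 default defined below): the mm
 * assigned ASID 3 is written to CR3 as kPCID 3 + 1 = 4 for its kernel
 * address space and, under KPTI, as uPCID 4 + 2048 = 2052 for its user
 * address space.
 */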

/* There are 12 bits of space for ASIDs in CR3 */
#define CR3_HW_ASID_BITS        12

/*
 * When enabled, PAGE_TABLE_ISOLATION consumes a single bit for
 * user/kernel switches
 */
#ifdef CONFIG_PAGE_TABLE_ISOLATION
# define PTI_CONSUMED_PCID_BITS 1
#else
# define PTI_CONSUMED_PCID_BITS 0
#endif

#define CR3_AVAIL_PCID_BITS (X86_CR3_PCID_BITS - PTI_CONSUMED_PCID_BITS)

/*
 * ASIDs are zero-based: 0->MAX_ASID_AVAILABLE are valid.  -1 below to
 * account for them being zero-based.  Another -1 is because PCID 0 is
 * reserved for use by non-PCID-aware users.
 */
#define MAX_ASID_AVAILABLE ((1 << CR3_AVAIL_PCID_BITS) - 2)

/*
 * 6 because 6 should be plenty and struct tlb_state will fit in two cache
 * lines.
 */
#define TLB_NR_DYN_ASIDS        6
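
/*
 * For illustration: with CONFIG_PAGE_TABLE_ISOLATION=y, one of the 12
 * hardware PCID bits is consumed, so CR3_AVAIL_PCID_BITS = 11 and
 * MAX_ASID_AVAILABLE = (1 << 11) - 2 = 2046; without PTI it is
 * (1 << 12) - 2 = 4094.  Only TLB_NR_DYN_ASIDS = 6 of these are
 * actually cycled through per CPU.
 */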

/*
 * Given @asid, compute kPCID
 */
static inline u16 kern_pcid(u16 asid)
{
        VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);

#ifdef CONFIG_PAGE_TABLE_ISOLATION
        /*
         * Make sure that the dynamic ASID space does not conflict with the
         * bit we are using to switch between user and kernel ASIDs.
         */
        BUILD_BUG_ON(TLB_NR_DYN_ASIDS >= (1 << X86_CR3_PTI_SWITCH_BIT));

        /*
         * The ASID being passed in here should have respected the
         * MAX_ASID_AVAILABLE and thus never have the switch bit set.
         */
        VM_WARN_ON_ONCE(asid & (1 << X86_CR3_PTI_SWITCH_BIT));
#endif
        /*
         * The dynamically-assigned ASIDs that get passed in are small
         * (<TLB_NR_DYN_ASIDS).  They never have the high switch bit set,
         * so do not bother to clear it.
         *
         * If PCID is on, ASID-aware code paths put the ASID+1 into the
         * PCID bits.  This serves two purposes.  It prevents a nasty
         * situation in which PCID-unaware code saves CR3, loads some other
         * value (with PCID == 0), and then restores CR3, thus corrupting
         * the TLB for ASID 0 if the saved ASID was nonzero.  It also means
         * that any bugs involving loading a PCID-enabled CR3 with
         * CR4.PCIDE off will trigger deterministically.
         */
        return asid + 1;
}

/*
 * Given @asid, compute uPCID
 */
static inline u16 user_pcid(u16 asid)
{
        u16 ret = kern_pcid(asid);
#ifdef CONFIG_PAGE_TABLE_ISOLATION
        ret |= 1 << X86_CR3_PTI_SWITCH_BIT;
#endif
        return ret;
}

struct pgd_t;
static inline unsigned long build_cr3(pgd_t *pgd, u16 asid)
{
        if (static_cpu_has(X86_FEATURE_PCID)) {
                return __sme_pa(pgd) | kern_pcid(asid);
        } else {
                VM_WARN_ON_ONCE(asid != 0);
                return __sme_pa(pgd);
        }
}

static inline unsigned long build_cr3_noflush(pgd_t *pgd, u16 asid)
{
        VM_WARN_ON_ONCE(asid > MAX_ASID_AVAILABLE);
        VM_WARN_ON_ONCE(!this_cpu_has(X86_FEATURE_PCID));
        return __sme_pa(pgd) | kern_pcid(asid) | CR3_NOFLUSH;
}
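
/*
 * Rough CR3 layout produced by the helpers above when PCID is enabled
 * (bit positions follow the SDM; they are not defined in this file):
 *
 *   bit  63      - CR3_NOFLUSH: asks the CPU not to flush TLB entries
 *                  tagged with this PCID on load (build_cr3_noflush())
 *   bits 62..12  - the page-aligned physical address from __sme_pa(),
 *                  which may include the SME encryption bit
 *   bits 11..0   - the kPCID, i.e. asid + 1
 */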

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define __flush_tlb() __native_flush_tlb()
#define __flush_tlb_global() __native_flush_tlb_global()
#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
#endif

static inline bool tlb_defer_switch_to_init_mm(void)
{
        /*
         * If we have PCID, then switching to init_mm is reasonably
         * fast.  If we don't have PCID, then switching to init_mm is
         * quite slow, so we try to defer it in the hopes that we can
         * avoid it entirely.  The latter approach runs the risk of
         * receiving otherwise unnecessary IPIs.
         *
         * This choice is just a heuristic.  The tlb code can handle this
         * function returning true or false regardless of whether we have
         * PCID.
         */
        return !static_cpu_has(X86_FEATURE_PCID);
}

struct tlb_context {
        u64 ctx_id;
        u64 tlb_gen;
};

struct tlb_state {
        /*
         * cpu_tlbstate.loaded_mm should match CR3 whenever interrupts
         * are on.  This means that it may not match current->active_mm,
         * which will contain the previous user mm when we're in lazy TLB
         * mode even if we've already switched back to swapper_pg_dir.
         */
        struct mm_struct *loaded_mm;
        u16 loaded_mm_asid;
        u16 next_asid;

        /*
         * We can be in one of several states:
         *
         *  - Actively using an mm.  Our CPU's bit will be set in
         *    mm_cpumask(loaded_mm) and is_lazy == false;
         *
         *  - Not using a real mm.  loaded_mm == &init_mm.  Our CPU's bit
         *    will not be set in mm_cpumask(&init_mm) and is_lazy == false.
         *
         *  - Lazily using a real mm.  loaded_mm != &init_mm, our bit
         *    is set in mm_cpumask(loaded_mm), but is_lazy == true.
         *    We're heuristically guessing that the CR3 load we
         *    skipped more than makes up for the overhead added by
         *    lazy mode.
         */
        bool is_lazy;

        /*
         * If set we changed the page tables in such a way that we
         * needed an invalidation of all contexts (aka. PCIDs / ASIDs).
         * This tells us to go invalidate all the non-loaded ctxs[]
         * on the next context switch.
         *
         * The current ctx was kept up-to-date as it ran and does not
         * need to be invalidated.
         */
        bool invalidate_other;

        /*
         * Mask that contains TLB_NR_DYN_ASIDS+1 bits to indicate
         * the corresponding user PCID needs a flush next time we
         * switch to it; see SWITCH_TO_USER_CR3.
         */
        unsigned short user_pcid_flush_mask;

        /*
         * Access to this CR4 shadow and to H/W CR4 is protected by
         * disabling interrupts when modifying either one.
         */
        unsigned long cr4;

        /*
         * This is a list of all contexts that might exist in the TLB.
         * There is one per ASID that we use, and the ASID (what the
         * CPU calls PCID) is the index into ctxs[].
         *
         * For each context, ctx_id indicates which mm the TLB's user
         * entries came from.  As an invariant, the TLB will never
         * contain entries that are out-of-date with respect to the
         * tlb_gen that mm had reached in this list.
         *
         * To be clear, this means that it's legal for the TLB code to
         * flush the TLB without updating tlb_gen.  This can happen
         * (for now, at least) due to paravirt remote flushes.
         *
         * NB: context 0 is a bit special, since it's also used by
         * various bits of init code.  This is fine -- code that
         * isn't aware of PCID will end up harmlessly flushing
         * context 0.
         */
        struct tlb_context ctxs[TLB_NR_DYN_ASIDS];
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
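
/*
 * An illustrative reading of the generation tracking above (not code
 * from this file): if cpu_tlbstate.ctxs[asid].tlb_gen is 5 while the
 * corresponding mm's context.tlb_gen has advanced to 7, this CPU's TLB
 * may hold entries made stale by generations 6 and 7 and must be
 * flushed (fully or partially) before that mm runs here again.
 */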

/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
        this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}

/* Set in this cpu's CR4. */
static inline void cr4_set_bits(unsigned long mask)
{
        unsigned long cr4;

        cr4 = this_cpu_read(cpu_tlbstate.cr4);
        if ((cr4 | mask) != cr4) {
                cr4 |= mask;
                this_cpu_write(cpu_tlbstate.cr4, cr4);
                __write_cr4(cr4);
        }
}

/* Clear in this cpu's CR4. */
static inline void cr4_clear_bits(unsigned long mask)
{
        unsigned long cr4;

        cr4 = this_cpu_read(cpu_tlbstate.cr4);
        if ((cr4 & ~mask) != cr4) {
                cr4 &= ~mask;
                this_cpu_write(cpu_tlbstate.cr4, cr4);
                __write_cr4(cr4);
        }
}

static inline void cr4_toggle_bits(unsigned long mask)
{
        unsigned long cr4;

        cr4 = this_cpu_read(cpu_tlbstate.cr4);
        cr4 ^= mask;
        this_cpu_write(cpu_tlbstate.cr4, cr4);
        __write_cr4(cr4);
}
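
/*
 * Illustrative usage of the helpers above (the callers live outside
 * this file): cr4_set_bits(X86_CR4_PCIDE) when enabling PCID at boot,
 * or cr4_toggle_bits(X86_CR4_TSD) when the RDTSC-trap setting differs
 * between two tasks across a context switch.  Because CR4 writes are
 * slow, cr4_set_bits() and cr4_clear_bits() consult the shadow first
 * and skip the hardware write when the bits are already in the
 * requested state.
 */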

/* Read the CR4 shadow. */
static inline unsigned long cr4_read_shadow(void)
{
        return this_cpu_read(cpu_tlbstate.cr4);
}

/*
 * Mark all other ASIDs as invalid; the current ASID stays valid.
 */
static inline void invalidate_other_asid(void)
{
        this_cpu_write(cpu_tlbstate.invalidate_other, true);
}

/*
 * Save some of the cr4 feature set we're using (e.g. Pentium 4MB
 * enable and PPro Global page enable), so that any CPUs that boot
 * up after us can get the correct flags.  This should only be used
 * during boot on the boot cpu.
 */
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;

static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
        mmu_cr4_features |= mask;
        if (trampoline_cr4_features)
                *trampoline_cr4_features = mmu_cr4_features;
        cr4_set_bits(mask);
}

extern void initialize_tlbstate_and_flush(void);

/*
 * Given an ASID, flush the corresponding user ASID.  We can delay this
 * until the next time we switch to it.
 *
 * See SWITCH_TO_USER_CR3.
 */
static inline void invalidate_user_asid(u16 asid)
{
        /* There is no user ASID if address space separation is off */
        if (!IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
                return;

        /*
         * We only have a single ASID if PCID is off and the CR3
         * write will have flushed it.
         */
        if (!cpu_feature_enabled(X86_FEATURE_PCID))
                return;

        if (!static_cpu_has(X86_FEATURE_PTI))
                return;

        __set_bit(kern_pcid(asid),
                  (unsigned long *)this_cpu_ptr(&cpu_tlbstate.user_pcid_flush_mask));
}
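
/*
 * For illustration: invalidate_user_asid(1) sets bit kern_pcid(1) == 2
 * in this CPU's user_pcid_flush_mask.  The SWITCH_TO_USER_CR3 entry
 * code then tests and clears that bit on the next return to userspace
 * with ASID 1 and, if it was set, writes the user CR3 without the
 * NOFLUSH bit, so the stale user-PCID entries are flushed.
 */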

/*
 * flush the entire current user mapping
 */
static inline void __native_flush_tlb(void)
{
        invalidate_user_asid(this_cpu_read(cpu_tlbstate.loaded_mm_asid));
        /*
         * If current->mm == NULL then we borrow a mm which may change
         * during a task switch and therefore we must not be preempted
         * while we write CR3 back:
         */
        preempt_disable();
        native_write_cr3(__native_read_cr3());
        preempt_enable();
}

/*
 * flush everything
 */
static inline void __native_flush_tlb_global(void)
{
        unsigned long cr4, flags;

        if (static_cpu_has(X86_FEATURE_INVPCID)) {
                /*
                 * Using INVPCID is considerably faster than a pair of writes
                 * to CR4 sandwiched inside an IRQ flag save/restore.
                 *
                 * Note, this works with CR4.PCIDE=0 or 1.
                 */
                invpcid_flush_all();
                return;
        }

        /*
         * Read-modify-write to CR4 - protect it from preemption and
         * from interrupts. (Use the raw variant because this code can
         * be called from deep inside debugging code.)
         */
        raw_local_irq_save(flags);

        cr4 = this_cpu_read(cpu_tlbstate.cr4);
        /* toggle PGE */
        native_write_cr4(cr4 ^ X86_CR4_PGE);
        /* write old PGE again and flush TLBs */
        native_write_cr4(cr4);

        raw_local_irq_restore(flags);
}

/*
 * flush one page in the user mapping
 */
static inline void __native_flush_tlb_single(unsigned long addr)
{
        u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);

        asm volatile("invlpg (%0)" ::"r" (addr) : "memory");

        if (!static_cpu_has(X86_FEATURE_PTI))
                return;

        /*
         * Some platforms #GP if we call invpcid(type=1/2) before CR4.PCIDE=1.
         * Just use invalidate_user_asid() in case we are called early.
         */
        if (!this_cpu_has(X86_FEATURE_INVPCID_SINGLE))
                invalidate_user_asid(loaded_mm_asid);
        else
                invpcid_flush_one(user_pcid(loaded_mm_asid), addr);
}

/*
 * flush everything
 */
static inline void __flush_tlb_all(void)
{
        if (boot_cpu_has(X86_FEATURE_PGE)) {
                __flush_tlb_global();
        } else {
                /*
                 * !PGE -> !PCID (setup_pcid()), thus every flush is total.
                 */
                __flush_tlb();
        }
}

/*
 * flush one page in the kernel mapping
 */
static inline void __flush_tlb_one(unsigned long addr)
{
        count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
        __flush_tlb_single(addr);

        if (!static_cpu_has(X86_FEATURE_PTI))
                return;

        /*
         * __flush_tlb_single() will have cleared the TLB entry for this ASID,
         * but since kernel space is replicated across all ASIDs, we must
         * also invalidate all the others.
         */
        invalidate_other_asid();
}

#define TLB_FLUSH_ALL   -1UL

/*
 * TLB flushing:
 *
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_others(cpumask, info) flushes TLBs on other cpus
 *
 * ..but the i386 has somewhat limited tlb flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 */
struct flush_tlb_info {
        /*
         * We support several kinds of flushes.
         *
         * - Fully flush a single mm.  .mm will be set, .end will be
         *   TLB_FLUSH_ALL, and .new_tlb_gen will be the tlb_gen to
         *   which the IPI sender is trying to catch us up.
         *
         * - Partially flush a single mm.  .mm will be set, .start and
         *   .end will indicate the range, and .new_tlb_gen will be set
         *   such that the changes between generation .new_tlb_gen-1 and
         *   .new_tlb_gen are entirely contained in the indicated range.
         *
         * - Fully flush all mms whose tlb_gens have been updated.  .mm
         *   will be NULL, .end will be TLB_FLUSH_ALL, and .new_tlb_gen
         *   will be zero.
         */
        struct mm_struct *mm;
        unsigned long start;
        unsigned long end;
        u64 new_tlb_gen;
};
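
/*
 * A sketch, not code from this file, of how the first two cases above
 * might be encoded (some_mm and addr are assumed to be a valid mm and
 * a user address):
 *
 *      struct flush_tlb_info full_mm = {
 *              .mm = some_mm, .start = 0UL, .end = TLB_FLUSH_ALL,
 *              .new_tlb_gen = atomic64_read(&some_mm->context.tlb_gen),
 *      };
 *      struct flush_tlb_info one_page = {
 *              .mm = some_mm, .start = addr, .end = addr + PAGE_SIZE,
 *              .new_tlb_gen = inc_mm_tlb_gen(some_mm),
 *      };
 *
 * The third case (catch up every mm) is the .mm == NULL form with
 * .end = TLB_FLUSH_ALL and .new_tlb_gen = 0.
 */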

#define local_flush_tlb() __flush_tlb()

#define flush_tlb_mm(mm)        flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)

#define flush_tlb_range(vma, start, end)        \
                flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)

extern void flush_tlb_all(void);
extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                                unsigned long end, unsigned long vmflag);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
{
        flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, VM_NONE);
}

void native_flush_tlb_others(const struct cpumask *cpumask,
                             const struct flush_tlb_info *info);

static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
{
        /*
         * Bump the generation count.  This also serves as a full barrier
         * that synchronizes with switch_mm(): callers are required to order
         * their read of mm_cpumask after their writes to the paging
         * structures.
         */
        return atomic64_inc_return(&mm->context.tlb_gen);
}
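
/*
 * An illustrative caller ordering implied by the barrier comment above
 * (the helper names are this file's; the surrounding steps are a
 * sketch): first update the page tables, then call inc_mm_tlb_gen(mm),
 * and only then read mm_cpumask(mm) to decide which CPUs need an IPI,
 * as arch_tlbbatch_add_mm() below does.
 */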

static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
                                        struct mm_struct *mm)
{
        inc_mm_tlb_gen(mm);
        cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}

extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);

#ifndef CONFIG_PARAVIRT
#define flush_tlb_others(mask, info)    \
        native_flush_tlb_others(mask, info)
#endif

#endif /* _ASM_X86_TLBFLUSH_H */