/* include/asm-generic/tlb.h
 *
 *	Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
 *
 * Copyright 2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
/*
 * Semi RCU freeing of the page directories.
 *
 * This is needed by some architectures to implement software pagetable walkers.
 *
 * gup_fast() and other software pagetable walkers do a lockless page-table
 * walk and therefore need some synchronization with the freeing of the page
 * directories. The chosen means to accomplish that is by disabling IRQs over
 * the walk.
 *
 * Architectures that use IPIs to flush TLBs will then automatically do the
 * right thing, since we unlink the page, flush TLBs, then free the page.
 * Because the disabling of IRQs delays the completion of the TLB flush, we
 * can never observe an already freed page.
 *
 * Architectures that do not have this property (e.g. PPC) need to delay the
 * freeing by some other means; this batching is that means.
 *
 * What we do is batch the freed directory pages (tables) and RCU free them.
 * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
 * holds off grace periods.
 *
 * However, in order to batch these pages we need to allocate storage, and
 * that allocation happens deep inside the MM code where it can easily fail
 * under memory pressure. To guarantee progress we fall back to single table
 * freeing; see the implementation of tlb_remove_table_one().
 */
struct mmu_table_batch {
        struct rcu_head rcu;
        unsigned int nr;
        void *tables[0];
};

#define MAX_TABLE_BATCH \
        ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#endif
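
/*
 * Sizing note: because tables[] is a flexible array member, each batch is
 * sized to fill exactly one page. On a typical 64-bit configuration with
 * 4 KiB pages, sizeof(struct mmu_table_batch) is 24 bytes (16 for the
 * rcu_head, 4 for nr, 4 of padding), so MAX_TABLE_BATCH works out to
 * (4096 - 24) / 8 = 509 table pointers per batch.
 *
 * Illustrative usage (an assumption about arch code, not part of this
 * header): an architecture selecting CONFIG_HAVE_RCU_TABLE_FREE hands its
 * page-table pages to tlb_remove_table() instead of freeing them directly,
 * so they are only reused after a sched-RCU grace period has elapsed.
 */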

/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define MMU_GATHER_BUNDLE 8

struct mmu_gather_batch {
        struct mmu_gather_batch *next;
        unsigned int nr;
        unsigned int max;
        struct page *pages[0];
};

#define MAX_GATHER_BATCH \
        ((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))
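
/*
 * Worked example: on a 64-bit build with 4 KiB pages and no structure
 * padding, sizeof(struct mmu_gather_batch) is 8 + 4 + 4 = 16 bytes, so
 * each dynamically allocated batch holds (4096 - 16) / 8 = 510 page
 * pointers, compared to the MMU_GATHER_BUNDLE = 8 on-stack fallback.
 */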

/* struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch-specific code for tlb_remove_page.
 */
struct mmu_gather {
        struct mm_struct *mm;
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
        struct mmu_table_batch *batch;
#endif
        unsigned int need_flush : 1, /* Did free PTEs */
                     fast_mode  : 1; /* No batching   */

        unsigned int fullmm;

        struct mmu_gather_batch *active;
        struct mmu_gather_batch local;
        struct page *__pages[MMU_GATHER_BUNDLE];
};

/*
 * For UP we don't need to worry about TLB flush
 * and page free order so much.
 */
#ifdef CONFIG_SMP
#define tlb_fast_mode(tlb) (tlb->fast_mode)
#else
#define tlb_fast_mode(tlb) 1
#endif
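
/*
 * In fast mode there is no need to batch: no other CPU can be caching a
 * stale TLB entry for a page we have just unmapped, so __tlb_remove_page()
 * below frees each page immediately instead of deferring the free until
 * after the TLB flush.
 */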

/*
 * Grab the next batch to gather pages into, allocating a fresh one if the
 * current batch is full. Returns 1 on success and 0 if no batch page could
 * be allocated, in which case the caller must flush what has been gathered
 * so far before continuing.
 */
static inline int tlb_next_batch(struct mmu_gather *tlb)
{
        struct mmu_gather_batch *batch;

        batch = tlb->active;
        if (batch->next) {
                tlb->active = batch->next;
                return 1;
        }

        /* GFP_NOWAIT: we are deep in the unmap path and must not sleep */
        batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
        if (!batch)
                return 0;

        batch->next = NULL;
        batch->nr = 0;
        batch->max = MAX_GATHER_BATCH;

        tlb->active->next = batch;
        tlb->active = batch;

        return 1;
}

/* tlb_gather_mmu
 *	Called to initialize an (on-stack) mmu_gather structure for page-table
 *	tear-down from @mm. The @fullmm argument is used when @mm is without
 *	users and we're going to destroy the full address space (exit/execve).
 */
static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
{
        tlb->mm = mm;

        tlb->fullmm = fullmm;
        tlb->need_flush = 0;
        tlb->fast_mode = (num_possible_cpus() == 1);
        tlb->local.next = NULL;
        tlb->local.nr = 0;
        tlb->local.max = ARRAY_SIZE(tlb->__pages);
        tlb->active = &tlb->local;

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
        tlb->batch = NULL;
#endif
}

static inline void
tlb_flush_mmu(struct mmu_gather *tlb)
{
        struct mmu_gather_batch *batch;

        if (!tlb->need_flush)
                return;
        tlb->need_flush = 0;
        tlb_flush(tlb);
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
        tlb_table_flush(tlb);
#endif

        if (tlb_fast_mode(tlb))
                return;

        for (batch = &tlb->local; batch; batch = batch->next) {
                free_pages_and_swap_cache(batch->pages, batch->nr);
                batch->nr = 0;
        }
        tlb->active = &tlb->local;
}

/* tlb_finish_mmu
 *	Called at the end of the shootdown operation to free up any resources
 *	that were required.
 */
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
        struct mmu_gather_batch *batch, *next;

        tlb_flush_mmu(tlb);

        /* keep the page table cache within bounds */
        check_pgt_cache();

        for (batch = tlb->local.next; batch; batch = next) {
                next = batch->next;
                free_pages((unsigned long)batch, 0);
        }
        tlb->local.next = NULL;
}
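
/*
 * Typical lifecycle, as seen from an unmap path (illustrative sketch: the
 * walker loop and variable names are placeholders; only the tlb_* calls
 * are this header's API):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, 0);
 *	for each pte in [start, end):
 *		ptent = ptep_get_and_clear(mm, addr, pte);
 *		tlb_remove_tlb_entry(&tlb, pte, addr);
 *		tlb_remove_page(&tlb, pte_page(ptent));
 *	tlb_finish_mmu(&tlb, start, end);
 *
 * tlb_finish_mmu() performs the final TLB flush (if one is still needed)
 * and only then frees the gathered pages and the batch pages themselves.
 */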

/* __tlb_remove_page
 *	Must perform the equivalent of __free_pte(pte_get_and_clear(ptep)),
 *	while handling the additional races in SMP caused by other CPUs
 *	caching valid mappings in their TLBs. Returns the number of free
 *	page slots left. When out of page slots we must call tlb_flush_mmu().
 */
static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
        struct mmu_gather_batch *batch;

        tlb->need_flush = 1;

        if (tlb_fast_mode(tlb)) {
                free_page_and_swap_cache(page);
                return 1; /* avoid calling tlb_flush_mmu() */
        }

        batch = tlb->active;
        batch->pages[batch->nr++] = page;
        VM_BUG_ON(batch->nr > batch->max);
        if (batch->nr == batch->max) {
                if (!tlb_next_batch(tlb))
                        return 0;
                /* reload: tlb_next_batch() installed a fresh active batch */
                batch = tlb->active;
        }

        return batch->max - batch->nr;
}

/* tlb_remove_page
 *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
 *	required.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
        if (!__tlb_remove_page(tlb, page))
                tlb_flush_mmu(tlb);
}

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that PTEs were really unmapped in ->need_flush, so we can
 * later optimise away the tlb invalidate. This helps when userspace is
 * unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)                \
        do {                                                    \
                tlb->need_flush = 1;                            \
                __tlb_remove_tlb_entry(tlb, ptep, address);     \
        } while (0)
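
/*
 * __tlb_remove_tlb_entry() is supplied by the architecture before this
 * header is included. On architectures that need no per-entry work it can
 * simply be empty, e.g. (illustrative, not a definition from this file):
 *
 *	#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
 */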

#define pte_free_tlb(tlb, ptep, address)                \
        do {                                            \
                tlb->need_flush = 1;                    \
                __pte_free_tlb(tlb, ptep, address);     \
        } while (0)

#ifndef __ARCH_HAS_4LEVEL_HACK
#define pud_free_tlb(tlb, pudp, address)                \
        do {                                            \
                tlb->need_flush = 1;                    \
                __pud_free_tlb(tlb, pudp, address);     \
        } while (0)
#endif

#define pmd_free_tlb(tlb, pmdp, address)                \
        do {                                            \
                tlb->need_flush = 1;                    \
                __pmd_free_tlb(tlb, pmdp, address);     \
        } while (0)
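
/*
 * The __pte_free_tlb()/__pud_free_tlb()/__pmd_free_tlb() hooks are supplied
 * by the architecture. A minimal sketch of one (illustrative; real
 * implementations live in the arch headers and may need extra teardown,
 * such as a page-table destructor):
 *
 *	#define __pte_free_tlb(tlb, pte, address)	\
 *		tlb_remove_page((tlb), (pte))
 *
 * Routing the page-table page through the mmu_gather machinery ensures it
 * is only freed once the TLBs that might still reference it are flushed.
 */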

#define tlb_migrate_finish(mm) do {} while (0)

#endif /* _ASM_GENERIC__TLB_H */