#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/pagemap.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>
#include <linux/swap.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>

#ifndef CONFIG_MMU_GATHER_NO_GATHER
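
/*
 * For reference, the batch structure manipulated below is declared in
 * <asm-generic/tlb.h>; roughly (paraphrased sketch, field layout may
 * differ by kernel version):
 *
 *	struct mmu_gather_batch {
 *		struct mmu_gather_batch	*next;
 *		unsigned int		nr;
 *		unsigned int		max;
 *		struct page		*pages[0];
 *	};
 *
 * Each batch occupies a single page, so MAX_GATHER_BATCH is the number
 * of page pointers that fit in a page after the header, and
 * MAX_GATHER_BATCH_COUNT bounds the number of batches so one mmu_gather
 * cannot pin an unbounded amount of memory.
 */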

static bool tlb_next_batch(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch;

	batch = tlb->active;
	if (batch->next) {
		tlb->active = batch->next;
		return true;
	}

	if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
		return false;

	batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
	if (!batch)
		return false;

	tlb->batch_count++;
	batch->next = NULL;
	batch->nr   = 0;
	batch->max  = MAX_GATHER_BATCH;

	tlb->active->next = batch;
	tlb->active = batch;

	return true;
}

static void tlb_batch_pages_flush(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch;

	for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
		free_pages_and_swap_cache(batch->pages, batch->nr);
		batch->nr = 0;
	}
	tlb->active = &tlb->local;
}

static void tlb_batch_list_free(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch, *next;

	for (batch = tlb->local.next; batch; batch = next) {
		next = batch->next;
		free_pages((unsigned long)batch, 0);
	}
	tlb->local.next = NULL;
}

bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
{
	struct mmu_gather_batch *batch;

	VM_BUG_ON(!tlb->end);

#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	VM_WARN_ON(tlb->page_size != page_size);
#endif

	batch = tlb->active;
	/*
	 * Add the page and check if we are full. If so,
	 * force a flush.
	 */
	batch->pages[batch->nr++] = page;
	if (batch->nr == batch->max) {
		if (!tlb_next_batch(tlb))
			return true;
		batch = tlb->active;
	}
	VM_BUG_ON_PAGE(batch->nr > batch->max, page);

	return false;
}
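
/*
 * A sketch of how callers typically reach __tlb_remove_page_size(): the
 * inline wrappers in <asm-generic/tlb.h> (paraphrased here, not part of
 * this file) flush as soon as the gather is full:
 *
 *	static inline void tlb_remove_page_size(struct mmu_gather *tlb,
 *						struct page *page, int page_size)
 *	{
 *		if (__tlb_remove_page_size(tlb, page, page_size))
 *			tlb_flush_mmu(tlb);
 *	}
 *
 * so a 'true' return ("full, and no new batch could be allocated")
 * translates directly into an immediate TLB flush plus page free.
 */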

#endif /* MMU_GATHER_NO_GATHER */

#ifdef CONFIG_MMU_GATHER_TABLE_FREE

static void __tlb_remove_table_free(struct mmu_table_batch *batch)
{
	int i;

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}
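
/*
 * __tlb_remove_table() is supplied by the architecture and frees one
 * page-table page once it is safe to do so. As an illustration (an
 * assumption about one arch; check the arch headers for the current
 * definition), arm64 defines it roughly as:
 *
 *	#define __tlb_remove_table(entry)	\
 *		free_page_and_swap_cache((struct page *)(entry))
 */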

#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE

/*
 * Semi RCU freeing of the page directories.
 *
 * This is needed by some architectures to implement software pagetable walkers.
 *
 * gup_fast() and other software pagetable walkers do a lockless page-table
 * walk and therefore need some synchronization with the freeing of the page
 * directories. The chosen means to accomplish that is by disabling IRQs over
 * the walk.
 *
 * Architectures that use IPIs to flush TLBs will then automagically DTRT,
 * since we unlink the page, flush TLBs, free the page. Since the disabling of
 * IRQs delays the completion of the TLB flush we can never observe an already
 * freed page.
 *
 * Architectures that do not have this (PPC) need to delay the freeing by some
 * other means, and this is that means.
 *
 * What we do is batch the freed directory pages (tables) and RCU free them.
 * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
 * holds off grace periods.
 *
 * However, in order to batch these pages we need to allocate storage; this
 * allocation is deep inside the MM code and can thus easily fail on memory
 * pressure. To guarantee progress we fall back to single table freeing, see
 * the implementation of tlb_remove_table_one().
 */
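
/*
 * For context, the IRQ-disabled lockless walk looks roughly like this in
 * mm/gup.c (paraphrased sketch, not part of this file):
 *
 *	local_irq_save(flags);
 *	gup_pgd_range(addr, end, gup_flags, pages, &nr_pinned);
 *	local_irq_restore(flags);
 *
 * With IRQs off, an IPI-based TLB flush cannot complete on this CPU and
 * an RCU-sched grace period cannot elapse, so in either scheme the
 * tables being walked cannot be freed underneath the walker.
 */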

static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

static void tlb_remove_table_sync_one(void)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely on
	 * IRQ disabling.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	__tlb_remove_table_free(container_of(head, struct mmu_table_batch, rcu));
}

static void tlb_remove_table_free(struct mmu_table_batch *batch)
{
	call_rcu(&batch->rcu, tlb_remove_table_rcu);
}

#else /* !CONFIG_MMU_GATHER_RCU_TABLE_FREE */

static void tlb_remove_table_sync_one(void) { }

static void tlb_remove_table_free(struct mmu_table_batch *batch)
{
	__tlb_remove_table_free(batch);
}

#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */

/*
 * If we want tlb_remove_table() to imply TLB invalidates.
 */
static inline void tlb_table_invalidate(struct mmu_gather *tlb)
{
	if (tlb_needs_table_invalidate()) {
		/*
		 * Invalidate page-table caches used by hardware walkers. Then
		 * we still need to RCU-sched wait while freeing the pages
		 * because software walkers can still be in-flight.
		 */
		tlb_flush_mmu_tlbonly(tlb);
	}
}

static void tlb_remove_table_one(void *table)
{
	tlb_remove_table_sync_one();
	__tlb_remove_table(table);
}

static void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		tlb_table_invalidate(tlb);
		tlb_remove_table_free(*batch);
		*batch = NULL;
	}
}

void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			tlb_table_invalidate(tlb);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}

	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_table_flush(tlb);
}
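
/*
 * tlb_remove_table() is what the architecture page-table freeing hooks
 * call into. As an illustration (an assumption about one arch; check the
 * arch headers for the current form), arm64's __pte_free_tlb() is
 * roughly:
 *
 *	static inline void __pte_free_tlb(struct mmu_gather *tlb,
 *					  pgtable_t pte, unsigned long addr)
 *	{
 *		pgtable_page_dtor(pte);
 *		tlb_remove_table(tlb, pte);
 *	}
 *
 * i.e. the table page is handed to the batch here rather than freed
 * directly, preserving the flush/grace-period ordering described above.
 */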

static inline void tlb_table_init(struct mmu_gather *tlb)
{
	tlb->batch = NULL;
}

#else /* !CONFIG_MMU_GATHER_TABLE_FREE */

static inline void tlb_table_flush(struct mmu_gather *tlb) { }
static inline void tlb_table_init(struct mmu_gather *tlb) { }

#endif /* CONFIG_MMU_GATHER_TABLE_FREE */

static void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	tlb_table_flush(tlb);
#ifndef CONFIG_MMU_GATHER_NO_GATHER
	tlb_batch_pages_flush(tlb);
#endif
}

void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}

/**
 * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
 * @tlb: the mmu_gather structure to initialize
 * @mm: the mm_struct of the target address space
 * @start: start of the region that will be removed from the page-table
 * @end: end of the region that will be removed from the page-table
 *
 * Called to initialize an (on-stack) mmu_gather structure for page-table
 * tear-down from @mm. The @start and @end are set to 0 and -1
 * respectively when @mm is without users and we're going to destroy
 * the full address space (exit/execve).
 */
void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
			unsigned long start, unsigned long end)
{
	tlb->mm = mm;

	/* Is it from 0 to ~0? */
	tlb->fullmm = !(start | (end+1));

#ifndef CONFIG_MMU_GATHER_NO_GATHER
	tlb->need_flush_all = 0;
	tlb->local.next = NULL;
	tlb->local.nr   = 0;
	tlb->local.max  = ARRAY_SIZE(tlb->__pages);
	tlb->active     = &tlb->local;
	tlb->batch_count = 0;
#endif

	tlb_table_init(tlb);
#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	tlb->page_size = 0;
#endif

	__tlb_reset_range(tlb);
	inc_tlb_flush_pending(tlb->mm);
}
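
/*
 * For the fullmm case described above, a caller such as exit_mmap()
 * (mm/mmap.c; sketched from memory and simplified, details vary by
 * kernel version) passes the 0 / -1 sentinel range:
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, 0, -1);
 *	unmap_vmas(&tlb, vma, 0, -1);
 *	free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
 *	tlb_finish_mmu(&tlb, 0, -1);
 *
 * which makes tlb->fullmm true and lets architectures use a cheaper
 * whole-address-space invalidation.
 */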

/**
 * tlb_finish_mmu - finish an mmu_gather structure
 * @tlb: the mmu_gather structure to finish
 * @start: start of the region that will be removed from the page-table
 * @end: end of the region that will be removed from the page-table
 *
 * Called at the end of the shootdown operation to free up any resources that
 * were required.
 */
void tlb_finish_mmu(struct mmu_gather *tlb,
		unsigned long start, unsigned long end)
{
	/*
	 * If there are parallel threads doing PTE changes on the same range
	 * under non-exclusive lock (e.g., mmap_lock read-side) but deferring
	 * the TLB flush by batching, one thread may end up seeing inconsistent
	 * PTEs and thus stale TLB entries. So flush the TLB forcefully
	 * if we detect parallel PTE batching threads.
	 *
	 * However, some syscalls, e.g. munmap(), may free page tables; this
	 * needs to force flush everything in the given range. Otherwise we
	 * may end up with stale TLB entries on architectures, e.g. aarch64,
	 * that can specify which level of the TLB to flush.
	 */
	if (mm_tlb_flush_nested(tlb->mm)) {
		/*
		 * aarch64 yields better performance with fullmm by
		 * avoiding multiple CPUs spamming TLBI messages at the
		 * same time.
		 *
		 * On x86, non-fullmm doesn't yield a significant difference
		 * against fullmm.
		 */
		tlb->fullmm = 1;
		__tlb_reset_range(tlb);
		tlb->freed_tables = 1;
	}

	tlb_flush_mmu(tlb);

#ifndef CONFIG_MMU_GATHER_NO_GATHER
	tlb_batch_list_free(tlb);
#endif
	dec_tlb_flush_pending(tlb->mm);
}
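
/*
 * A sketch of the ranged gather/finish lifecycle, modelled on
 * zap_page_range() in mm/memory.c (paraphrased and simplified; details
 * vary by kernel version):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, vma->vm_mm, start, start + size);
 *	unmap_single_vma(&tlb, vma, start, start + size, NULL);
 *	tlb_finish_mmu(&tlb, start, start + size);
 *
 * Pages unmapped in between accumulate in the gather batches and are
 * freed only after the TLB has been flushed for them, at the latest in
 * tlb_finish_mmu().
 */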