#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/pagemap.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>
#include <linux/swap.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>

#ifdef HAVE_GENERIC_MMU_GATHER

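/*
 * Editor's note (illustrative reference, not part of this file): the batches
 * that tlb_next_batch() chains together are page-sized containers of page
 * pointers. In include/asm-generic/tlb.h of the same era they look roughly
 * like this, reproduced here only to make the geometry below readable:
 *
 *	struct mmu_gather_batch {
 *		struct mmu_gather_batch	*next;
 *		unsigned int		nr;	   number of pages stored
 *		unsigned int		max;	   capacity of pages[]
 *		struct page		*pages[0];
 *	};
 *
 *	#define MAX_GATHER_BATCH \
 *		((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))
 *	#define MAX_GATHER_BATCH_COUNT	(10000UL / MAX_GATHER_BATCH)
 *
 * MAX_GATHER_BATCH_COUNT caps how many extra batches a single mmu_gather may
 * allocate, bounding how much memory is gathered before a flush when huge
 * ranges are zapped.
 */
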
static bool tlb_next_batch(struct mmu_gather *tlb)
{
        struct mmu_gather_batch *batch;

        batch = tlb->active;
        if (batch->next) {
                tlb->active = batch->next;
                return true;
        }

        if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
                return false;

        batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
        if (!batch)
                return false;

        tlb->batch_count++;
        batch->next = NULL;
        batch->nr = 0;
        batch->max = MAX_GATHER_BATCH;

        tlb->active->next = batch;
        tlb->active = batch;

        return true;
}

void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
                        unsigned long start, unsigned long end)
{
        tlb->mm = mm;

        /* Is it from 0 to ~0? */
        tlb->fullmm = !(start | (end+1));
        tlb->need_flush_all = 0;
        tlb->local.next = NULL;
        tlb->local.nr = 0;
        tlb->local.max = ARRAY_SIZE(tlb->__pages);
        tlb->active = &tlb->local;
        tlb->batch_count = 0;

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
        tlb->batch = NULL;
#endif
#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
        tlb->page_size = 0;
#endif

        __tlb_reset_range(tlb);
}

void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
        struct mmu_gather_batch *batch;

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
        tlb_table_flush(tlb);
#endif
        for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
                free_pages_and_swap_cache(batch->pages, batch->nr);
                batch->nr = 0;
        }
        tlb->active = &tlb->local;
}

void tlb_flush_mmu(struct mmu_gather *tlb)
{
        tlb_flush_mmu_tlbonly(tlb);
        tlb_flush_mmu_free(tlb);
}

/* tlb_finish_mmu
 *      Called at the end of the shootdown operation to free up any resources
 *      that were required.
 */
void arch_tlb_finish_mmu(struct mmu_gather *tlb,
                unsigned long start, unsigned long end, bool force)
{
        struct mmu_gather_batch *batch, *next;

        if (force) {
                __tlb_reset_range(tlb);
                __tlb_adjust_range(tlb, start, end - start);
        }

        tlb_flush_mmu(tlb);

        /* keep the page table cache within bounds */
        check_pgt_cache();

        for (batch = tlb->local.next; batch; batch = next) {
                next = batch->next;
                free_pages((unsigned long)batch, 0);
        }
        tlb->local.next = NULL;
}

/* __tlb_remove_page
 *      Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
 *      handling the additional races in SMP caused by other CPUs caching valid
 *      mappings in their TLBs. When we run out of page slots the caller must
 *      call tlb_flush_mmu(). Returns true if the caller should flush.
 */
bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
{
        struct mmu_gather_batch *batch;

        VM_BUG_ON(!tlb->end);

#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
        VM_WARN_ON(tlb->page_size != page_size);
#endif

        batch = tlb->active;
        /*
         * Add the page and check if we are full. If so
         * force a flush.
         */
        batch->pages[batch->nr++] = page;
        if (batch->nr == batch->max) {
                if (!tlb_next_batch(tlb))
                        return true;
                batch = tlb->active;
        }
        VM_BUG_ON_PAGE(batch->nr > batch->max, page);

        return false;
}
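
/*
 * Editor's note (illustrative sketch, not part of this file): the contract
 * above is consumed by a thin wrapper; tlb_remove_page_size() in
 * include/asm-generic/tlb.h is, roughly, the following. A distinct name is
 * used here purely for illustration.
 */
static inline void example_tlb_remove_page_size(struct mmu_gather *tlb,
                                struct page *page, int page_size)
{
        if (__tlb_remove_page_size(tlb, page, page_size))
                tlb_flush_mmu(tlb);     /* batches full: flush TLB, free gathered pages */
}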

#endif /* HAVE_GENERIC_MMU_GATHER */

#ifdef CONFIG_HAVE_RCU_TABLE_FREE

/*
 * See the comment near struct mmu_table_batch.
 */

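/*
 * Editor's note (illustrative reference, not part of this file): the batch
 * referred to above is defined in include/asm-generic/tlb.h and, at the time,
 * looked roughly like this. One page holds an rcu_head plus as many table
 * pointers as fit:
 *
 *	struct mmu_table_batch {
 *		struct rcu_head		rcu;
 *		unsigned int		nr;
 *		void			*tables[0];
 *	};
 *
 *	#define MAX_TABLE_BATCH \
 *		((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
 */
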
/*
 * If we want tlb_remove_table() to imply TLB invalidates.
 */
static inline void tlb_table_invalidate(struct mmu_gather *tlb)
{
#ifdef CONFIG_HAVE_RCU_TABLE_INVALIDATE
        /*
         * Invalidate page-table caches used by hardware walkers. Then we still
         * need to RCU-sched wait while freeing the pages because software
         * walkers can still be in-flight.
         */
        tlb_flush_mmu_tlbonly(tlb);
#endif
}

static void tlb_remove_table_smp_sync(void *arg)
{
        /* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
        /*
         * This isn't an RCU grace period and hence the page-tables cannot be
         * assumed to be actually RCU-freed.
         *
         * It is however sufficient for software page-table walkers that rely on
         * IRQ disabling. See the comment near struct mmu_table_batch.
         */
        smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
        __tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
        struct mmu_table_batch *batch;
        int i;

        batch = container_of(head, struct mmu_table_batch, rcu);

        for (i = 0; i < batch->nr; i++)
                __tlb_remove_table(batch->tables[i]);

        free_page((unsigned long)batch);
}

void tlb_table_flush(struct mmu_gather *tlb)
{
        struct mmu_table_batch **batch = &tlb->batch;

        if (*batch) {
                tlb_table_invalidate(tlb);
                call_rcu(&(*batch)->rcu, tlb_remove_table_rcu);
                *batch = NULL;
        }
}

void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
        struct mmu_table_batch **batch = &tlb->batch;

        if (*batch == NULL) {
                *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
                if (*batch == NULL) {
                        tlb_table_invalidate(tlb);
                        tlb_remove_table_one(table);
                        return;
                }
                (*batch)->nr = 0;
        }

        (*batch)->tables[(*batch)->nr++] = table;
        if ((*batch)->nr == MAX_TABLE_BATCH)
                tlb_table_flush(tlb);
}
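
/*
 * Editor's note (illustrative sketch, not part of this file): architectures
 * selecting CONFIG_HAVE_RCU_TABLE_FREE supply __tlb_remove_table() to do the
 * actual free once it is safe. For a page-sized page table this is often
 * little more than handing the page back, e.g. roughly:
 */
static inline void example__tlb_remove_table(void *table)
{
        free_page_and_swap_cache((struct page *)table);
}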

#endif /* CONFIG_HAVE_RCU_TABLE_FREE */

/**
 * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
 * @tlb: the mmu_gather structure to initialize
 * @mm: the mm_struct of the target address space
 * @start: start of the region that will be removed from the page-table
 * @end: end of the region that will be removed from the page-table
 *
 * Called to initialize an (on-stack) mmu_gather structure for page-table
 * tear-down from @mm. The @start and @end are set to 0 and -1
 * respectively when @mm is without users and we're going to destroy
 * the full address space (exit/execve).
 */
void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
                        unsigned long start, unsigned long end)
{
        arch_tlb_gather_mmu(tlb, mm, start, end);
        inc_tlb_flush_pending(tlb->mm);
}

void tlb_finish_mmu(struct mmu_gather *tlb,
                unsigned long start, unsigned long end)
{
        /*
         * If parallel threads are doing PTE changes on the same range under a
         * non-exclusive lock (e.g., mmap_sem read-side) but defer the TLB
         * flush by batching, a thread that still holds a stale-but-stable TLB
         * entry can fail to flush it, for example after observing pte_none or
         * !pte_dirty. So flush the TLB forcefully if we detect parallel PTE
         * batching threads.
         */
        bool force = mm_tlb_flush_nested(tlb->mm);

        arch_tlb_finish_mmu(tlb, start, end, force);
        dec_tlb_flush_pending(tlb->mm);
}
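
/*
 * Editor's note (illustrative sketch, not part of this file): the typical
 * caller sequence around an unmap, in the spirit of zap_page_range() in
 * mm/memory.c. example_zap_ptes() is a hypothetical stand-in for the real
 * page-table walk that clears PTEs and feeds the freed pages into the
 * gather via __tlb_remove_page().
 */

/* hypothetical helper, assumed to exist only for this sketch */
void example_zap_ptes(struct mmu_gather *tlb, struct vm_area_struct *vma,
                unsigned long start, unsigned long end);

static void example_unmap_range(struct vm_area_struct *vma,
                unsigned long start, unsigned long end)
{
        struct mmu_gather tlb;

        tlb_gather_mmu(&tlb, vma->vm_mm, start, end);   /* mark a TLB flush as pending */
        example_zap_ptes(&tlb, vma, start, end);        /* clear PTEs, batch freed pages */
        tlb_finish_mmu(&tlb, start, end);               /* flush TLB, free pages, drop pending */
}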