/*
 *  linux/mm/mmu_notifier.c
 *
 *  Copyright (C) 2008  Qumranet, Inc.
 *  Copyright (C) 2008  SGI
 *             Christoph Lameter <clameter@sgi.com>
 *
 *  This work is licensed under the terms of the GNU GPL, version 2. See
 *  the COPYING file in the top-level directory.
 */

#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/slab.h>

/* global SRCU for all MMs */
static struct srcu_struct srcu;

/*
 * This function can't run concurrently with mmu_notifier_register
 * because mm->mm_users > 0 during mmu_notifier_register and exit_mmap
 * runs with mm_users == 0. Other tasks may still invoke mmu notifiers
 * in parallel despite there being no task using this mm any more,
 * through the vmas outside of the exit_mmap context, such as with
 * vmtruncate. This serializes against mmu_notifier_unregister with
 * the mmu_notifier_mm->lock, in addition to SRCU, and it serializes
 * against the other mmu notifiers with SRCU. struct mmu_notifier_mm
 * can't go away from under us as exit_mmap holds an mm_count pin
 * itself.
 */
void __mmu_notifier_release(struct mm_struct *mm)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;
	int id;

	/*
	 * SRCU here will block mmu_notifier_unregister until
	 * ->release returns.
	 */
	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist)
		/*
		 * If ->release runs before mmu_notifier_unregister it
		 * must be handled, as it's the only way for the driver
		 * to flush all existing sptes and stop the driver
		 * from establishing any more sptes before all the
		 * pages in the mm are freed.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);
	srcu_read_unlock(&srcu, id);

	spin_lock(&mm->mmu_notifier_mm->lock);
	while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
		mn = hlist_entry(mm->mmu_notifier_mm->list.first,
				 struct mmu_notifier,
				 hlist);
		/*
		 * We arrived before mmu_notifier_unregister, so
		 * mmu_notifier_unregister will do nothing other than
		 * wait for ->release to finish and for
		 * mmu_notifier_unregister to return.
		 */
		hlist_del_init_rcu(&mn->hlist);
	}
	spin_unlock(&mm->mmu_notifier_mm->lock);

	/*
	 * synchronize_srcu here prevents mmu_notifier_release from
	 * returning to exit_mmap (which would proceed freeing all pages
	 * in the mm) until the ->release method returns, if it was
	 * invoked by mmu_notifier_unregister.
	 *
	 * The mmu_notifier_mm can't go away from under us because one
	 * mm_count reference is held by exit_mmap.
	 */
	synchronize_srcu(&srcu);
}
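
/*
 * Illustrative sketch, not part of the original file: roughly what a
 * hypothetical driver's ->release callback could look like. As the comment
 * above notes, ->release is the driver's only chance to flush all existing
 * sptes and to stop establishing new ones before the mm's pages are freed.
 * Every example_* name below is made up for illustration.
 */
struct example_gpu_mmu {
	struct mmu_notifier mn;		/* embedded notifier */
	bool dead;			/* set once no new sptes may be created */
};

/* Hypothetical helper: tear down every spte this device holds for the mm. */
static void example_gpu_zap_all_sptes(struct example_gpu_mmu *gpu)
{
}

static void example_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct example_gpu_mmu *gpu = container_of(mn, struct example_gpu_mmu, mn);

	gpu->dead = true;		/* refuse any further spte creation */
	example_gpu_zap_all_sptes(gpu);	/* and drop everything already mapped */
}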

/*
 * If no young bitflag is supported by the hardware, ->clear_flush_young can
 * unmap the address and return 1 or 0 depending on whether the mapping
 * previously existed or not.
 */
int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
				     unsigned long address)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->clear_flush_young)
			young |= mn->ops->clear_flush_young(mn, mm, address);
	}
	srcu_read_unlock(&srcu, id);

	return young;
}
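
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * ->clear_flush_young for secondary MMU hardware without a young/accessed
 * bit. Per the comment above, such a driver can simply unmap the address and
 * report whether a mapping previously existed. example_gpu_unmap_one() is a
 * made-up helper returning true if an spte was present (and is now gone).
 */
static bool example_gpu_unmap_one(struct example_gpu_mmu *gpu,
				  unsigned long address)
{
	return false;
}

static int example_clear_flush_young(struct mmu_notifier *mn,
				     struct mm_struct *mm,
				     unsigned long address)
{
	struct example_gpu_mmu *gpu = container_of(mn, struct example_gpu_mmu, mn);

	return example_gpu_unmap_one(gpu, address) ? 1 : 0;
}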

int __mmu_notifier_test_young(struct mm_struct *mm,
			      unsigned long address)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;
	int young = 0, id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->test_young) {
			young = mn->ops->test_young(mn, mm, address);
			if (young)
				break;
		}
	}
	srcu_read_unlock(&srcu, id);

	return young;
}

void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
			       pte_t pte)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->change_pte)
			mn->ops->change_pte(mn, mm, address, pte);
	}
	srcu_read_unlock(&srcu, id);
}

void __mmu_notifier_invalidate_page(struct mm_struct *mm,
				    unsigned long address)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_page)
			mn->ops->invalidate_page(mn, mm, address);
	}
	srcu_read_unlock(&srcu, id);
}

void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
					   unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_start)
			mn->ops->invalidate_range_start(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}

void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
					 unsigned long start, unsigned long end)
{
	struct mmu_notifier *mn;
	struct hlist_node *n;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist) {
		if (mn->ops->invalidate_range_end)
			mn->ops->invalidate_range_end(mn, mm, start, end);
	}
	srcu_read_unlock(&srcu, id);
}

static int do_mmu_notifier_register(struct mmu_notifier *mn,
				    struct mm_struct *mm,
				    int take_mmap_sem)
{
	struct mmu_notifier_mm *mmu_notifier_mm;
	int ret;

	BUG_ON(atomic_read(&mm->mm_users) <= 0);

	/*
	 * Verify that mmu_notifier_init() has already run and the global
	 * srcu is initialized.
	 */
	BUG_ON(!srcu.per_cpu_ref);

	ret = -ENOMEM;
	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
	if (unlikely(!mmu_notifier_mm))
		goto out;

	if (take_mmap_sem)
		down_write(&mm->mmap_sem);
	ret = mm_take_all_locks(mm);
	if (unlikely(ret))
		goto out_clean;

	if (!mm_has_notifiers(mm)) {
		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
		spin_lock_init(&mmu_notifier_mm->lock);

		mm->mmu_notifier_mm = mmu_notifier_mm;
		mmu_notifier_mm = NULL;
	}
	atomic_inc(&mm->mm_count);

	/*
	 * Serialize the update against mmu_notifier_unregister. A
	 * side note: mmu_notifier_release can't run concurrently with
	 * us because we hold the mm_users pin (either implicitly as
	 * current->mm or explicitly with get_task_mm() or similar).
	 * We can't race against any other mmu notifier method either
	 * thanks to mm_take_all_locks().
	 */
	spin_lock(&mm->mmu_notifier_mm->lock);
	hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
	spin_unlock(&mm->mmu_notifier_mm->lock);

	mm_drop_all_locks(mm);
out_clean:
	if (take_mmap_sem)
		up_write(&mm->mmap_sem);
	kfree(mmu_notifier_mm);
out:
	BUG_ON(atomic_read(&mm->mm_users) <= 0);
	return ret;
}

/*
 * Must not hold mmap_sem nor any other VM related lock when calling
 * this registration function. Must also ensure mm_users can't go down
 * to zero while this runs, to avoid races with mmu_notifier_release,
 * so mm has to be current->mm or the mm should be pinned safely such
 * as with get_task_mm(). If the mm is not current->mm, the mm_users
 * pin should be released by calling mmput after mmu_notifier_register
 * returns. mmu_notifier_unregister must always be called to
 * unregister the notifier. mm_count is automatically pinned to allow
 * mmu_notifier_unregister to safely run at any time later, before or
 * after exit_mmap. ->release will always be called before exit_mmap
 * frees the pages.
 */
int mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 1);
}
EXPORT_SYMBOL_GPL(mmu_notifier_register);
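
/*
 * Illustrative sketch, not part of the original file: how a hypothetical
 * driver could wire up and register a notifier, following the rules in the
 * comment above (the caller holds an mm_users pin, e.g. via get_task_mm(),
 * and holds no VM locks). example_gpu_mmu and the callbacks in the _ops
 * initializer refer to the made-up example code earlier in this document.
 */
static const struct mmu_notifier_ops example_mmu_notifier_ops = {
	.release		= example_release,
	.clear_flush_young	= example_clear_flush_young,
};

static int example_gpu_attach_mm(struct example_gpu_mmu *gpu,
				 struct mm_struct *mm)
{
	int ret;

	gpu->mn.ops = &example_mmu_notifier_ops;

	/* mm_count is pinned by mmu_notifier_register itself on success. */
	ret = mmu_notifier_register(&gpu->mn, mm);
	if (ret)
		return ret;

	/* From here on the driver may start populating sptes for this mm. */
	return 0;
}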

/*
 * Same as mmu_notifier_register but here the caller must hold the
 * mmap_sem in write mode.
 */
int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
{
	return do_mmu_notifier_register(mn, mm, 0);
}
EXPORT_SYMBOL_GPL(__mmu_notifier_register);

/* this is called after the last mmu_notifier_unregister() returned */
void __mmu_notifier_mm_destroy(struct mm_struct *mm)
{
	BUG_ON(!hlist_empty(&mm->mmu_notifier_mm->list));
	kfree(mm->mmu_notifier_mm);
	mm->mmu_notifier_mm = LIST_POISON1; /* debug */
}

/*
 * This releases the mm_count pin automatically and frees the mm
 * structure if it was the last user of it. It serializes against
 * running mmu notifiers with SRCU and against mmu_notifier_unregister
 * with the unregister lock + SRCU. All sptes must be dropped before
 * calling mmu_notifier_unregister. ->release or any other notifier
 * method may be invoked concurrently with mmu_notifier_unregister,
 * and only after mmu_notifier_unregister has returned are we guaranteed
 * that ->release or any other method can't run anymore.
 */
void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
{
	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	if (!hlist_unhashed(&mn->hlist)) {
		/*
		 * SRCU here will force exit_mmap to wait for ->release
		 * to finish before freeing the pages.
		 */
		int id;

		id = srcu_read_lock(&srcu);
		/*
		 * exit_mmap will block in mmu_notifier_release to
		 * guarantee ->release is called before freeing the
		 * pages.
		 */
		if (mn->ops->release)
			mn->ops->release(mn, mm);
		srcu_read_unlock(&srcu, id);

		spin_lock(&mm->mmu_notifier_mm->lock);
		hlist_del_rcu(&mn->hlist);
		spin_unlock(&mm->mmu_notifier_mm->lock);
	}

	/*
	 * Wait for any running method to finish, of course including
	 * ->release if it was run by mmu_notifier_release instead of us.
	 */
	synchronize_srcu(&srcu);

	BUG_ON(atomic_read(&mm->mm_count) <= 0);

	mmdrop(mm);
}
EXPORT_SYMBOL_GPL(mmu_notifier_unregister);
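
/*
 * Illustrative sketch, not part of the original file: the matching teardown
 * for the hypothetical example above. Per the comment before
 * mmu_notifier_unregister, the driver drops all of its sptes first; once
 * mmu_notifier_unregister returns, no notifier method (including ->release)
 * can still be running.
 */
static void example_gpu_detach_mm(struct example_gpu_mmu *gpu,
				  struct mm_struct *mm)
{
	/* All sptes must be gone before unregistering. */
	example_gpu_zap_all_sptes(gpu);

	/* Drops the mm_count pin taken at registration time. */
	mmu_notifier_unregister(&gpu->mn, mm);
}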

static int __init mmu_notifier_init(void)
{
	return init_srcu_struct(&srcu);
}

module_init(mmu_notifier_init);