#ifndef _LINUX_MMU_NOTIFIER_H
#define _LINUX_MMU_NOTIFIER_H

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <linux/srcu.h>

struct mmu_notifier;
struct mmu_notifier_ops;

#ifdef CONFIG_MMU_NOTIFIER
/*
 * The mmu_notifier_mm structure is allocated and installed in
 * mm->mmu_notifier_mm inside the mm_take_all_locks() protected
 * critical section and it's released only when mm_count reaches zero
 * in mmdrop().
 */
struct mmu_notifier_mm {
	/* all mmu notifiers registered in this mm are queued in this list */
	struct hlist_head list;
	/* to serialize the list modifications and hlist_unhashed */
	spinlock_t lock;
};

struct mmu_notifier_ops {
	/*
	 * Called either by mmu_notifier_unregister or when the mm is
	 * being destroyed by exit_mmap, always before all pages are
	 * freed. This can run concurrently with other mmu notifier
	 * methods (the ones invoked outside the mm context) and it
	 * should tear down all secondary mmu mappings and freeze the
	 * secondary mmu. If this method isn't implemented you have to
	 * be sure that nothing could possibly write to the pages
	 * through the secondary mmu by the time the last thread with
	 * tsk->mm == mm exits.
	 *
	 * As a side note: the pages freed after ->release returns could
	 * be immediately reallocated by the gart at an alias physical
	 * address with a different cache model, so if ->release isn't
	 * implemented because all _software_ driven memory accesses
	 * through the secondary mmu are terminated by the time the
	 * last thread of this mm quits, you also have to be sure that
	 * speculative _hardware_ operations can't allocate dirty
	 * cachelines in the cpu that could not be snooped and made
	 * coherent with the other read and write operations happening
	 * through the gart alias address, thus leading to memory
	 * corruption.
	 */
	void (*release)(struct mmu_notifier *mn,
			struct mm_struct *mm);
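
	/*
	 * Illustrative sketch, not part of this header: a possible
	 * ->release for a hypothetical device (my_dev and the my_*
	 * helpers are made up). The point is that all secondary-MMU
	 * access must be stopped before returning, since the pages
	 * are freed afterwards.
	 *
	 *	static void my_release(struct mmu_notifier *mn,
	 *			       struct mm_struct *mm)
	 *	{
	 *		struct my_dev *dev = container_of(mn, struct my_dev, mn);
	 *
	 *		my_quiesce_dma(dev);	// stop hw access (hypothetical)
	 *		my_zap_all_sptes(dev);	// tear down secondary mappings
	 *	}
	 */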

	/*
	 * clear_flush_young is called after the VM tests and clears
	 * the young/accessed bitflag in the pte. This way the VM will
	 * provide proper aging to the accesses to the page through the
	 * secondary MMUs and not only to the ones through the Linux pte.
	 * Start-end is necessary in case the secondary MMU is mapping the page
	 * at a smaller granularity than the primary MMU.
	 */
	int (*clear_flush_young)(struct mmu_notifier *mn,
				 struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end);
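
	/*
	 * Illustrative sketch, assuming a hypothetical driver that
	 * mirrors the address space with one spte per PAGE_SIZE (the
	 * my_* names are made up): harvest and clear the accessed bits
	 * over [start, end), flush the secondary tlb, and report
	 * whether the range was referenced.
	 *
	 *	static int my_clear_flush_young(struct mmu_notifier *mn,
	 *					struct mm_struct *mm,
	 *					unsigned long start,
	 *					unsigned long end)
	 *	{
	 *		struct my_dev *dev = container_of(mn, struct my_dev, mn);
	 *		unsigned long addr;
	 *		int young = 0;
	 *
	 *		for (addr = start; addr < end; addr += PAGE_SIZE)
	 *			young |= my_spte_test_and_clear_young(dev, addr);
	 *		my_flush_secondary_tlb(dev, start, end);
	 *		return young;
	 *	}
	 */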

	/*
	 * clear_young is a lightweight version of clear_flush_young. Like the
	 * latter, it is supposed to test-and-clear the young/accessed bitflag
	 * in the secondary pte, but it may omit flushing the secondary tlb.
	 */
	int (*clear_young)(struct mmu_notifier *mn,
			   struct mm_struct *mm,
			   unsigned long start,
			   unsigned long end);

	/*
	 * test_young is called to check the young/accessed bitflag in
	 * the secondary pte. This is used to know if the page is
	 * frequently used without actually clearing the flag or tearing
	 * down the secondary mapping on the page.
	 */
	int (*test_young)(struct mmu_notifier *mn,
			  struct mm_struct *mm,
			  unsigned long address);

	/*
	 * change_pte is called in cases where the pte mapping a page is
	 * changed: for example, when ksm remaps the pte to point to a new
	 * shared page.
	 */
	void (*change_pte)(struct mmu_notifier *mn,
			   struct mm_struct *mm,
			   unsigned long address,
			   pte_t pte);
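
	/*
	 * Illustrative sketch (hypothetical my_* names): a driver that
	 * mirrors ptes 1:1 can retarget its spte at the new page instead
	 * of zapping it and refaulting, e.g. when ksm merges the page.
	 *
	 *	static void my_change_pte(struct mmu_notifier *mn,
	 *				  struct mm_struct *mm,
	 *				  unsigned long address, pte_t pte)
	 *	{
	 *		struct my_dev *dev = container_of(mn, struct my_dev, mn);
	 *
	 *		// map the spte to pte_pfn(pte), write-protected
	 *		// unless the new primary pte is writable
	 *		my_spte_retarget(dev, address, pte_pfn(pte), pte_write(pte));
	 *	}
	 */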

	/*
	 * invalidate_range_start() and invalidate_range_end() must be
	 * paired and are called only when the mmap_sem and/or the
	 * locks protecting the reverse maps are held. If the subsystem
	 * can't guarantee that no additional references are taken to
	 * the pages in the range, it has to implement the
	 * invalidate_range() notifier to remove any references taken
	 * after invalidate_range_start().
	 *
	 * Invalidation of multiple concurrent ranges may be
	 * optionally permitted by the driver. Either way the
	 * establishment of sptes is forbidden in the range passed to
	 * invalidate_range_start/end for the whole duration of the
	 * invalidate_range_start/end critical section.
	 *
	 * invalidate_range_start() is called when all pages in the
	 * range are still mapped and have at least a refcount of one.
	 *
	 * invalidate_range_end() is called when all pages in the
	 * range have been unmapped and the pages have been freed by
	 * the VM.
	 *
	 * The VM will remove the page table entries and potentially
	 * the page between invalidate_range_start() and
	 * invalidate_range_end(). If the page must not be freed
	 * because of pending I/O or other circumstances then the
	 * invalidate_range_start() callback (or the initial mapping
	 * by the driver) must make sure that the refcount is kept
	 * elevated.
	 *
	 * If the driver increases the refcount when the pages are
	 * initially mapped into an address space then either
	 * invalidate_range_start() or invalidate_range_end() may
	 * decrease the refcount. If the refcount is decreased on
	 * invalidate_range_start() then the VM can free pages as page
	 * table entries are removed. If the refcount is only
	 * dropped on invalidate_range_end() then the driver itself
	 * will drop the last refcount, but it must take care to flush
	 * any secondary tlb before doing the final free on the
	 * page. Pages will no longer be referenced by the linux
	 * address space but may still be referenced by sptes until
	 * the last refcount is dropped.
	 */
	void (*invalidate_range_start)(struct mmu_notifier *mn,
				       struct mm_struct *mm,
				       unsigned long start, unsigned long end);
	void (*invalidate_range_end)(struct mmu_notifier *mn,
				     struct mm_struct *mm,
				     unsigned long start, unsigned long end);
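
	/*
	 * Illustrative sketch of the required pairing, loosely modeled on
	 * the pattern KVM-like users follow (the my_* names are
	 * hypothetical): _start() zaps the sptes and raises a counter that
	 * forces concurrent secondary-MMU faults to retry, so no spte can
	 * be established in the range until _end() drops the counter.
	 *
	 *	static void my_inv_start(struct mmu_notifier *mn,
	 *				 struct mm_struct *mm,
	 *				 unsigned long start, unsigned long end)
	 *	{
	 *		struct my_dev *dev = container_of(mn, struct my_dev, mn);
	 *
	 *		spin_lock(&dev->lock);
	 *		dev->invalidate_count++;	// fault path sees this and retries
	 *		my_zap_sptes(dev, start, end);
	 *		spin_unlock(&dev->lock);
	 *	}
	 *
	 *	static void my_inv_end(struct mmu_notifier *mn,
	 *			       struct mm_struct *mm,
	 *			       unsigned long start, unsigned long end)
	 *	{
	 *		struct my_dev *dev = container_of(mn, struct my_dev, mn);
	 *
	 *		spin_lock(&dev->lock);
	 *		dev->invalidate_count--;	// sptes may be established again
	 *		spin_unlock(&dev->lock);
	 *	}
	 */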

	/*
	 * invalidate_range() is either called between
	 * invalidate_range_start() and invalidate_range_end() when the
	 * VM has to free pages that were unmapped, but before the
	 * pages are actually freed, or outside of _start()/_end() when
	 * a (remote) TLB flush is necessary.
	 *
	 * If invalidate_range() is used to manage a non-CPU TLB with
	 * shared page-tables, it is not necessary to implement the
	 * invalidate_range_start()/end() notifiers, as
	 * invalidate_range() already catches the points in time when an
	 * external TLB range needs to be flushed.
	 *
	 * The invalidate_range() function is called under the ptl
	 * spin-lock and is not allowed to sleep.
	 *
	 * Note that this function might be called with just a sub-range
	 * of what was passed to invalidate_range_start()/end(), if
	 * called between those functions.
	 */
	void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
				 unsigned long start, unsigned long end);
};
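
/*
 * Illustrative sketch: hardware that walks the CPU page tables directly
 * (e.g. an IOMMU attached to the mm) can keep its external TLB coherent
 * with ->invalidate_range alone. The my_* names are hypothetical; note
 * the callback runs under the ptl spin-lock, so it must not sleep.
 *
 *	static void my_invalidate_range(struct mmu_notifier *mn,
 *					struct mm_struct *mm,
 *					unsigned long start, unsigned long end)
 *	{
 *		struct my_iommu_ctx *ctx =
 *			container_of(mn, struct my_iommu_ctx, mn);
 *
 *		my_iommu_flush_iotlb(ctx, start, end);	// non-sleeping mmio kick
 *	}
 */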

/*
 * The notifier chains are protected by mmap_sem and/or the reverse map
 * semaphores. Notifier chains are only changed when all reverse maps and
 * the mmap_sem locks are taken.
 *
 * Therefore notifier chains can only be traversed when either:
 *
 * 1. mmap_sem is held.
 * 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem).
 * 3. No other concurrent thread can access the list (release).
 */
struct mmu_notifier {
	struct hlist_node hlist;
	const struct mmu_notifier_ops *ops;
};

static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return unlikely(mm->mmu_notifier_mm);
}

extern int mmu_notifier_register(struct mmu_notifier *mn,
				 struct mm_struct *mm);
extern int __mmu_notifier_register(struct mmu_notifier *mn,
				   struct mm_struct *mm);
extern void mmu_notifier_unregister(struct mmu_notifier *mn,
				    struct mm_struct *mm);
extern void mmu_notifier_unregister_no_release(struct mmu_notifier *mn,
					       struct mm_struct *mm);
extern void __mmu_notifier_mm_destroy(struct mm_struct *mm);
extern void __mmu_notifier_release(struct mm_struct *mm);
extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
					    unsigned long start,
					    unsigned long end);
extern int __mmu_notifier_clear_young(struct mm_struct *mm,
				      unsigned long start,
				      unsigned long end);
extern int __mmu_notifier_test_young(struct mm_struct *mm,
				     unsigned long address);
extern void __mmu_notifier_change_pte(struct mm_struct *mm,
				      unsigned long address, pte_t pte);
extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
						  unsigned long start, unsigned long end);
extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
						unsigned long start, unsigned long end);
extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
					    unsigned long start, unsigned long end);
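
/*
 * Illustrative registration sketch (the my_* names are hypothetical).
 * mmu_notifier_register() takes mmap_sem and may sleep, so it must be
 * called from process context on a live mm; the notifier stays attached
 * until mmu_notifier_unregister() or until ->release fires at exit_mmap.
 *
 *	static const struct mmu_notifier_ops my_ops = {
 *		.release		= my_release,
 *		.invalidate_range_start	= my_inv_start,
 *		.invalidate_range_end	= my_inv_end,
 *	};
 *
 *	static int my_attach(struct my_dev *dev)
 *	{
 *		dev->mn.ops = &my_ops;
 *		return mmu_notifier_register(&dev->mn, current->mm);
 *	}
 */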

static inline void mmu_notifier_release(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_release(mm);
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_clear_flush_young(mm, start, end);
	return 0;
}

static inline int mmu_notifier_clear_young(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_clear_young(mm, start, end);
	return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_test_young(mm, address);
	return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
					   unsigned long address, pte_t pte)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_change_pte(mm, address, pte);
}

static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
						       unsigned long start, unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range_start(mm, start, end);
}

static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
						     unsigned long start, unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range_end(mm, start, end);
}

static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
						 unsigned long start, unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range(mm, start, end);
}

static inline void mmu_notifier_mm_init(struct mm_struct *mm)
{
	mm->mmu_notifier_mm = NULL;
}

static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_mm_destroy(mm);
}

#define ptep_clear_flush_young_notify(__vma, __address, __ptep)	\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_clear_flush_young(___vma, ___address, __ptep);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address + PAGE_SIZE); \
	__young;							\
})

#define pmdp_clear_flush_young_notify(__vma, __address, __pmdp)	\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_clear_flush_young(___vma, ___address, __pmdp);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address + PMD_SIZE); \
	__young;							\
})

#define ptep_clear_young_notify(__vma, __address, __ptep)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_test_and_clear_young(___vma, ___address, __ptep);\
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
					    ___address + PAGE_SIZE);	\
	__young;							\
})

#define pmdp_clear_young_notify(__vma, __address, __pmdp)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_test_and_clear_young(___vma, ___address, __pmdp);\
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
					    ___address + PMD_SIZE);	\
	__young;							\
})

#define ptep_clear_flush_notify(__vma, __address, __ptep)		\
({									\
	unsigned long ___addr = __address & PAGE_MASK;			\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pte_t ___pte;							\
	___pte = ptep_clear_flush(__vma, __address, __ptep);		\
	mmu_notifier_invalidate_range(___mm, ___addr,			\
				      ___addr + PAGE_SIZE);		\
	___pte;								\
})

#define pmdp_huge_clear_flush_notify(__vma, __haddr, __pmd)		\
({									\
	unsigned long ___haddr = __haddr & HPAGE_PMD_MASK;		\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pmd_t ___pmd;							\
	___pmd = pmdp_huge_clear_flush(__vma, __haddr, __pmd);		\
	mmu_notifier_invalidate_range(___mm, ___haddr,			\
				      ___haddr + HPAGE_PMD_SIZE);	\
	___pmd;								\
})

#define pudp_huge_clear_flush_notify(__vma, __haddr, __pud)		\
({									\
	unsigned long ___haddr = __haddr & HPAGE_PUD_MASK;		\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pud_t ___pud;							\
	___pud = pudp_huge_clear_flush(__vma, __haddr, __pud);		\
	mmu_notifier_invalidate_range(___mm, ___haddr,			\
				      ___haddr + HPAGE_PUD_SIZE);	\
	___pud;								\
})

/*
 * set_pte_at_notify() sets the pte _after_ running the notifier.
 * It is safe to update the secondary MMUs first because the primary MMU
 * pte invalidate must already have happened, via a ptep_clear_flush(),
 * before set_pte_at_notify() is invoked. Updating the secondary MMUs first
 * is required when we change both the protection of the mapping from
 * read-only to read-write and the pfn (as during copy-on-write page
 * faults). Otherwise the old page would remain mapped read-only in the
 * secondary MMUs after the new page is already writable by some CPU
 * through the primary MMU.
 */
#define set_pte_at_notify(__mm, __address, __ptep, __pte)		\
({									\
	struct mm_struct *___mm = __mm;					\
	unsigned long ___address = __address;				\
	pte_t ___pte = __pte;						\
									\
	mmu_notifier_change_pte(___mm, ___address, ___pte);		\
	set_pte_at(___mm, ___address, __ptep, ___pte);			\
})
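
/*
 * Illustrative ordering for a copy-on-write style fault, simplified from
 * what mm/memory.c does (error handling and locking omitted):
 *
 *	entry = mk_pte(new_page, vma->vm_page_prot);
 *	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 *	ptep_clear_flush_notify(vma, address, ptep);	// invalidate old pte + tlbs
 *	set_pte_at_notify(mm, address, ptep, entry);	// secondary MMUs first, then pte
 */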

extern void mmu_notifier_call_srcu(struct rcu_head *rcu,
				   void (*func)(struct rcu_head *rcu));
extern void mmu_notifier_synchronize(void);

#else /* CONFIG_MMU_NOTIFIER */

static inline void mmu_notifier_release(struct mm_struct *mm)
{
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end)
{
	return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
					   unsigned long address, pte_t pte)
{
}

static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
						       unsigned long start, unsigned long end)
{
}

static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
						     unsigned long start, unsigned long end)
{
}

static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
						 unsigned long start, unsigned long end)
{
}

static inline void mmu_notifier_mm_init(struct mm_struct *mm)
{
}

static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
{
}

#define ptep_clear_flush_young_notify ptep_clear_flush_young
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
#define ptep_clear_young_notify ptep_test_and_clear_young
#define pmdp_clear_young_notify pmdp_test_and_clear_young
#define ptep_clear_flush_notify ptep_clear_flush
#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
#define pudp_huge_clear_flush_notify pudp_huge_clear_flush
#define set_pte_at_notify set_pte_at

#endif /* CONFIG_MMU_NOTIFIER */

#endif /* _LINUX_MMU_NOTIFIER_H */