/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMU_NOTIFIER_H
#define _LINUX_MMU_NOTIFIER_H

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <linux/srcu.h>

struct mmu_notifier;
struct mmu_notifier_ops;

#ifdef CONFIG_MMU_NOTIFIER

/*
 * The mmu_notifier_mm structure is allocated and installed in
 * mm->mmu_notifier_mm inside the mm_take_all_locks() protected
 * critical section and it's released only when mm_count reaches zero
 * in mmdrop().
 */
struct mmu_notifier_mm {
	/* all mmu notifiers registered in this mm are queued in this list */
	struct hlist_head list;
	/* to serialize the list modifications and hlist_unhashed */
	spinlock_t lock;
};

struct mmu_notifier_ops {
	/*
	 * Called either by mmu_notifier_unregister() or when the mm is
	 * being destroyed by exit_mmap(), always before all pages are
	 * freed. This can run concurrently with other mmu notifier
	 * methods (the ones invoked outside the mm context) and it
	 * should tear down all secondary mmu mappings and freeze the
	 * secondary mmu. If this method isn't implemented you have to
	 * be sure that nothing could possibly write to the pages
	 * through the secondary mmu by the time the last thread with
	 * tsk->mm == mm exits.
	 *
	 * As a side note: the pages freed after ->release returns could
	 * be immediately reallocated by the gart at an alias physical
	 * address with a different cache model, so if ->release isn't
	 * implemented because all _software_ driven memory accesses
	 * through the secondary mmu are terminated by the time the
	 * last thread of this mm quits, you also have to be sure that
	 * speculative _hardware_ operations can't allocate dirty
	 * cachelines in the cpu that could not be snooped and made
	 * coherent with the other read and write operations happening
	 * through the gart alias address, thus leading to memory
	 * corruption.
	 */
	void (*release)(struct mmu_notifier *mn,
			struct mm_struct *mm);

	/*
	 * clear_flush_young is called after the VM test-and-clears
	 * the young/accessed bitflag in the pte. This way the VM will
	 * provide proper aging for accesses to the page through the
	 * secondary MMUs and not only for the ones through the Linux pte.
	 * Start-end is necessary in case the secondary MMU is mapping the page
	 * at a smaller granularity than the primary MMU.
	 */
	int (*clear_flush_young)(struct mmu_notifier *mn,
				 struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end);

	/*
	 * clear_young is a lightweight version of clear_flush_young. Like the
	 * latter, it is supposed to test-and-clear the young/accessed bitflag
	 * in the secondary pte, but it may omit flushing the secondary tlb.
	 */
	int (*clear_young)(struct mmu_notifier *mn,
			   struct mm_struct *mm,
			   unsigned long start,
			   unsigned long end);

	/*
	 * test_young is called to check the young/accessed bitflag in
	 * the secondary pte. This is used to know if the page is
	 * frequently used without actually clearing the flag or tearing
	 * down the secondary mapping on the page.
	 */
	int (*test_young)(struct mmu_notifier *mn,
			  struct mm_struct *mm,
			  unsigned long address);

	/*
	 * change_pte is called in cases that the pte mapping to a page is
	 * changed: for example, when ksm remaps the pte to point to a new
	 * shared page.
	 */
	void (*change_pte)(struct mmu_notifier *mn,
			   struct mm_struct *mm,
			   unsigned long address,
			   pte_t pte);

	/*
	 * invalidate_range_start() and invalidate_range_end() must be
	 * paired and are called only when the mmap_sem and/or the
	 * locks protecting the reverse maps are held. If the subsystem
	 * can't guarantee that no additional references are taken to
	 * the pages in the range, it has to implement the
	 * invalidate_range() notifier to remove any references taken
	 * after invalidate_range_start().
	 *
	 * Invalidation of multiple concurrent ranges may be
	 * optionally permitted by the driver. Either way the
	 * establishment of sptes is forbidden in the range passed to
	 * invalidate_range_start/end for the whole duration of the
	 * invalidate_range_start/end critical section.
	 *
	 * invalidate_range_start() is called when all pages in the
	 * range are still mapped and have at least a refcount of one.
	 *
	 * invalidate_range_end() is called when all pages in the
	 * range have been unmapped and the pages have been freed by
	 * the VM.
	 *
	 * The VM will remove the page table entries and potentially
	 * the page between invalidate_range_start() and
	 * invalidate_range_end(). If the page must not be freed
	 * because of pending I/O or other circumstances then the
	 * invalidate_range_start() callback (or the initial mapping
	 * by the driver) must make sure that the refcount is kept
	 * elevated.
	 *
	 * If the driver increases the refcount when the pages are
	 * initially mapped into an address space then either
	 * invalidate_range_start() or invalidate_range_end() may
	 * decrease the refcount. If the refcount is decreased on
	 * invalidate_range_start() then the VM can free pages as page
	 * table entries are removed. If the refcount is only
	 * dropped on invalidate_range_end() then the driver itself
	 * will drop the last refcount but it must take care to flush
	 * any secondary tlb before doing the final free on the
	 * page. Pages will no longer be referenced by the linux
	 * address space but may still be referenced by sptes until
	 * the last refcount is dropped.
	 *
	 * If the blockable argument is set to false then the callback cannot
	 * sleep and has to return with -EAGAIN. 0 should be returned
	 * otherwise. Please note that if invalidate_range_start approves
	 * a non-blocking behavior then the same applies to
	 * invalidate_range_end.
	 */
	int (*invalidate_range_start)(struct mmu_notifier *mn,
				      struct mm_struct *mm,
				      unsigned long start, unsigned long end,
				      bool blockable);
	void (*invalidate_range_end)(struct mmu_notifier *mn,
				     struct mm_struct *mm,
				     unsigned long start, unsigned long end);

	/*
	 * invalidate_range() is either called between
	 * invalidate_range_start() and invalidate_range_end() when the
	 * VM has to free pages that were unmapped, but before the
	 * pages are actually freed, or outside of _start()/_end() when
	 * a (remote) TLB flush is necessary.
	 *
	 * If invalidate_range() is used to manage a non-CPU TLB with
	 * shared page-tables, it is not necessary to implement the
	 * invalidate_range_start()/end() notifiers, as
	 * invalidate_range() already catches the points in time when an
	 * external TLB range needs to be flushed. For more in-depth
	 * discussion on this see Documentation/vm/mmu_notifier.rst
	 *
	 * Note that this function might be called with just a sub-range
	 * of what was passed to invalidate_range_start()/end(), if
	 * called between those functions.
	 */
	void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
				 unsigned long start, unsigned long end);
};
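
/*
 * Illustrative sketch only (not part of this header): a driver for a
 * secondary MMU might fill in the ops above roughly as follows. All
 * "my_*" names are hypothetical, and locking/error handling is elided.
 *
 *	static int my_invalidate_range_start(struct mmu_notifier *mn,
 *					     struct mm_struct *mm,
 *					     unsigned long start,
 *					     unsigned long end,
 *					     bool blockable)
 *	{
 *		if (!blockable && !my_trylock())
 *			return -EAGAIN;	(non-blocking callers must not sleep)
 *		else if (blockable)
 *			my_lock();
 *		my_unmap_secondary_range(start, end);
 *		my_flush_secondary_tlb(start, end);
 *		return 0;
 *	}
 *
 *	static void my_invalidate_range_end(struct mmu_notifier *mn,
 *					    struct mm_struct *mm,
 *					    unsigned long start,
 *					    unsigned long end)
 *	{
 *		my_unlock();	(drop the lock taken in _start)
 *	}
 *
 *	static void my_release(struct mmu_notifier *mn, struct mm_struct *mm)
 *	{
 *		my_teardown_all_secondary_mappings();
 *	}
 *
 *	static const struct mmu_notifier_ops my_mmu_notifier_ops = {
 *		.release		= my_release,
 *		.invalidate_range_start	= my_invalidate_range_start,
 *		.invalidate_range_end	= my_invalidate_range_end,
 *	};
 */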

/*
 * The notifier chains are protected by mmap_sem and/or the reverse map
 * semaphores. Notifier chains are only changed when all reverse maps and
 * the mmap_sem locks are taken.
 *
 * Therefore notifier chains can only be traversed when either
 *
 * 1. mmap_sem is held.
 * 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem).
 * 3. No other concurrent thread can access the list (release)
 */
struct mmu_notifier {
	struct hlist_node hlist;
	const struct mmu_notifier_ops *ops;
};

static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return unlikely(mm->mmu_notifier_mm);
}

extern int mmu_notifier_register(struct mmu_notifier *mn,
				 struct mm_struct *mm);
extern int __mmu_notifier_register(struct mmu_notifier *mn,
				   struct mm_struct *mm);
extern void mmu_notifier_unregister(struct mmu_notifier *mn,
				    struct mm_struct *mm);
extern void mmu_notifier_unregister_no_release(struct mmu_notifier *mn,
					       struct mm_struct *mm);
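
/*
 * Illustrative sketch only (not part of this header): a user typically
 * embeds a struct mmu_notifier in its per-mm context, points ->ops at its
 * ops table and registers against the mm it mirrors. "my_*" names are
 * hypothetical; mmu_notifier_register() takes mmap_sem internally, while
 * __mmu_notifier_register() expects the caller to already hold it.
 *
 *	struct my_mirror {
 *		struct mmu_notifier	mn;
 *		struct mm_struct	*mm;	(plus whatever driver state)
 *	};
 *
 *	static int my_mirror_start(struct my_mirror *mirror)
 *	{
 *		mirror->mm = current->mm;
 *		mirror->mn.ops = &my_mmu_notifier_ops;
 *		return mmu_notifier_register(&mirror->mn, mirror->mm);
 *	}
 *
 *	static void my_mirror_stop(struct my_mirror *mirror)
 *	{
 *		mmu_notifier_unregister(&mirror->mn, mirror->mm);
 *	}
 */
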
extern void __mmu_notifier_mm_destroy(struct mm_struct *mm);
extern void __mmu_notifier_release(struct mm_struct *mm);
extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
					    unsigned long start,
					    unsigned long end);
extern int __mmu_notifier_clear_young(struct mm_struct *mm,
				      unsigned long start,
				      unsigned long end);
extern int __mmu_notifier_test_young(struct mm_struct *mm,
				     unsigned long address);
extern void __mmu_notifier_change_pte(struct mm_struct *mm,
				      unsigned long address, pte_t pte);
extern int __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
				  unsigned long start, unsigned long end,
				  bool blockable);
extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
				  unsigned long start, unsigned long end,
				  bool only_end);
extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
				  unsigned long start, unsigned long end);

static inline void mmu_notifier_release(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_release(mm);
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_clear_flush_young(mm, start, end);
	return 0;
}

static inline int mmu_notifier_clear_young(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_clear_young(mm, start, end);
	return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_test_young(mm, address);
	return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
					   unsigned long address, pte_t pte)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_change_pte(mm, address, pte);
}

static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range_start(mm, start, end, true);
}

static inline int mmu_notifier_invalidate_range_start_nonblock(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_invalidate_range_start(mm, start, end, false);
	return 0;
}

static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range_end(mm, start, end, false);
}

static inline void mmu_notifier_invalidate_range_only_end(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range_end(mm, start, end, true);
}

static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_invalidate_range(mm, start, end);
}
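
/*
 * Illustrative sketch only (not part of this header): core VM code that
 * tears down mappings brackets the page table updates with the wrappers
 * above, roughly like this ("zap_range_ptes" is a hypothetical helper):
 *
 *	mmu_notifier_invalidate_range_start(mm, start, end);
 *	zap_range_ptes(mm, start, end);		(unmap + CPU TLB flush)
 *	mmu_notifier_invalidate_range_end(mm, start, end);
 *
 * A caller that cannot sleep (e.g. the oom reaper) uses
 * mmu_notifier_invalidate_range_start_nonblock() instead and backs off
 * if it returns -EAGAIN.
 */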

static inline void mmu_notifier_mm_init(struct mm_struct *mm)
{
	mm->mmu_notifier_mm = NULL;
}

static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
{
	if (mm_has_notifiers(mm))
		__mmu_notifier_mm_destroy(mm);
}

#define ptep_clear_flush_young_notify(__vma, __address, __ptep)	\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_clear_flush_young(___vma, ___address, __ptep);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address +		\
							PAGE_SIZE);	\
	__young;							\
})

#define pmdp_clear_flush_young_notify(__vma, __address, __pmdp)	\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_clear_flush_young(___vma, ___address, __pmdp);	\
	__young |= mmu_notifier_clear_flush_young(___vma->vm_mm,	\
						  ___address,		\
						  ___address +		\
							PMD_SIZE);	\
	__young;							\
})

#define ptep_clear_young_notify(__vma, __address, __ptep)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = ptep_test_and_clear_young(___vma, ___address, __ptep);\
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
					    ___address + PAGE_SIZE);	\
	__young;							\
})

#define pmdp_clear_young_notify(__vma, __address, __pmdp)		\
({									\
	int __young;							\
	struct vm_area_struct *___vma = __vma;				\
	unsigned long ___address = __address;				\
	__young = pmdp_test_and_clear_young(___vma, ___address, __pmdp);\
	__young |= mmu_notifier_clear_young(___vma->vm_mm, ___address,	\
					    ___address + PMD_SIZE);	\
	__young;							\
})
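
/*
 * Illustrative sketch only (not part of this header): page aging code
 * walking the rmap uses the *_young_notify() wrappers so that accesses
 * through secondary MMUs contribute to the referenced count as well, e.g.:
 *
 *	if (ptep_clear_flush_young_notify(vma, address, pte))
 *		referenced++;
 */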

#define ptep_clear_flush_notify(__vma, __address, __ptep)		\
({									\
	unsigned long ___addr = __address & PAGE_MASK;			\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pte_t ___pte;							\
									\
	___pte = ptep_clear_flush(__vma, __address, __ptep);		\
	mmu_notifier_invalidate_range(___mm, ___addr,			\
				      ___addr + PAGE_SIZE);		\
									\
	___pte;								\
})

#define pmdp_huge_clear_flush_notify(__vma, __haddr, __pmd)		\
({									\
	unsigned long ___haddr = __haddr & HPAGE_PMD_MASK;		\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pmd_t ___pmd;							\
									\
	___pmd = pmdp_huge_clear_flush(__vma, __haddr, __pmd);		\
	mmu_notifier_invalidate_range(___mm, ___haddr,			\
				      ___haddr + HPAGE_PMD_SIZE);	\
									\
	___pmd;								\
})

#define pudp_huge_clear_flush_notify(__vma, __haddr, __pud)		\
({									\
	unsigned long ___haddr = __haddr & HPAGE_PUD_MASK;		\
	struct mm_struct *___mm = (__vma)->vm_mm;			\
	pud_t ___pud;							\
									\
	___pud = pudp_huge_clear_flush(__vma, __haddr, __pud);		\
	mmu_notifier_invalidate_range(___mm, ___haddr,			\
				      ___haddr + HPAGE_PUD_SIZE);	\
									\
	___pud;								\
})

/*
 * set_pte_at_notify() sets the pte _after_ running the notifier.
 * This is safe to start by updating the secondary MMUs, because the primary
 * MMU pte invalidate must have already happened with a ptep_clear_flush()
 * before set_pte_at_notify() has been invoked. Updating the secondary MMUs
 * first is required when we change both the protection of the mapping from
 * read-only to read-write and the pfn (like during copy on write page
 * faults). Otherwise the old page would remain mapped readonly in the
 * secondary MMUs after the new page is already writable by some CPU through
 * the primary MMU.
 */
#define set_pte_at_notify(__mm, __address, __ptep, __pte)		\
({									\
	struct mm_struct *___mm = __mm;					\
	unsigned long ___address = __address;				\
	pte_t ___pte = __pte;						\
									\
	mmu_notifier_change_pte(___mm, ___address, ___pte);		\
	set_pte_at(___mm, ___address, __ptep, ___pte);			\
})
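
/*
 * Illustrative sketch only (not part of this header): a copy-on-write or
 * KSM style page replacement first clears and flushes the old pte (which
 * also invalidates the secondary MMUs), then installs the new pte with
 * set_pte_at_notify() so the secondary MMUs are updated before the primary
 * MMU can use the new mapping (locking and refcounting elided):
 *
 *	entry = mk_pte(new_page, vma->vm_page_prot);
 *	ptep_clear_flush_notify(vma, address, ptep);
 *	set_pte_at_notify(mm, address, ptep, entry);
 */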

extern void mmu_notifier_call_srcu(struct rcu_head *rcu,
				   void (*func)(struct rcu_head *rcu));
extern void mmu_notifier_synchronize(void);

#else /* CONFIG_MMU_NOTIFIER */

static inline int mm_has_notifiers(struct mm_struct *mm)
{
	return 0;
}

static inline void mmu_notifier_release(struct mm_struct *mm)
{
}

static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end)
{
	return 0;
}

static inline int mmu_notifier_test_young(struct mm_struct *mm,
					  unsigned long address)
{
	return 0;
}

static inline void mmu_notifier_change_pte(struct mm_struct *mm,
					   unsigned long address, pte_t pte)
{
}

static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
}

static inline int mmu_notifier_invalidate_range_start_nonblock(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
	return 0;
}

static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
}

static inline void mmu_notifier_invalidate_range_only_end(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
}

static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
				  unsigned long start, unsigned long end)
{
}

static inline void mmu_notifier_mm_init(struct mm_struct *mm)
{
}

static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
{
}

#define ptep_clear_flush_young_notify ptep_clear_flush_young
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
#define ptep_clear_young_notify ptep_test_and_clear_young
#define pmdp_clear_young_notify pmdp_test_and_clear_young
#define ptep_clear_flush_notify ptep_clear_flush
#define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
#define pudp_huge_clear_flush_notify pudp_huge_clear_flush
#define set_pte_at_notify set_pte_at

#endif /* CONFIG_MMU_NOTIFIER */

#endif /* _LINUX_MMU_NOTIFIER_H */