/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MM_INLINE_H
#define LINUX_MM_INLINE_H

#include <linux/atomic.h>
#include <linux/huge_mm.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/userfaultfd_k.h>
#include <linux/swapops.h>
/**
 * folio_is_file_lru - Should the folio be on a file LRU or anon LRU?
 * @folio: The folio to test.
 *
 * We would like to get this info without a page flag, but the state
 * needs to survive until the folio is last deleted from the LRU, which
 * could be as far down as __page_cache_release.
 *
 * Return: An integer (not a boolean!) used to sort a folio onto the
 * right LRU list and to account folios correctly.
 * 1 if @folio is a regular filesystem backed page cache folio
 * or a lazily freed anonymous folio (e.g. via MADV_FREE).
 * 0 if @folio is a normal anonymous folio, a tmpfs folio or otherwise
 * ram or swap backed folio.
 */
static inline int folio_is_file_lru(struct folio *folio)
{
	return !folio_test_swapbacked(folio);
}

static inline int page_is_file_lru(struct page *page)
{
	return folio_is_file_lru(page_folio(page));
}
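
/*
 * Illustrative sketch (not code from this file): MADV_FREE is why this is
 * not simply "is this page cache". A lazily freed anonymous folio has
 * PG_swapbacked cleared, so from then on it sorts onto the file LRU and is
 * cheap to reclaim, e.g.:
 *
 *	folio_clear_swapbacked(folio);		   (lazy free, MADV_FREE style)
 *	VM_WARN_ON(!folio_is_file_lru(folio));	   (now accounted as file LRU)
 */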
static __always_inline void update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				long nr_pages)
{
	struct pglist_data *pgdat = lruvec_pgdat(lruvec);

	__mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
	__mod_zone_page_state(&pgdat->node_zones[zid],
				NR_ZONE_LRU_BASE + lru, nr_pages);
#ifdef CONFIG_MEMCG
	mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
#endif
}
/**
 * __folio_clear_lru_flags - Clear page lru flags before releasing a page.
 * @folio: The folio that was on lru and now has a zero reference.
 */
static __always_inline void __folio_clear_lru_flags(struct folio *folio)
{
	VM_BUG_ON_FOLIO(!folio_test_lru(folio), folio);

	__folio_clear_lru(folio);

	/* this shouldn't happen, so leave the flags to bad_page() */
	if (folio_test_active(folio) && folio_test_unevictable(folio))
		return;

	__folio_clear_active(folio);
	__folio_clear_unevictable(folio);
}

static __always_inline void __clear_page_lru_flags(struct page *page)
{
	__folio_clear_lru_flags(page_folio(page));
}
/**
 * folio_lru_list - Which LRU list should a folio be on?
 * @folio: The folio to test.
 *
 * Return: The LRU list a folio should be on, as an index
 * into the array of LRU lists.
 */
static __always_inline enum lru_list folio_lru_list(struct folio *folio)
{
	enum lru_list lru;

	VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio);

	if (folio_test_unevictable(folio))
		return LRU_UNEVICTABLE;

	lru = folio_is_file_lru(folio) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
	if (folio_test_active(folio))
		lru += LRU_ACTIVE;

	return lru;
}
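
/*
 * Worked example (a sketch, relying on the enum lru_list layout in
 * <linux/mmzone.h> where each active list directly follows its inactive
 * counterpart): an active, swap-backed anonymous folio yields
 * LRU_INACTIVE_ANON + LRU_ACTIVE == LRU_ACTIVE_ANON, while an inactive
 * page cache folio yields LRU_INACTIVE_FILE.
 */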
static __always_inline
void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	update_lru_size(lruvec, lru, folio_zonenum(folio),
			folio_nr_pages(folio));
	if (lru != LRU_UNEVICTABLE)
		list_add(&folio->lru, &lruvec->lists[lru]);
}

static __always_inline void add_page_to_lru_list(struct page *page,
				struct lruvec *lruvec)
{
	lruvec_add_folio(lruvec, page_folio(page));
}
static __always_inline
void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	update_lru_size(lruvec, lru, folio_zonenum(folio),
			folio_nr_pages(folio));
	/* This is not expected to be used on LRU_UNEVICTABLE */
	list_add_tail(&folio->lru, &lruvec->lists[lru]);
}

static __always_inline void add_page_to_lru_list_tail(struct page *page,
				struct lruvec *lruvec)
{
	lruvec_add_folio_tail(lruvec, page_folio(page));
}
static __always_inline
void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	if (lru != LRU_UNEVICTABLE)
		list_del(&folio->lru);
	update_lru_size(lruvec, lru, folio_zonenum(folio),
			-folio_nr_pages(folio));
}

static __always_inline void del_page_from_lru_list(struct page *page,
				struct lruvec *lruvec)
{
	lruvec_del_folio(lruvec, page_folio(page));
}
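
/*
 * Illustrative sketch (loosely modelled on the rotation helpers in
 * mm/swap.c, not a copy of them): callers move a folio between lists by
 * deleting and re-adding it while holding the lruvec lock:
 *
 *	spin_lock_irq(&lruvec->lru_lock);
 *	lruvec_del_folio(lruvec, folio);
 *	folio_clear_active(folio);
 *	lruvec_add_folio_tail(lruvec, folio);
 *	spin_unlock_irq(&lruvec->lru_lock);
 */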
#ifdef CONFIG_ANON_VMA_NAME
/*
 * mmap_lock should be read-locked when calling anon_vma_name(). Caller should
 * either keep holding the lock while using the returned pointer or raise the
 * anon_vma_name refcount before releasing the lock.
 */
extern struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma);
extern struct anon_vma_name *anon_vma_name_alloc(const char *name);
extern void anon_vma_name_free(struct kref *kref);

/* mmap_lock should be read-locked */
static inline void anon_vma_name_get(struct anon_vma_name *anon_name)
{
	if (anon_name)
		kref_get(&anon_name->kref);
}

static inline void anon_vma_name_put(struct anon_vma_name *anon_name)
{
	if (anon_name)
		kref_put(&anon_name->kref, anon_vma_name_free);
}
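
/*
 * Illustrative sketch of the locking rule above (hypothetical caller, not
 * code from this file): to keep using the name after dropping mmap_lock,
 * pin it first and drop the reference when done:
 *
 *	mmap_read_lock(mm);
 *	anon_name = anon_vma_name(vma);
 *	anon_vma_name_get(anon_name);
 *	mmap_read_unlock(mm);
 *	... use anon_name->name ...
 *	anon_vma_name_put(anon_name);
 */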
static inline
struct anon_vma_name *anon_vma_name_reuse(struct anon_vma_name *anon_name)
{
	/* Prevent anon_name refcount saturation early on */
	if (kref_read(&anon_name->kref) < REFCOUNT_MAX) {
		anon_vma_name_get(anon_name);
		return anon_name;
	}
	return anon_vma_name_alloc(anon_name->name);
}
static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
				     struct vm_area_struct *new_vma)
{
	struct anon_vma_name *anon_name = anon_vma_name(orig_vma);

	if (anon_name)
		new_vma->anon_name = anon_vma_name_reuse(anon_name);
}

static inline void free_anon_vma_name(struct vm_area_struct *vma)
{
	/*
	 * Not using anon_vma_name because it generates a warning if mmap_lock
	 * is not held, which might be the case here.
	 */
	anon_vma_name_put(vma->anon_name);
}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	if (anon_name1 == anon_name2)
		return true;

	return anon_name1 && anon_name2 &&
		!strcmp(anon_name1->name, anon_name2->name);
}
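
/*
 * Illustrative sketch (hypothetical caller, not the actual VMA merging
 * code): compatibility of two named regions is decided by name content,
 * not by pointer identity:
 *
 *	if (!anon_vma_name_eq(anon_vma_name(vma), anon_vma_name(next)))
 *		return false;
 */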
#else /* CONFIG_ANON_VMA_NAME */
static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct anon_vma_name *anon_vma_name_alloc(const char *name)
{
	return NULL;
}

static inline void anon_vma_name_get(struct anon_vma_name *anon_name) {}
static inline void anon_vma_name_put(struct anon_vma_name *anon_name) {}
static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
				     struct vm_area_struct *new_vma) {}
static inline void free_anon_vma_name(struct vm_area_struct *vma) {}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	return true;
}
#endif /* CONFIG_ANON_VMA_NAME */
static inline void init_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_set(&mm->tlb_flush_pending, 0);
}
static inline void inc_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_inc(&mm->tlb_flush_pending);
	/*
	 * The only time this value is relevant is when there are indeed pages
	 * to flush. And we'll only flush pages after changing them, which
	 * requires the PTL.
	 *
	 * So the ordering here is:
	 *
	 *	atomic_inc(&mm->tlb_flush_pending);
	 *	spin_lock(&ptl);
	 *	set_pte_at();
	 *	spin_unlock(&ptl);
	 *
	 *			spin_lock(&ptl)
	 *			mm_tlb_flush_pending();
	 *			spin_unlock(&ptl);
	 *
	 *	flush_tlb_range();
	 *	atomic_dec(&mm->tlb_flush_pending);
	 *
	 * Where the increment is constrained by the PTL unlock, it thus
	 * ensures that the increment is visible if the PTE modification is
	 * visible. After all, if there is no PTE modification, nobody cares
	 * about TLB flushes either.
	 *
	 * This very much relies on users (mm_tlb_flush_pending() and
	 * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
	 * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
	 * locks (PPC) the unlock of one doesn't order against the lock of
	 * another PTL.
	 *
	 * The decrement is ordered by the flush_tlb_range(), such that
	 * mm_tlb_flush_pending() will not return false unless all flushes have
	 * completed.
	 */
}
static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * See inc_tlb_flush_pending().
	 *
	 * This cannot be smp_mb__before_atomic() because smp_mb() simply does
	 * not order against TLB invalidate completion, which is what we need.
	 *
	 * Therefore we must rely on tlb_flush_*() to guarantee order.
	 */
	atomic_dec(&mm->tlb_flush_pending);
}
static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * Must be called after having acquired the PTL; orders against that
	 * PTL's release and therefore ensures that if we observe the modified
	 * PTE we must also observe the increment from inc_tlb_flush_pending().
	 *
	 * That is, it only guarantees to return true if there is a flush
	 * pending for _this_ PTL.
	 */
	return atomic_read(&mm->tlb_flush_pending);
}
static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
{
	/*
	 * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
	 * for which there is a TLB flush pending in order to guarantee
	 * we've seen both that PTE modification and the increment.
	 *
	 * (no requirement on actually still holding the PTL, that is irrelevant)
	 */
	return atomic_read(&mm->tlb_flush_pending) > 1;
}
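
/*
 * Illustrative sketch of the expected bracket (hypothetical unmap path, not
 * a real kernel function): PTE changes happen between the increment and the
 * decrement, with the flush issued before the decrement:
 *
 *	inc_tlb_flush_pending(mm);
 *	... modify PTEs under the page table lock ...
 *	flush_tlb_range(vma, start, end);
 *	dec_tlb_flush_pending(mm);
 *
 * A racing thread that takes the same PTL and observes the modified PTE can
 * then use mm_tlb_flush_pending(mm) to tell that a flush is still outstanding.
 */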
/*
 * If this pte is wr-protected by uffd-wp in any form, arm the special pte to
 * replace a none pte. NOTE! This should only be called when *pte is already
 * cleared so we will never accidentally replace something valuable. Meanwhile
 * a none pte also means we are not demoting the pte, so a tlb flush is not
 * needed. E.g., when the pte is cleared the caller should have taken care of
 * the tlb flush.
 *
 * Must be called with the pgtable lock held so that no thread will see the
 * none pte, and if they see it, they'll fault and serialize at the pgtable
 * lock.
 *
 * This function is a no-op if PTE_MARKER_UFFD_WP is not enabled.
 */
static inline void
pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr,
			      pte_t *pte, pte_t pteval)
{
#ifdef CONFIG_PTE_MARKER_UFFD_WP
	bool arm_uffd_pte = false;

	/* The current status of the pte should be "cleared" before calling */
	WARN_ON_ONCE(!pte_none(*pte));

	if (vma_is_anonymous(vma) || !userfaultfd_wp(vma))
		return;

	/* A uffd-wp wr-protected normal pte */
	if (unlikely(pte_present(pteval) && pte_uffd_wp(pteval)))
		arm_uffd_pte = true;

	/*
	 * A uffd-wp wr-protected swap pte. Note: this should even cover an
	 * existing pte marker with uffd-wp bit set.
	 */
	if (unlikely(pte_swp_uffd_wp_any(pteval)))
		arm_uffd_pte = true;

	if (unlikely(arm_uffd_pte))
		set_pte_at(vma->vm_mm, addr, pte,
			   make_pte_marker(PTE_MARKER_UFFD_WP));
#endif
}
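
/*
 * Illustrative calling pattern (a sketch of a hypothetical zap-style caller,
 * not code from this file): the old pte value is captured when the pte is
 * cleared, and is then handed back so the uffd-wp marker can be re-armed:
 *
 *	pteval = ptep_get_and_clear(vma->vm_mm, addr, pte);
 *	... caller performs any required tlb flush ...
 *	pte_install_uffd_wp_if_needed(vma, addr, pte, pteval);
 */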