/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MM_INLINE_H
#define LINUX_MM_INLINE_H

#include <linux/atomic.h>
#include <linux/huge_mm.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/userfaultfd_k.h>
#include <linux/swapops.h>

/**
 * folio_is_file_lru - Should the folio be on a file LRU or anon LRU?
 * @folio: The folio to test.
 *
 * We would like to get this info without a page flag, but the state
 * needs to survive until the folio is last deleted from the LRU, which
 * could be as far down as __page_cache_release.
 *
 * Return: An integer (not a boolean!) used to sort a folio onto the
 * right LRU list and to account folios correctly.
 * 1 if @folio is a regular filesystem backed page cache folio
 * or a lazily freed anonymous folio (e.g. via MADV_FREE).
 * 0 if @folio is a normal anonymous folio, a tmpfs folio or otherwise
 * ram or swap backed folio.
 */
static inline int folio_is_file_lru(struct folio *folio)
{
	return !folio_test_swapbacked(folio);
}

static inline int page_is_file_lru(struct page *page)
{
	return folio_is_file_lru(page_folio(page));
}

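/*
 * A hedged usage sketch: the integer return value is typically consumed
 * directly as an index or accounting offset, mirroring folio_lru_list()
 * and lru_gen_update_size() below, e.g.
 *
 *	int type = folio_is_file_lru(folio);	// 0 = anon, 1 = file
 *	enum lru_list lru = type * LRU_INACTIVE_FILE;
 *
 * which is why the kernel-doc above stresses that it is an integer and
 * not a boolean.
 */
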
static __always_inline void __update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				long nr_pages)
{
	struct pglist_data *pgdat = lruvec_pgdat(lruvec);

	lockdep_assert_held(&lruvec->lru_lock);
	WARN_ON_ONCE(nr_pages != (int)nr_pages);

	__mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
	__mod_zone_page_state(&pgdat->node_zones[zid],
				NR_ZONE_LRU_BASE + lru, nr_pages);
}

static __always_inline void update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				long nr_pages)
{
	__update_lru_size(lruvec, lru, zid, nr_pages);
#ifdef CONFIG_MEMCG
	mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
#endif
}

/**
 * __folio_clear_lru_flags - Clear the lru flags before releasing a folio.
 * @folio: The folio that was on an LRU and now has a zero reference.
 */
static __always_inline void __folio_clear_lru_flags(struct folio *folio)
{
	VM_BUG_ON_FOLIO(!folio_test_lru(folio), folio);

	__folio_clear_lru(folio);

	/* this shouldn't happen, so leave the flags to bad_page() */
	if (folio_test_active(folio) && folio_test_unevictable(folio))
		return;

	__folio_clear_active(folio);
	__folio_clear_unevictable(folio);
}

/**
 * folio_lru_list - Which LRU list should a folio be on?
 * @folio: The folio to test.
 *
 * Return: The LRU list a folio should be on, as an index
 * into the array of LRU lists.
 */
static __always_inline enum lru_list folio_lru_list(struct folio *folio)
{
	enum lru_list lru;

	VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio);

	if (folio_test_unevictable(folio))
		return LRU_UNEVICTABLE;

	lru = folio_is_file_lru(folio) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
	if (folio_test_active(folio))
		lru += LRU_ACTIVE;

	return lru;
}

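/*
 * A sketch of the resulting mapping, assuming the usual enum lru_list
 * layout in mmzone.h (INACTIVE_ANON, ACTIVE_ANON, INACTIVE_FILE,
 * ACTIVE_FILE, UNEVICTABLE):
 *
 *	swapbacked, !active	-> LRU_INACTIVE_ANON
 *	swapbacked,  active	-> LRU_ACTIVE_ANON
 *	!swapbacked, !active	-> LRU_INACTIVE_FILE
 *	!swapbacked,  active	-> LRU_ACTIVE_FILE
 *	unevictable		-> LRU_UNEVICTABLE (regardless of the rest)
 */
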
#ifdef CONFIG_LRU_GEN

#ifdef CONFIG_LRU_GEN_ENABLED
static inline bool lru_gen_enabled(void)
{
	DECLARE_STATIC_KEY_TRUE(lru_gen_caps[NR_LRU_GEN_CAPS]);

	return static_branch_likely(&lru_gen_caps[LRU_GEN_CORE]);
}
#else
static inline bool lru_gen_enabled(void)
{
	DECLARE_STATIC_KEY_FALSE(lru_gen_caps[NR_LRU_GEN_CAPS]);

	return static_branch_unlikely(&lru_gen_caps[LRU_GEN_CORE]);
}
#endif

static inline bool lru_gen_in_fault(void)
{
	return current->in_lru_fault;
}

static inline int lru_gen_from_seq(unsigned long seq)
{
	return seq % MAX_NR_GENS;
}

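/*
 * A hedged worked example, assuming MAX_NR_GENS == 4: sequence numbers
 * map onto generations as a ring, e.g. seq 0,1,2,3,4,5 -> gen 0,1,2,3,0,1.
 * Because the gen index wraps, generations are compared via their
 * sequence numbers (see lru_gen_is_active() below), never via the
 * wrapped index itself.
 */
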
static inline int lru_hist_from_seq(unsigned long seq)
{
	return seq % NR_HIST_GENS;
}

static inline int lru_tier_from_refs(int refs)
{
	VM_WARN_ON_ONCE(refs > BIT(LRU_REFS_WIDTH));

	/* see the comment in folio_lru_refs() */
	return order_base_2(refs + 1);
}

static inline int folio_lru_refs(struct folio *folio)
{
	unsigned long flags = READ_ONCE(folio->flags);
	bool workingset = flags & BIT(PG_workingset);

	/*
	 * Return the number of accesses beyond PG_referenced, i.e., N - 1 if
	 * the total number of accesses is N > 1, since N = 0, 1 both map to
	 * the first tier. lru_tier_from_refs() will account for this
	 * off-by-one. Also see the comment on MAX_NR_TIERS.
	 */
	return ((flags & LRU_REFS_MASK) >> LRU_REFS_PGOFF) + workingset;
}

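/*
 * A hedged worked example of the tier math: order_base_2(refs + 1) maps
 * refs 0 -> tier 0, refs 1 -> tier 1, refs 2..3 -> tier 2 and
 * refs 4..7 -> tier 3, i.e. each higher tier covers roughly twice as
 * many accesses as the one below it, so tiers grow logarithmically with
 * the access count.
 */
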
static inline int folio_lru_gen(struct folio *folio)
{
	unsigned long flags = READ_ONCE(folio->flags);

	return ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;
}

static inline bool lru_gen_is_active(struct lruvec *lruvec, int gen)
{
	unsigned long max_seq = lruvec->lrugen.max_seq;

	VM_WARN_ON_ONCE(gen >= MAX_NR_GENS);

	/* see the comment on MIN_NR_GENS */
	return gen == lru_gen_from_seq(max_seq) || gen == lru_gen_from_seq(max_seq - 1);
}

static inline void lru_gen_update_size(struct lruvec *lruvec, struct folio *folio,
				       int old_gen, int new_gen)
{
	int type = folio_is_file_lru(folio);
	int zone = folio_zonenum(folio);
	int delta = folio_nr_pages(folio);
	enum lru_list lru = type * LRU_INACTIVE_FILE;
	struct lru_gen_folio *lrugen = &lruvec->lrugen;

	VM_WARN_ON_ONCE(old_gen != -1 && old_gen >= MAX_NR_GENS);
	VM_WARN_ON_ONCE(new_gen != -1 && new_gen >= MAX_NR_GENS);
	VM_WARN_ON_ONCE(old_gen == -1 && new_gen == -1);

	if (old_gen >= 0)
		WRITE_ONCE(lrugen->nr_pages[old_gen][type][zone],
			   lrugen->nr_pages[old_gen][type][zone] - delta);
	if (new_gen >= 0)
		WRITE_ONCE(lrugen->nr_pages[new_gen][type][zone],
			   lrugen->nr_pages[new_gen][type][zone] + delta);

	/* addition */
	if (old_gen < 0) {
		if (lru_gen_is_active(lruvec, new_gen))
			lru += LRU_ACTIVE;
		__update_lru_size(lruvec, lru, zone, delta);
		return;
	}

	/* deletion */
	if (new_gen < 0) {
		if (lru_gen_is_active(lruvec, old_gen))
			lru += LRU_ACTIVE;
		__update_lru_size(lruvec, lru, zone, -delta);
		return;
	}

	/* promotion */
	if (!lru_gen_is_active(lruvec, old_gen) && lru_gen_is_active(lruvec, new_gen)) {
		__update_lru_size(lruvec, lru, zone, -delta);
		__update_lru_size(lruvec, lru + LRU_ACTIVE, zone, delta);
	}

	/* demotion requires isolation, e.g., lru_deactivate_fn() */
	VM_WARN_ON_ONCE(lru_gen_is_active(lruvec, old_gen) && !lru_gen_is_active(lruvec, new_gen));
}

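/*
 * A hedged summary of the four (old_gen, new_gen) cases handled above:
 * (-1, g) accounts an addition, (g, -1) a deletion, and inactive ->
 * active a promotion; an active -> inactive transition (a demotion) is
 * expected to go through isolation instead, hence the final warning.
 */
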
static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	unsigned long seq;
	unsigned long flags;
	int gen = folio_lru_gen(folio);
	int type = folio_is_file_lru(folio);
	int zone = folio_zonenum(folio);
	struct lru_gen_folio *lrugen = &lruvec->lrugen;

	VM_WARN_ON_ONCE_FOLIO(gen != -1, folio);

	if (folio_test_unevictable(folio) || !lrugen->enabled)
		return false;
	/*
	 * There are three common cases for this page:
	 * 1. If it's hot, e.g., freshly faulted in or previously hot and
	 *    migrated, add it to the youngest generation.
	 * 2. If it's cold but can't be evicted immediately, i.e., an anon page
	 *    not in swapcache or a dirty page pending writeback, add it to the
	 *    second oldest generation.
	 * 3. Everything else (clean, cold) is added to the oldest generation.
	 */
	if (folio_test_active(folio))
		seq = lrugen->max_seq;
	else if ((type == LRU_GEN_ANON && !folio_test_swapcache(folio)) ||
		 (folio_test_reclaim(folio) &&
		  (folio_test_dirty(folio) || folio_test_writeback(folio))))
		seq = lrugen->min_seq[type] + 1;
	else
		seq = lrugen->min_seq[type];

	gen = lru_gen_from_seq(seq);
	flags = (gen + 1UL) << LRU_GEN_PGOFF;
	/* see the comment on MIN_NR_GENS about PG_active */
	set_mask_bits(&folio->flags, LRU_GEN_MASK | BIT(PG_active), flags);

	lru_gen_update_size(lruvec, folio, -1, gen);
	/* for folio_rotate_reclaimable() */
	if (reclaiming)
		list_add_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
	else
		list_add(&folio->lru, &lrugen->folios[gen][type][zone]);

	return true;
}

static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	unsigned long flags;
	int gen = folio_lru_gen(folio);

	if (gen < 0)
		return false;

	VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio);
	VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio);

	/* for folio_migrate_flags() */
	flags = !reclaiming && lru_gen_is_active(lruvec, gen) ? BIT(PG_active) : 0;
	flags = set_mask_bits(&folio->flags, LRU_GEN_MASK, flags);
	gen = ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;

	lru_gen_update_size(lruvec, folio, gen, -1);
	list_del(&folio->lru);

	return true;
}

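/*
 * A note on the flag encoding used by the two helpers above: the gen is
 * stored as gen + 1 in folio->flags so that 0 can mean "not on a
 * multi-gen LRU list":
 *
 *	flags = (gen + 1UL) << LRU_GEN_PGOFF;			// encode
 *	gen = ((flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1;	// decode
 *
 * and a decoded gen of -1 is what lru_gen_del_folio() treats as "not
 * ours".
 */
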
#else /* !CONFIG_LRU_GEN */

static inline bool lru_gen_enabled(void)
{
	return false;
}

static inline bool lru_gen_in_fault(void)
{
	return false;
}

static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	return false;
}

static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming)
{
	return false;
}

#endif /* CONFIG_LRU_GEN */

static __always_inline
void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	if (lru_gen_add_folio(lruvec, folio, false))
		return;

	update_lru_size(lruvec, lru, folio_zonenum(folio),
			folio_nr_pages(folio));
	if (lru != LRU_UNEVICTABLE)
		list_add(&folio->lru, &lruvec->lists[lru]);
}

static __always_inline
void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	if (lru_gen_add_folio(lruvec, folio, true))
		return;

	update_lru_size(lruvec, lru, folio_zonenum(folio),
			folio_nr_pages(folio));
	/* This is not expected to be used on LRU_UNEVICTABLE */
	list_add_tail(&folio->lru, &lruvec->lists[lru]);
}

static __always_inline
void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	if (lru_gen_del_folio(lruvec, folio, false))
		return;

	if (lru != LRU_UNEVICTABLE)
		list_del(&folio->lru);
	update_lru_size(lruvec, lru, folio_zonenum(folio),
			-folio_nr_pages(folio));
}

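/*
 * A hedged usage sketch for the three lruvec helpers above: callers are
 * expected to hold lruvec->lru_lock (enforced by the lockdep assertion
 * in __update_lru_size()), e.g.
 *
 *	spin_lock_irq(&lruvec->lru_lock);
 *	lruvec_del_folio(lruvec, folio);
 *	...
 *	lruvec_add_folio(lruvec, folio);
 *	spin_unlock_irq(&lruvec->lru_lock);
 *
 * Note the asymmetry: unevictable folios are accounted but never linked
 * onto a list, so lruvec_del_folio() also skips list_del() for them.
 */
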
#ifdef CONFIG_ANON_VMA_NAME
/*
 * mmap_lock should be read-locked when calling anon_vma_name(). Caller should
 * either keep holding the lock while using the returned pointer or it should
 * raise anon_vma_name refcount before releasing the lock.
 */
extern struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma);
extern struct anon_vma_name *anon_vma_name_alloc(const char *name);
extern void anon_vma_name_free(struct kref *kref);

/* mmap_lock should be read-locked */
static inline void anon_vma_name_get(struct anon_vma_name *anon_name)
{
	if (anon_name)
		kref_get(&anon_name->kref);
}

static inline void anon_vma_name_put(struct anon_vma_name *anon_name)
{
	if (anon_name)
		kref_put(&anon_name->kref, anon_vma_name_free);
}

static inline
struct anon_vma_name *anon_vma_name_reuse(struct anon_vma_name *anon_name)
{
	/* Prevent anon_name refcount saturation early on */
	if (kref_read(&anon_name->kref) < REFCOUNT_MAX) {
		anon_vma_name_get(anon_name);
		return anon_name;
	}
	return anon_vma_name_alloc(anon_name->name);
}

static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
				     struct vm_area_struct *new_vma)
{
	struct anon_vma_name *anon_name = anon_vma_name(orig_vma);

	if (anon_name)
		new_vma->anon_name = anon_vma_name_reuse(anon_name);
}

static inline void free_anon_vma_name(struct vm_area_struct *vma)
{
	/*
	 * Not using anon_vma_name() because it generates a warning if
	 * mmap_lock is not held, which might be the case here.
	 */
	anon_vma_name_put(vma->anon_name);
}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	if (anon_name1 == anon_name2)
		return true;

	return anon_name1 && anon_name2 &&
	       !strcmp(anon_name1->name, anon_name2->name);
}

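/*
 * A hedged sketch of the refcounting contract described above
 * (illustrative flow, not a definitive call sequence):
 *
 *	mmap_read_lock(mm);
 *	struct anon_vma_name *name = anon_vma_name(vma);
 *	anon_vma_name_get(name);	// pin before dropping mmap_lock
 *	mmap_read_unlock(mm);
 *	...				// name->name remains valid here
 *	anon_vma_name_put(name);
 */
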
#else /* CONFIG_ANON_VMA_NAME */
static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct anon_vma_name *anon_vma_name_alloc(const char *name)
{
	return NULL;
}

static inline void anon_vma_name_get(struct anon_vma_name *anon_name) {}
static inline void anon_vma_name_put(struct anon_vma_name *anon_name) {}
static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
				     struct vm_area_struct *new_vma) {}
static inline void free_anon_vma_name(struct vm_area_struct *vma) {}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	return true;
}

#endif /* CONFIG_ANON_VMA_NAME */

static inline void init_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_set(&mm->tlb_flush_pending, 0);
}

static inline void inc_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_inc(&mm->tlb_flush_pending);
	/*
	 * The only time this value is relevant is when there are indeed pages
	 * to flush. And we'll only flush pages after changing them, which
	 * requires the PTL.
	 *
	 * So the ordering here is:
	 *
	 *	atomic_inc(&mm->tlb_flush_pending);
	 *	spin_lock(&ptl);
	 *	...
	 *	set_pte_at();
	 *	spin_unlock(&ptl);
	 *
	 *				spin_lock(&ptl)
	 *				mm_tlb_flush_pending();
	 *				....
	 *				spin_unlock(&ptl);
	 *
	 *				flush_tlb_range();
	 *				atomic_dec(&mm->tlb_flush_pending);
	 *
	 * Where the increment is constrained by the PTL unlock, it thus
	 * ensures that the increment is visible if the PTE modification is
	 * visible. After all, if there is no PTE modification, nobody cares
	 * about TLB flushes either.
	 *
	 * This very much relies on users (mm_tlb_flush_pending() and
	 * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
	 * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
	 * locks (PPC) the unlock of one doesn't order against the lock of
	 * another PTL.
	 *
	 * The decrement is ordered by the flush_tlb_range(), such that
	 * mm_tlb_flush_pending() will not return false unless all flushes
	 * have completed.
	 */
}

static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * See inc_tlb_flush_pending().
	 *
	 * This cannot be smp_mb__before_atomic() because smp_mb() simply does
	 * not order against TLB invalidate completion, which is what we need.
	 *
	 * Therefore we must rely on tlb_flush_*() to guarantee order.
	 */
	atomic_dec(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * Must be called after having acquired the PTL; orders against that
	 * PTL's release and therefore ensures that if we observe the modified
	 * PTE we must also observe the increment from inc_tlb_flush_pending().
	 *
	 * That is, it only guarantees to return true if there is a flush
	 * pending for _this_ PTL.
	 */
	return atomic_read(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
{
	/*
	 * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
	 * for which there is a TLB flush pending in order to guarantee
	 * we've seen both that PTE modification and the increment.
	 *
	 * (no requirement on actually still holding the PTL, that is irrelevant)
	 */
	return atomic_read(&mm->tlb_flush_pending) > 1;
}

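/*
 * A hedged usage sketch of the pending-flush counter (illustrative
 * pseudocode for a batched-unmap path, not a specific caller):
 *
 *	inc_tlb_flush_pending(mm);
 *	... clear PTEs under the PTL ...
 *	flush_tlb_range(vma, start, end);
 *	dec_tlb_flush_pending(mm);
 *
 * mm_tlb_flush_nested() then returns true when two such sections overlap
 * on the same mm, since the counter exceeds one.
 */
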
#ifdef CONFIG_MMU
/*
 * Computes the pte marker to copy from the given source entry into dst_vma.
 * If no marker should be copied, returns 0.
 * The caller should insert a new pte created with make_pte_marker().
 */
static inline pte_marker copy_pte_marker(
		swp_entry_t entry, struct vm_area_struct *dst_vma)
{
	pte_marker srcm = pte_marker_get(entry);
	/* Always copy error entries. */
	pte_marker dstm = srcm & PTE_MARKER_POISONED;

	/* Only copy PTE markers if UFFD register matches. */
	if ((srcm & PTE_MARKER_UFFD_WP) && userfaultfd_wp(dst_vma))
		dstm |= PTE_MARKER_UFFD_WP;

	return dstm;
}
#endif

/*
 * If this pte is wr-protected by uffd-wp in any form, arm the special pte to
 * replace a none pte. NOTE! This should only be called when *pte is already
 * cleared so we will never accidentally replace something valuable. Meanwhile
 * a none pte also means we are not demoting the pte, so a tlb flush is not
 * needed: e.g., when the pte was cleared, the caller should have taken care
 * of the tlb flush.
 *
 * Must be called with the pgtable lock held so that no thread will see the
 * none pte, and if they see it, they'll fault and serialize at the pgtable
 * lock.
 *
 * This function is a no-op if PTE_MARKER_UFFD_WP is not enabled.
 */
static inline void
pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr,
			      pte_t *pte, pte_t pteval)
{
#ifdef CONFIG_PTE_MARKER_UFFD_WP
	bool arm_uffd_pte = false;

	/* The current status of the pte should be "cleared" before calling */
	WARN_ON_ONCE(!pte_none(ptep_get(pte)));

	/*
	 * NOTE: userfaultfd_wp_unpopulated() doesn't need this whole
	 * thing, because when zapping either it means it's dropping the
	 * page, or in TTU where the present pte will be quickly replaced
	 * with a swap pte. There's no way of leaking the bit.
	 */
	if (vma_is_anonymous(vma) || !userfaultfd_wp(vma))
		return;

	/* A uffd-wp wr-protected normal pte */
	if (unlikely(pte_present(pteval) && pte_uffd_wp(pteval)))
		arm_uffd_pte = true;

	/*
	 * A uffd-wp wr-protected swap pte. Note: this should even cover an
	 * existing pte marker with the uffd-wp bit set.
	 */
	if (unlikely(pte_swp_uffd_wp_any(pteval)))
		arm_uffd_pte = true;

	if (unlikely(arm_uffd_pte))
		set_pte_at(vma->vm_mm, addr, pte,
			   make_pte_marker(PTE_MARKER_UFFD_WP));
#endif
}

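/*
 * A hedged usage sketch (an illustrative zap path, not a specific
 * caller):
 *
 *	ptent = ptep_get_and_clear(mm, addr, pte);	// pte is now none
 *	...				// tlb flush handled by the zap path
 *	pte_install_uffd_wp_if_needed(vma, addr, pte, ptent);
 *
 * so that a later write into the hole still faults into userfaultfd
 * rather than silently losing the wr-protection.
 */
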
static inline bool vma_has_recency(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ))
		return false;

	if (vma->vm_file && (vma->vm_file->f_mode & FMODE_NOREUSE))
		return false;

	return true;
}

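/*
 * A hedged example: madvise(MADV_SEQUENTIAL) sets VM_SEQ_READ on the
 * VMA, so pages accessed through it report no recency here and reclaim
 * can treat them as cold streaming I/O rather than working-set pages.
 */
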
#endif /* LINUX_MM_INLINE_H */