/*
 * Macros for manipulating and testing page->flags
 */

#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/types.h>
#include <linux/mmdebug.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <generated/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */
/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages, which can never be swapped out. Some
 * of them might not even exist (eg empty_bad_page)...
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). It can be used by
 * private allocations for their own usage.
 *
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_uptodate tells whether the page's contents are valid. When a read
 * completes, the page becomes uptodate, unless a disk I/O error happened.
 *
 * PG_referenced and PG_reclaim are used for page reclaim of anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
 * PG_arch_1 is an architecture-specific page state bit. The generic code
 * guarantees that this bit is cleared for a page when it first is entered
 * into the page cache.
 *
 * PG_highmem pages are not permanently mapped into the kernel virtual address
 * space; they need to be kmapped separately for doing I/O on them. The
 * struct page itself (which holds these flag bits) is always mapped into
 * kernel address space.
 *
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * data with incorrect ECC bits that triggered a machine check. Accessing it
 * is not safe since it may cause another machine check. Don't touch!
 */

/*
 * Don't use the *_dontuse flags. Use the macros. Otherwise you'll break
 * locked- and dirty-page accounting.
 *
 * The page flags field is split into two parts: the main flags area,
 * which extends from the low bits upwards, and the fields area, which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *               (NR_PAGEFLAGS)
 *
 * The fields area is reserved for fields mapping zone, node (for NUMA) and
 * SPARSEMEM section (for variants of SPARSEMEM that require section ids,
 * such as SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 */
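/*
 * Illustrative sketch (not part of this header): how PG_locked and
 * PG_uptodate are typically used on the buffered read path.  lock_page(),
 * unlock_page() and wait_on_page_locked() are the usual helpers from
 * <linux/pagemap.h>; the exact call sites vary by filesystem.
 *
 *	lock_page(page);			take PG_locked (may sleep)
 *	if (!PageUptodate(page))
 *		mapping->a_ops->readpage(file, page);	starts I/O; the read
 *							completion sets
 *							PG_uptodate and unlocks
 *	else
 *		unlock_page(page);		clear PG_locked, wake waiters
 *	wait_on_page_locked(page);		wait for the read to finish
 *	if (PageUptodate(page))
 *		the page data may now be copied to userspace
 */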
enum pageflags {
	PG_locked,		/* Page is locked. Don't touch. */
	PG_error,
	PG_referenced,
	PG_uptodate,
	PG_dirty,
	PG_lru,
	PG_active,
	PG_slab,
	PG_owner_priv_1,	/* Owner use. If pagecache, fs may use*/
	PG_arch_1,
	PG_reserved,
	PG_private,		/* If pagecache, has fs-private data */
	PG_private_2,		/* If pagecache, has fs aux data */
	PG_writeback,		/* Page is under writeback */
	PG_head,		/* A head page */
	PG_swapcache,		/* Swap page: swp_entry_t in private */
	PG_mappedtodisk,	/* Has blocks allocated on-disk */
	PG_reclaim,		/* To be reclaimed asap */
	PG_swapbacked,		/* Page is backed by RAM/swap */
	PG_unevictable,		/* Page is "unevictable" */
#ifdef CONFIG_MMU
	PG_mlocked,		/* Page is vma mlocked */
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	PG_uncached,		/* Page has been mapped as uncached */
#endif
#ifdef CONFIG_MEMORY_FAILURE
	PG_hwpoison,		/* hardware poisoned page. Don't touch */
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	PG_compound_lock,
#endif
#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
	PG_young,
	PG_idle,
#endif
	__NR_PAGEFLAGS,

	/* Filesystems */
	PG_checked = PG_owner_priv_1,

	/* Two page bits are conscripted by FS-Cache to maintain local caching
	 * state.  These bits are set on pages belonging to the netfs's inodes
	 * when those inodes are being locally cached.
	 */
	PG_fscache = PG_private_2,	/* page backed by cache */

	/* XEN */
	/* Pinned in Xen as a read-only pagetable page. */
	PG_pinned = PG_owner_priv_1,
	/* Pinned as part of domain save (see xen_mm_pin_all()). */
	PG_savepinned = PG_dirty,
	/* Has a grant mapping of another (foreign) domain's page. */
	PG_foreign = PG_owner_priv_1,

	/* SLOB */
	PG_slob_free = PG_private,
};
#ifndef __GENERATING_BOUNDS_H

struct page;	/* forward declaration */

static inline struct page *compound_head(struct page *page)
{
	unsigned long head = READ_ONCE(page->compound_head);

	if (unlikely(head & 1))
		return (struct page *) (head - 1);
	return page;
}

static inline int PageTail(struct page *page)
{
	return READ_ONCE(page->compound_head) & 1;
}

static inline int PageCompound(struct page *page)
{
	return test_bit(PG_head, &page->flags) || PageTail(page);
}
/*
 * Page flags policies wrt compound pages
 *
 * PF_ANY:
 *     the page flag is relevant for small, head and tail pages.
 *
 * PF_HEAD:
 *     for compound pages, all operations related to the page flag are
 *     applied to the head page.
 *
 * PF_NO_TAIL:
 *     modifications of the page flag must be done on small or head pages,
 *     checks can be done on tail pages too.
 *
 * PF_NO_COMPOUND:
 *     the page flag is not relevant for compound pages.
 */
#define PF_ANY(page, enforce)	page
#define PF_HEAD(page, enforce)	compound_head(page)
#define PF_NO_TAIL(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);	\
		compound_head(page);})
#define PF_NO_COMPOUND(page, enforce) ({				\
		VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page);	\
		page;})
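/*
 * Illustrative sketch (not part of this header): the policy argument decides
 * which struct page a generated accessor operates on.  Given a tail page of
 * a compound allocation:
 *
 *	PF_HEAD:	both tests and modifications resolve to
 *			compound_head(tail)->flags.
 *	PF_NO_TAIL:	tests pass enforce == 0 and read the head page's
 *			flags; modifications pass enforce == 1 and trip
 *			VM_BUG_ON_PGFLAGS() when handed a tail page.
 *	PF_NO_COMPOUND:	modifications pass enforce == 1 and trip
 *			VM_BUG_ON_PGFLAGS() on any compound page.
 */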
/*
 * Macros to create function definitions for page flags
 */
#define TESTPAGEFLAG(uname, lname, policy)				\
static inline int Page##uname(struct page *page)			\
	{ return test_bit(PG_##lname, &policy(page, 0)->flags); }

#define SETPAGEFLAG(uname, lname, policy)				\
static inline void SetPage##uname(struct page *page)			\
	{ set_bit(PG_##lname, &policy(page, 1)->flags); }

#define CLEARPAGEFLAG(uname, lname, policy)				\
static inline void ClearPage##uname(struct page *page)			\
	{ clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define __SETPAGEFLAG(uname, lname, policy)				\
static inline void __SetPage##uname(struct page *page)			\
	{ __set_bit(PG_##lname, &policy(page, 1)->flags); }

#define __CLEARPAGEFLAG(uname, lname, policy)				\
static inline void __ClearPage##uname(struct page *page)		\
	{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTSETFLAG(uname, lname, policy)				\
static inline int TestSetPage##uname(struct page *page)		\
	{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTCLEARFLAG(uname, lname, policy)				\
static inline int TestClearPage##uname(struct page *page)		\
	{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define __TESTCLEARFLAG(uname, lname, policy)				\
static inline int __TestClearPage##uname(struct page *page)		\
	{ return __test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define PAGEFLAG(uname, lname, policy)					\
	TESTPAGEFLAG(uname, lname, policy)				\
	SETPAGEFLAG(uname, lname, policy)				\
	CLEARPAGEFLAG(uname, lname, policy)

#define __PAGEFLAG(uname, lname, policy)				\
	TESTPAGEFLAG(uname, lname, policy)				\
	__SETPAGEFLAG(uname, lname, policy)				\
	__CLEARPAGEFLAG(uname, lname, policy)

#define TESTSCFLAG(uname, lname, policy)				\
	TESTSETFLAG(uname, lname, policy)				\
	TESTCLEARFLAG(uname, lname, policy)

#define TESTPAGEFLAG_FALSE(uname)					\
static inline int Page##uname(const struct page *page) { return 0; }

#define SETPAGEFLAG_NOOP(uname)						\
static inline void SetPage##uname(struct page *page) { }

#define CLEARPAGEFLAG_NOOP(uname)					\
static inline void ClearPage##uname(struct page *page) { }

#define __CLEARPAGEFLAG_NOOP(uname)					\
static inline void __ClearPage##uname(struct page *page) { }

#define TESTSETFLAG_FALSE(uname)					\
static inline int TestSetPage##uname(struct page *page) { return 0; }

#define TESTCLEARFLAG_FALSE(uname)					\
static inline int TestClearPage##uname(struct page *page) { return 0; }

#define __TESTCLEARFLAG_FALSE(uname)					\
static inline int __TestClearPage##uname(struct page *page) { return 0; }

#define PAGEFLAG_FALSE(uname) TESTPAGEFLAG_FALSE(uname)			\
	SETPAGEFLAG_NOOP(uname) CLEARPAGEFLAG_NOOP(uname)

#define TESTSCFLAG_FALSE(uname)						\
	TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname)
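/*
 * Illustrative expansion (not part of this header): PAGEFLAG(Dirty, dirty,
 * PF_HEAD) generates, roughly:
 *
 *	static inline int PageDirty(struct page *page)
 *	{ return test_bit(PG_dirty, &compound_head(page)->flags); }
 *	static inline void SetPageDirty(struct page *page)
 *	{ set_bit(PG_dirty, &compound_head(page)->flags); }
 *	static inline void ClearPageDirty(struct page *page)
 *	{ clear_bit(PG_dirty, &compound_head(page)->flags); }
 *
 * The uppercase "uname" becomes part of the function name, the lowercase
 * "lname" selects the PG_* bit, and the policy macro picks the struct page
 * whose flags word is actually used.
 */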
__PAGEFLAG(Locked, locked, PF_NO_TAIL)
PAGEFLAG(Error, error, PF_NO_COMPOUND) TESTCLEARFLAG(Error, error, PF_NO_COMPOUND)
PAGEFLAG(Referenced, referenced, PF_HEAD)
	TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
	__SETPAGEFLAG(Referenced, referenced, PF_HEAD)
PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
	__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
	TESTCLEARFLAG(Active, active, PF_HEAD)
__PAGEFLAG(Slab, slab, PF_ANY)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND)	/* Used by some filesystems */
PAGEFLAG(Pinned, pinned, PF_ANY) TESTSCFLAG(Pinned, pinned, PF_ANY)	/* Xen */
PAGEFLAG(SavePinned, savepinned, PF_ANY);	/* Xen */
PAGEFLAG(Foreign, foreign, PF_ANY);		/* Xen */
PAGEFLAG(Reserved, reserved, PF_ANY) __CLEARPAGEFLAG(Reserved, reserved, PF_ANY)
PAGEFLAG(SwapBacked, swapbacked, PF_ANY)
	__CLEARPAGEFLAG(SwapBacked, swapbacked, PF_ANY)
	__SETPAGEFLAG(SwapBacked, swapbacked, PF_ANY)

__PAGEFLAG(SlobFree, slob_free, PF_ANY)
/*
 * Private page markings that may be used by the filesystem that owns the page
 * for its own purposes.
 * - PG_private and PG_private_2 cause releasepage() and co to be invoked
 */
PAGEFLAG(Private, private, PF_ANY) __SETPAGEFLAG(Private, private, PF_ANY)
	__CLEARPAGEFLAG(Private, private, PF_ANY)
PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
	TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)

/*
 * Only test-and-set exist for PG_writeback.  The unconditional operators are
 * risky: they bypass page accounting.
 */
TESTPAGEFLAG(Writeback, writeback, PF_NO_COMPOUND)
	TESTSCFLAG(Writeback, writeback, PF_NO_COMPOUND)
PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_COMPOUND)

/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_COMPOUND)
	TESTCLEARFLAG(Reclaim, reclaim, PF_NO_COMPOUND)
PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND)
	TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND)
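/*
 * Illustrative note (not part of this header): PG_readahead and PG_reclaim
 * share one bit, so PageReadahead() and PageReclaim() test the same flag and
 * callers must disambiguate by context.  Roughly:
 *
 *	read side:  readahead code marks a page with SetPageReadahead(); when
 *		    a later reader hits it, PageReadahead() triggers the next
 *		    batch of asynchronous readahead (mm/readahead.c).
 *	write side: reclaim marks a page under writeback with SetPageReclaim()
 *		    so it can be rotated to the tail of the LRU for fast
 *		    reclaim once writeback completes (mm/vmscan.c).
 */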
#ifdef CONFIG_HIGHMEM
/*
 * Must use a macro here due to header dependency issues. page_zone() is not
 * available at this point.
 */
#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
#else
PAGEFLAG_FALSE(HighMem)
#endif

#ifdef CONFIG_SWAP
PAGEFLAG(SwapCache, swapcache, PF_ANY)
#else
PAGEFLAG_FALSE(SwapCache)
#endif

PAGEFLAG(Unevictable, unevictable, PF_HEAD)
	__CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
	TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)

#ifdef CONFIG_MMU
PAGEFLAG(Mlocked, mlocked, PF_ANY) __CLEARPAGEFLAG(Mlocked, mlocked, PF_ANY)
	TESTSCFLAG(Mlocked, mlocked, PF_ANY) __TESTCLEARFLAG(Mlocked, mlocked, PF_ANY)
#else
PAGEFLAG_FALSE(Mlocked) __CLEARPAGEFLAG_NOOP(Mlocked)
	TESTSCFLAG_FALSE(Mlocked) __TESTCLEARFLAG_FALSE(Mlocked)
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
PAGEFLAG(Uncached, uncached, PF_ANY)
#else
PAGEFLAG_FALSE(Uncached)
#endif

#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
#else
PAGEFLAG_FALSE(HWPoison)
#define __PG_HWPOISON 0
#endif

#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
TESTPAGEFLAG(Young, young, PF_ANY)
SETPAGEFLAG(Young, young, PF_ANY)
TESTCLEARFLAG(Young, young, PF_ANY)
PAGEFLAG(Idle, idle, PF_ANY)
#endif
/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space,
 * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
 * and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page.  See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 */
#define PAGE_MAPPING_ANON	1
#define PAGE_MAPPING_KSM	2
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)

static inline int PageAnon(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}

#ifdef CONFIG_KSM
/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static inline int PageKsm(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
}
#else
TESTPAGEFLAG_FALSE(Ksm)
#endif
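/*
 * Illustrative sketch (not part of this header): the low bits of
 * page->mapping act as a type tag.  Masking off the tag bits recovers the
 * underlying pointer:
 *
 *	struct anon_vma *av;
 *	struct address_space *as;
 *	unsigned long m = (unsigned long)page->mapping;
 *
 *	if (m & PAGE_MAPPING_ANON)
 *		av = (struct anon_vma *)(m & ~PAGE_MAPPING_FLAGS);
 *	else
 *		as = (struct address_space *)m;
 *
 * For a KSM page both PAGE_MAPPING_ANON and PAGE_MAPPING_KSM are set and the
 * remaining bits point to KSM's private stable-tree node rather than an
 * anon_vma; the page_mapping() helper does this decoding for real.
 */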
u64 stable_page_flags(struct page *page);

static inline int PageUptodate(struct page *page)
{
	int ret = test_bit(PG_uptodate, &(page)->flags);

	/*
	 * Must ensure that the data we read out of the page is loaded
	 * _after_ we've loaded page->flags to check for PageUptodate.
	 * We can skip the barrier if the page is not uptodate, because
	 * we wouldn't be reading anything from it.
	 *
	 * See SetPageUptodate() for the other side of the story.
	 */
	if (ret)
		smp_rmb();

	return ret;
}

static inline void __SetPageUptodate(struct page *page)
{
	smp_wmb();
	__set_bit(PG_uptodate, &page->flags);
}

static inline void SetPageUptodate(struct page *page)
{
	/*
	 * Memory barrier must be issued before setting the PG_uptodate bit,
	 * so that all previous stores issued in order to bring the page
	 * uptodate are actually visible before PageUptodate becomes true.
	 */
	smp_wmb();
	set_bit(PG_uptodate, &page->flags);
}

CLEARPAGEFLAG(Uptodate, uptodate, PF_ANY)
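/*
 * Illustrative pairing (not part of this header): the smp_wmb() in
 * SetPageUptodate() and the smp_rmb() in PageUptodate() order the page data
 * against the flag bit.
 *
 *	writer (I/O completion):	reader:
 *	  ...fill page contents...	  if (PageUptodate(page)) {
 *	  SetPageUptodate(page);		smp_rmb() already issued,
 *						page data is safe to read
 *					  }
 *
 * Without the barriers a reader on a weakly ordered CPU could observe
 * PG_uptodate set while still seeing stale page contents.
 */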
int test_clear_page_writeback(struct page *page);
int __test_set_page_writeback(struct page *page, bool keep_write);

#define test_set_page_writeback(page)			\
	__test_set_page_writeback(page, false)
#define test_set_page_writeback_keepwrite(page)		\
	__test_set_page_writeback(page, true)

static inline void set_page_writeback(struct page *page)
{
	test_set_page_writeback(page);
}

static inline void set_page_writeback_keepwrite(struct page *page)
{
	test_set_page_writeback_keepwrite(page);
}

__PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY)

static inline void set_compound_head(struct page *page, struct page *head)
{
	WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}

static inline void clear_compound_head(struct page *page)
{
	WRITE_ONCE(page->compound_head, 0);
}
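/*
 * Illustrative sketch (not part of this header): tail pages encode a pointer
 * to their head page in page->compound_head, with bit 0 used as the "this is
 * a tail page" marker (struct page is aligned, so bit 0 of a real pointer is
 * always clear):
 *
 *	set_compound_head(tail, head);	tail->compound_head = (ulong)head + 1
 *	PageTail(tail)			-> compound_head & 1 == 1
 *	compound_head(tail)		-> (struct page *)(compound_head - 1)
 *	compound_head(head)		-> head itself (bit 0 clear)
 *
 * READ_ONCE()/WRITE_ONCE() keep the load and store single accesses with
 * respect to concurrent splitting of the compound page.
 */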
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
	BUG_ON(!PageHead(page));
	ClearPageHead(page);
}
#endif

#define PG_head_mask ((1L << PG_head))

#ifdef CONFIG_HUGETLB_PAGE
int PageHuge(struct page *page);
int PageHeadHuge(struct page *page);
bool page_huge_active(struct page *page);
#else
TESTPAGEFLAG_FALSE(Huge)
TESTPAGEFLAG_FALSE(HeadHuge)

static inline bool page_huge_active(struct page *page)
{
	return false;
}
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * PageHuge() only returns true for hugetlbfs pages, but not for
 * normal or transparent huge pages.
 *
 * PageTransHuge() returns true for both transparent huge and
 * hugetlbfs pages, but not normal pages. PageTransHuge() can only be
 * called in the core VM paths where hugetlbfs pages can't exist.
 */
static inline int PageTransHuge(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	return PageHead(page);
}

/*
 * PageTransCompound returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransCompound(struct page *page)
{
	return PageCompound(page);
}

/*
 * PageTransTail returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransTail(struct page *page)
{
	return PageTail(page);
}

#else
TESTPAGEFLAG_FALSE(TransHuge)
TESTPAGEFLAG_FALSE(TransCompound)
TESTPAGEFLAG_FALSE(TransTail)
#endif
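/*
 * Illustrative summary (not part of this header) of what each predicate
 * reports for the different compound page flavours:
 *
 *	page type		PageHuge  PageTransHuge  PageTransCompound
 *	normal page		   no	       no		no
 *	THP head page		   no	       yes		yes
 *	THP tail page		   no	    (VM_BUG_ON)		yes
 *	hugetlbfs head page	   yes	       yes		yes
 *
 * Hence the rule above: only call the PageTrans*() helpers on paths where
 * hugetlbfs pages cannot appear, and use PageHuge() (a real function call)
 * where they can.
 */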
/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 *
 * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to
 * -2 so that an underflow of the page_mapcount() won't be mistaken
 * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very
 * efficiently by most CPU architectures.
 */
#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)

static inline int PageBuddy(struct page *page)
{
	return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
}

static inline void __SetPageBuddy(struct page *page)
{
	VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
	atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
}

static inline void __ClearPageBuddy(struct page *page)
{
	VM_BUG_ON_PAGE(!PageBuddy(page), page);
	atomic_set(&page->_mapcount, -1);
}
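/*
 * Illustrative sketch (not part of this header): these markers overload
 * page->_mapcount, which is -1 for an ordinary unmapped page, so no flag bit
 * is consumed.  Roughly, in the page allocator:
 *
 *	free:	__SetPageBuddy(page);	  _mapcount: -1  -> -128
 *	alloc:	__ClearPageBuddy(page);	  _mapcount: -128 -> -1
 *
 * A mapped page has _mapcount >= 0, so the sentinels can never collide with
 * a genuinely mapped page; they only need to stay clear of the small negative
 * values that a mapcount underflow bug could reach.
 */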
#define PAGE_BALLOON_MAPCOUNT_VALUE (-256)

static inline int PageBalloon(struct page *page)
{
	return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE;
}

static inline void __SetPageBalloon(struct page *page)
{
	VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
	atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE);
}

static inline void __ClearPageBalloon(struct page *page)
{
	VM_BUG_ON_PAGE(!PageBalloon(page), page);
	atomic_set(&page->_mapcount, -1);
}

/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline int PageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	return PageActive(page);
}

static inline void SetPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	SetPageActive(page);
}

static inline void __ClearPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	__ClearPageActive(page);
}

static inline void ClearPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	ClearPageActive(page);
}
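/*
 * Illustrative note (not part of this header): slab pages are never on the
 * LRU, so PG_active is free to be reused on them as the "came from
 * pfmemalloc reserves" marker.  A slab allocator would use it roughly like:
 *
 *	page = alloc_pages(gfp, order);
 *	__SetPageSlab(page);
 *	if (page_is_pfmemalloc(page))		allocation dipped into reserves
 *		SetPageSlabPfmemalloc(page);
 *	...
 *	if (PageSlabPfmemalloc(page))		later: reserve such objects for
 *		...				PF_MEMALLOC (e.g. swap-over-net)
 *
 * page_is_pfmemalloc() is the mm helper reporting whether an allocation came
 * from the reserves.
 */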
#ifdef CONFIG_MMU
#define __PG_MLOCKED		(1 << PG_mlocked)
#else
#define __PG_MLOCKED		0
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __PG_COMPOUND_LOCK	(1 << PG_compound_lock)
#else
#define __PG_COMPOUND_LOCK	0
#endif

/*
 * Flags checked when a page is freed.  Pages being freed should not have
 * these flags set.  If they are, there is a problem.
 */
#define PAGE_FLAGS_CHECK_AT_FREE \
	(1 << PG_lru	 | 1 << PG_locked    | \
	 1 << PG_private | 1 << PG_private_2 | \
	 1 << PG_writeback | 1 << PG_reserved | \
	 1 << PG_slab	 | 1 << PG_swapcache | 1 << PG_active | \
	 1 << PG_unevictable | __PG_MLOCKED | \
	 __PG_COMPOUND_LOCK)

/*
 * Flags checked when a page is prepped for return by the page allocator.
 * Pages being prepped should not have these flags set.  If they are set,
 * there has been a kernel bug or struct page corruption.
 *
 * __PG_HWPOISON is exceptional because it needs to be kept beyond a page's
 * alloc-free cycle to prevent the page from being reused.
 */
#define PAGE_FLAGS_CHECK_AT_PREP \
	(((1 << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)
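/*
 * Illustrative sketch (not part of this header): the page allocator applies
 * these masks on both sides of a page's lifetime, roughly:
 *
 *	free:	if (page->flags & PAGE_FLAGS_CHECK_AT_FREE)
 *			bad_page(page);	   freed while apparently still in use
 *	alloc:	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
 *			bad_page(page);	   leftover state from the last owner
 *
 * See the free/prep paths in mm/page_alloc.c for the real checks; only
 * __PG_HWPOISON is allowed to survive an alloc-free cycle, which is why it
 * is masked out of PAGE_FLAGS_CHECK_AT_PREP.
 */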
#define PAGE_FLAGS_PRIVATE				\
	(1 << PG_private | 1 << PG_private_2)

/**
 * page_has_private - Determine if page has private stuff
 * @page: The page to be checked
 *
 * Determine if a page has private stuff, indicating that release routines
 * should be invoked upon it.
 */
static inline int page_has_private(struct page *page)
{
	return !!(page->flags & PAGE_FLAGS_PRIVATE);
}
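/*
 * Illustrative sketch (not part of this header): truncation and reclaim use
 * page_has_private() to decide whether the owning filesystem must be asked
 * to drop its attachments before the page can go away, roughly:
 *
 *	if (page_has_private(page) &&
 *	    !try_to_release_page(page, GFP_KERNEL))
 *		give up on this page for now;
 *
 * try_to_release_page() ends up in the filesystem's ->releasepage(), which
 * is expected to clear PG_private / PG_private_2 when it succeeds.
 */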
#undef PF_ANY
#undef PF_HEAD
#undef PF_NO_TAIL
#undef PF_NO_COMPOUND
#endif /* !__GENERATING_BOUNDS_H */

#endif /* PAGE_FLAGS_H */