/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Macros for manipulating and testing page->flags
 */

#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mmdebug.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <generated/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */
/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages. The "struct page" of such a page
 * should in general not be touched (e.g. set dirty) except by its owner.
 * Pages marked as PG_reserved include:
 * - Pages part of the kernel image (including vDSO) and similar (e.g. BIOS,
 *   initrd, HW tables)
 * - Pages reserved or allocated early during boot (before the page allocator
 *   was initialized). This includes (depending on the architecture) the
 *   initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much
 *   much more. Once (if ever) freed, PG_reserved is cleared and they will
 *   be given to the page allocator.
 * - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying
 *   to read/write these pages might end badly. Don't touch!
 * - The zero page(s)
 * - Pages not added to the page allocator when onlining a section because
 *   they were excluded via the online_page_callback() or because they are
 *   PG_hwpoison.
 * - Pages allocated in the context of kexec/kdump (loaded kernel image,
 *   control pages, vmcoreinfo)
 * - MMIO/DMA pages. Some architectures don't allow ioremap of pages that are
 *   not marked PG_reserved (as they might be in use by somebody else who does
 *   not respect the caching strategy).
 * - Pages part of an offline section (struct pages of offline sections should
 *   not be trusted as they will be initialized when first onlined).
 * - MCA pages on ia64
 * - Pages holding CPU notes for POWER Firmware Assisted Dump
 * - Device memory (e.g. PMEM, DAX, HMM)
 * Some PG_reserved pages will be excluded from the hibernation image.
 * PG_reserved does in general not hinder anybody from dumping or swapping
 * and is no longer required for remap_pfn_range(). ioremap might require it.
 * Consequently, PG_reserved for a page mapped into user space can indicate
 * the zero page, the vDSO, MMIO pages or device memory.
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). It can be used by
 * private allocations for their own purposes.
 *
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_uptodate tells whether the page's contents are valid. When a read
 * completes, the page becomes uptodate, unless a disk I/O error happened.
 *
 * PG_referenced and PG_reclaim are used for page reclaim of anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
 * PG_arch_1 is an architecture specific page state bit. The generic code
 * guarantees that this bit is cleared for a page when it first is entered into
 * the page cache.
 *
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * data with incorrect ECC bits that triggered a machine check. Accessing is
 * not safe since it may cause another machine check. Don't touch!
 */

/*
 * Don't use the *_dontuse flags.  Use the macros.  Otherwise you'll break
 * locked- and dirty-page accounting.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *               (NR_PAGEFLAGS)
 *
 * The fields area is reserved for fields mapping zone, node (for NUMA) and
 * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
 * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 */
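/*
 * Illustrative sketch (not part of this header): the fields area is decoded
 * with shift/mask pairs generated at build time from <generated/bounds.h>.
 * Assuming the usual ZONES_PGSHIFT/ZONES_MASK layout, reading the zone index
 * of a page looks roughly like:
 *
 *	static inline enum zone_type example_zonenum(const struct page *page)
 *	{
 *		return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 *	}
 *
 * The real accessor is page_zonenum(); the sketch only shows the
 * shift-and-mask idea behind the FIELD area.
 */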
enum pageflags {
	PG_locked,		/* Page is locked. Don't touch. */
	PG_referenced,
	PG_uptodate,
	PG_dirty,
	PG_lru,
	PG_active,
	PG_workingset,
	PG_waiters,		/* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
	PG_error,
	PG_slab,
	PG_owner_priv_1,	/* Owner use. If pagecache, fs may use */
	PG_arch_1,
	PG_reserved,
	PG_private,		/* If pagecache, has fs-private data */
	PG_private_2,		/* If pagecache, has fs aux data */
	PG_writeback,		/* Page is under writeback */
	PG_head,		/* A head page */
	PG_mappedtodisk,	/* Has blocks allocated on-disk */
	PG_reclaim,		/* To be reclaimed asap */
	PG_swapbacked,		/* Page is backed by RAM/swap */
	PG_unevictable,		/* Page is "unevictable" */
#ifdef CONFIG_MMU
	PG_mlocked,		/* Page is vma mlocked */
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	PG_uncached,		/* Page has been mapped as uncached */
#endif
#ifdef CONFIG_MEMORY_FAILURE
	PG_hwpoison,		/* hardware poisoned page. Don't touch */
#endif
#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
	PG_young,
	PG_idle,
#endif
	__NR_PAGEFLAGS,

	/* Filesystems */
	PG_checked = PG_owner_priv_1,

	/* SwapBacked */
	PG_swapcache = PG_owner_priv_1,	/* Swap page: swp_entry_t in private */

	/*
	 * Two page bits are conscripted by FS-Cache to maintain local caching
	 * state.  These bits are set on pages belonging to the netfs's inodes
	 * when those inodes are being locally cached.
	 */
	PG_fscache = PG_private_2,	/* page backed by cache */

	/* XEN */
	/* Pinned in Xen as a read-only pagetable page. */
	PG_pinned = PG_owner_priv_1,
	/* Pinned as part of domain save (see xen_mm_pin_all()). */
	PG_savepinned = PG_dirty,
	/* Has a grant mapping of another (foreign) domain's page. */
	PG_foreign = PG_owner_priv_1,
	/* Remapped by swiotlb-xen. */
	PG_xen_remapped = PG_owner_priv_1,

	/* SLOB */
	PG_slob_free = PG_private,

	/* Compound pages. Stored in first tail page's flags */
	PG_double_map = PG_private_2,

	/* non-lru isolated movable page */
	PG_isolated = PG_reclaim,
};
#ifndef __GENERATING_BOUNDS_H

struct page;	/* forward declaration */
static inline struct page *compound_head(struct page *page)
{
	unsigned long head = READ_ONCE(page->compound_head);

	if (unlikely(head & 1))
		return (struct page *) (head - 1);
	return page;
}
static __always_inline int PageTail(struct page *page)
{
	return READ_ONCE(page->compound_head) & 1;
}

static __always_inline int PageCompound(struct page *page)
{
	return test_bit(PG_head, &page->flags) || PageTail(page);
}
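/*
 * Illustrative sketch (not part of this header): bit 0 of
 * page->compound_head doubles as the "tail page" marker, so a tail page
 * stores its head pointer as (head + 1).  A hypothetical check built only
 * from the helpers above:
 *
 *	static inline bool example_is_tail_of(struct page *page,
 *					      struct page *head)
 *	{
 *		return PageTail(page) && compound_head(page) == head;
 *	}
 *
 * set_compound_head(), further down, is the writer side of this encoding.
 */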
#define PAGE_POISON_PATTERN	-1l
static inline int PagePoisoned(const struct page *page)
{
	return page->flags == PAGE_POISON_PATTERN;
}
#ifdef CONFIG_DEBUG_VM
void page_init_poison(struct page *page, size_t size);
#else
static inline void page_init_poison(struct page *page, size_t size)
{
}
#endif
/*
 * Page flags policies wrt compound pages
 *
 * PF_POISONED_CHECK
 *     check if this struct page poisoned/uninitialized
 *
 * PF_ANY:
 *     the page flag is relevant for small, head and tail pages.
 *
 * PF_HEAD:
 *     for compound page all operations related to the page flag applied to
 *     head page.
 *
 * PF_ONLY_HEAD:
 *     for compound page, callers only ever operate on the head page.
 *
 * PF_NO_TAIL:
 *     modifications of the page flag must be done on small or head pages,
 *     checks can be done on tail pages too.
 *
 * PF_NO_COMPOUND:
 *     the page flag is not relevant for compound pages.
 */
#define PF_POISONED_CHECK(page) ({					\
		VM_BUG_ON_PGFLAGS(PagePoisoned(page), page);		\
		page; })
#define PF_ANY(page, enforce)	PF_POISONED_CHECK(page)
#define PF_HEAD(page, enforce)	PF_POISONED_CHECK(compound_head(page))
#define PF_ONLY_HEAD(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(PageTail(page), page);		\
		PF_POISONED_CHECK(page); })
#define PF_NO_TAIL(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);	\
		PF_POISONED_CHECK(compound_head(page)); })
#define PF_NO_COMPOUND(page, enforce) ({				\
		VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page);	\
		PF_POISONED_CHECK(page); })
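/*
 * Illustrative sketch (not part of this header): the policy macros are what
 * make e.g. PageDirty() on a tail page operate on its head page.  With the
 * PF_HEAD policy, a hypothetical flag test for a hypothetical PG_foo bit
 * expands roughly to:
 *
 *	int PageFoo(struct page *page)
 *	{
 *		return test_bit(PG_foo, &compound_head(page)->flags);
 *	}
 *
 * (plus the PagePoisoned() debug check), while PF_NO_COMPOUND would instead
 * VM_BUG() on any compound page when enforce is set.
 */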
/*
 * Macros to create function definitions for page flags
 */
#define TESTPAGEFLAG(uname, lname, policy)				\
static __always_inline int Page##uname(struct page *page)		\
	{ return test_bit(PG_##lname, &policy(page, 0)->flags); }

#define SETPAGEFLAG(uname, lname, policy)				\
static __always_inline void SetPage##uname(struct page *page)		\
	{ set_bit(PG_##lname, &policy(page, 1)->flags); }

#define CLEARPAGEFLAG(uname, lname, policy)				\
static __always_inline void ClearPage##uname(struct page *page)	\
	{ clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define __SETPAGEFLAG(uname, lname, policy)				\
static __always_inline void __SetPage##uname(struct page *page)	\
	{ __set_bit(PG_##lname, &policy(page, 1)->flags); }

#define __CLEARPAGEFLAG(uname, lname, policy)				\
static __always_inline void __ClearPage##uname(struct page *page)	\
	{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTSETFLAG(uname, lname, policy)				\
static __always_inline int TestSetPage##uname(struct page *page)	\
	{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTCLEARFLAG(uname, lname, policy)				\
static __always_inline int TestClearPage##uname(struct page *page)	\
	{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define PAGEFLAG(uname, lname, policy)					\
	TESTPAGEFLAG(uname, lname, policy)				\
	SETPAGEFLAG(uname, lname, policy)				\
	CLEARPAGEFLAG(uname, lname, policy)

#define __PAGEFLAG(uname, lname, policy)				\
	TESTPAGEFLAG(uname, lname, policy)				\
	__SETPAGEFLAG(uname, lname, policy)				\
	__CLEARPAGEFLAG(uname, lname, policy)

#define TESTSCFLAG(uname, lname, policy)				\
	TESTSETFLAG(uname, lname, policy)				\
	TESTCLEARFLAG(uname, lname, policy)

#define TESTPAGEFLAG_FALSE(uname)					\
static inline int Page##uname(const struct page *page) { return 0; }

#define SETPAGEFLAG_NOOP(uname)						\
static inline void SetPage##uname(struct page *page) {  }

#define CLEARPAGEFLAG_NOOP(uname)					\
static inline void ClearPage##uname(struct page *page) {  }

#define __CLEARPAGEFLAG_NOOP(uname)					\
static inline void __ClearPage##uname(struct page *page) {  }

#define TESTSETFLAG_FALSE(uname)					\
static inline int TestSetPage##uname(struct page *page) { return 0; }

#define TESTCLEARFLAG_FALSE(uname)					\
static inline int TestClearPage##uname(struct page *page) { return 0; }

#define PAGEFLAG_FALSE(uname) TESTPAGEFLAG_FALSE(uname)			\
	SETPAGEFLAG_NOOP(uname) CLEARPAGEFLAG_NOOP(uname)

#define TESTSCFLAG_FALSE(uname)						\
	TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname)
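/*
 * Illustrative sketch (not part of this header): for a hypothetical flag
 * PG_foo, PAGEFLAG(Foo, foo, PF_HEAD) generates:
 *
 *	static __always_inline int PageFoo(struct page *page)
 *		{ return test_bit(PG_foo, &PF_HEAD(page, 0)->flags); }
 *	static __always_inline void SetPageFoo(struct page *page)
 *		{ set_bit(PG_foo, &PF_HEAD(page, 1)->flags); }
 *	static __always_inline void ClearPageFoo(struct page *page)
 *		{ clear_bit(PG_foo, &PF_HEAD(page, 1)->flags); }
 *
 * The __SETPAGEFLAG/__CLEARPAGEFLAG variants use the non-atomic
 * __set_bit()/__clear_bit() and are only safe when the caller knows nobody
 * else can touch page->flags concurrently.
 */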
__PAGEFLAG(Locked, locked, PF_NO_TAIL)
PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) __CLEARPAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
PAGEFLAG(Error, error, PF_NO_COMPOUND) TESTCLEARFLAG(Error, error, PF_NO_COMPOUND)
PAGEFLAG(Referenced, referenced, PF_HEAD)
	TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
	__SETPAGEFLAG(Referenced, referenced, PF_HEAD)
PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
	__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
	TESTCLEARFLAG(Active, active, PF_HEAD)
PAGEFLAG(Workingset, workingset, PF_HEAD)
	TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
__PAGEFLAG(Slab, slab, PF_NO_TAIL)
__PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND)	/* Used by some filesystems */

/* Xen */
PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
	TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);
PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
	TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)

PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
/*
 * Private page markings that may be used by the filesystem that owns the page
 * for its own purposes.
 * - PG_private and PG_private_2 cause releasepage() and co to be invoked
 */
PAGEFLAG(Private, private, PF_ANY) __SETPAGEFLAG(Private, private, PF_ANY)
	__CLEARPAGEFLAG(Private, private, PF_ANY)
PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
	TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
/*
 * Only test-and-set exist for PG_writeback.  The unconditional operators are
 * risky: they bypass page accounting.
 */
TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL)
	TESTSCFLAG(Writeback, writeback, PF_NO_TAIL)
PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)

/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
	TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND)
	TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND)
#ifdef CONFIG_HIGHMEM
/*
 * Must use a macro here due to header dependency issues. page_zone() is not
 * available at this point.
 */
#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
#else
PAGEFLAG_FALSE(HighMem)
#endif
#ifdef CONFIG_SWAP
static __always_inline int PageSwapCache(struct page *page)
{
#ifdef CONFIG_THP_SWAP
	page = compound_head(page);
#endif
	return PageSwapBacked(page) && test_bit(PG_swapcache, &page->flags);
}
SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(SwapCache)
#endif
PAGEFLAG(Unevictable, unevictable, PF_HEAD)
	__CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
	TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)

#ifdef CONFIG_MMU
PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	__CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(Mlocked) __CLEARPAGEFLAG_NOOP(Mlocked)
	TESTSCFLAG_FALSE(Mlocked)
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
#else
PAGEFLAG_FALSE(Uncached)
#endif
#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
extern bool set_hwpoison_free_buddy_page(struct page *page);
#else
PAGEFLAG_FALSE(HWPoison)
static inline bool set_hwpoison_free_buddy_page(struct page *page)
{
	return false;
}
#define __PG_HWPOISON 0
#endif
#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
TESTPAGEFLAG(Young, young, PF_ANY)
SETPAGEFLAG(Young, young, PF_ANY)
TESTCLEARFLAG(Young, young, PF_ANY)
PAGEFLAG(Idle, idle, PF_ANY)
#endif
/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
 * bit; and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page.  See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-LRU movable
 * pages, in which case page->mapping points to a struct address_space.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 */
#define PAGE_MAPPING_ANON	0x1
#define PAGE_MAPPING_MOVABLE	0x2
#define PAGE_MAPPING_KSM	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
static __always_inline int PageMappingFlags(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline int PageAnon(struct page *page)
{
	page = compound_head(page);
	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}

static __always_inline int __PageMovable(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_MOVABLE;
}
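/*
 * Illustrative sketch (not part of this header): stripping the low flag
 * bits recovers the real pointer stored in page->mapping.  A hypothetical
 * helper, assuming only the definitions above:
 *
 *	static inline struct address_space *example_file_mapping(struct page *page)
 *	{
 *		unsigned long mapping = (unsigned long)page->mapping;
 *
 *		if (mapping & PAGE_MAPPING_ANON)
 *			return NULL;
 *		return (struct address_space *)(mapping & ~PAGE_MAPPING_FLAGS);
 *	}
 *
 * The real page_mapping() helper additionally special-cases swapcache
 * pages.
 */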
#ifdef CONFIG_KSM
/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static __always_inline int PageKsm(struct page *page)
{
	page = compound_head(page);
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_KSM;
}
#else
TESTPAGEFLAG_FALSE(Ksm)
#endif
u64 stable_page_flags(struct page *page);
static inline int PageUptodate(struct page *page)
{
	int ret;

	page = compound_head(page);
	ret = test_bit(PG_uptodate, &(page)->flags);
	/*
	 * Must ensure that the data we read out of the page is loaded
	 * _after_ we've loaded page->flags to check for PageUptodate.
	 * We can skip the barrier if the page is not uptodate, because
	 * we wouldn't be reading anything from it.
	 *
	 * See SetPageUptodate() for the other side of the story.
	 */
	if (ret)
		smp_rmb();

	return ret;
}
static __always_inline void __SetPageUptodate(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	smp_wmb();
	__set_bit(PG_uptodate, &page->flags);
}
static __always_inline void SetPageUptodate(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	/*
	 * Memory barrier must be issued before setting the PG_uptodate bit,
	 * so that all previous stores issued in order to bring the page
	 * uptodate are actually visible before PageUptodate becomes true.
	 */
	smp_wmb();
	set_bit(PG_uptodate, &page->flags);
}
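/*
 * Illustrative sketch (not part of this header): the smp_wmb() in
 * SetPageUptodate() pairs with the smp_rmb() in PageUptodate(), so a
 * hypothetical read-side caller can safely do:
 *
 *	if (PageUptodate(page))
 *		memcpy(buf, page_address(page), PAGE_SIZE);
 *
 * Without that barrier pair, the copy could observe stale page contents
 * even though the uptodate bit was seen as set.
 */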
CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)
int test_clear_page_writeback(struct page *page);
int __test_set_page_writeback(struct page *page, bool keep_write);

#define test_set_page_writeback(page)			\
	__test_set_page_writeback(page, false)
#define test_set_page_writeback_keepwrite(page)	\
	__test_set_page_writeback(page, true)

static inline void set_page_writeback(struct page *page)
{
	test_set_page_writeback(page);
}

static inline void set_page_writeback_keepwrite(struct page *page)
{
	test_set_page_writeback_keepwrite(page);
}
__PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY)
static __always_inline void set_compound_head(struct page *page, struct page *head)
{
	WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}

static __always_inline void clear_compound_head(struct page *page)
{
	WRITE_ONCE(page->compound_head, 0);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
	BUG_ON(!PageHead(page));
	ClearPageHead(page);
}
#endif

#define PG_head_mask ((1UL << PG_head))
#ifdef CONFIG_HUGETLB_PAGE
int PageHuge(struct page *page);
int PageHeadHuge(struct page *page);
bool page_huge_active(struct page *page);
#else
TESTPAGEFLAG_FALSE(Huge)
TESTPAGEFLAG_FALSE(HeadHuge)

static inline bool page_huge_active(struct page *page)
{
	return false;
}
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * PageHuge() only returns true for hugetlbfs pages, but not for
 * normal or transparent huge pages.
 *
 * PageTransHuge() returns true for both transparent huge and
 * hugetlbfs pages, but not normal pages.  PageTransHuge() can only be
 * called in core VM paths where hugetlbfs pages can't exist.
 */
static inline int PageTransHuge(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	return PageHead(page);
}
/*
 * PageTransCompound returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransCompound(struct page *page)
{
	return PageCompound(page);
}
/*
 * PageTransCompoundMap is the same as PageTransCompound, but it also
 * guarantees the primary MMU has the entire compound page mapped
 * through pmd_trans_huge, which in turn guarantees the secondary MMUs
 * can also map the entire compound page. This allows the secondary
 * MMUs to call get_user_pages() only once for each compound page and
 * to immediately map the entire compound page with a single secondary
 * MMU fault. If there will be a pmd split later, the secondary MMUs
 * will get an update through the MMU notifier invalidation through
 * split_huge_pmd().
 *
 * Unlike PageTransCompound, this is safe to be called only while
 * split_huge_pmd() cannot run from under us, like if protected by the
 * MMU notifier, otherwise it may result in page->_mapcount < 0 false
 * positives.
 */
static inline int PageTransCompoundMap(struct page *page)
{
	return PageTransCompound(page) && atomic_read(&page->_mapcount) < 0;
}
/*
 * PageTransTail returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransTail(struct page *page)
{
	return PageTail(page);
}
/*
 * PageDoubleMap indicates that the compound page is mapped with PTEs as well
 * as PMDs.
 *
 * This is required for optimization of rmap operations for THP: we can postpone
 * per small page mapcount accounting (and its overhead from atomic operations)
 * until the first PMD split.
 *
 * For the page PageDoubleMap means ->_mapcount in all sub-pages is offset up
 * by one. This reference will go away with last compound_mapcount.
 *
 * See also __split_huge_pmd_locked() and page_remove_anon_compound_rmap().
 */
static inline int PageDoubleMap(struct page *page)
{
	return PageHead(page) && test_bit(PG_double_map, &page[1].flags);
}

static inline void SetPageDoubleMap(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	set_bit(PG_double_map, &page[1].flags);
}

static inline void ClearPageDoubleMap(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	clear_bit(PG_double_map, &page[1].flags);
}

static inline int TestSetPageDoubleMap(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	return test_and_set_bit(PG_double_map, &page[1].flags);
}

static inline int TestClearPageDoubleMap(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	return test_and_clear_bit(PG_double_map, &page[1].flags);
}
#else
TESTPAGEFLAG_FALSE(TransHuge)
TESTPAGEFLAG_FALSE(TransCompound)
TESTPAGEFLAG_FALSE(TransCompoundMap)
TESTPAGEFLAG_FALSE(TransTail)
PAGEFLAG_FALSE(DoubleMap)
	TESTSETFLAG_FALSE(DoubleMap)
	TESTCLEARFLAG_FALSE(DoubleMap)
#endif
/*
 * For pages that are never mapped to userspace (and aren't PageSlab),
 * page_type may be used.  Because it is initialised to -1, we invert the
 * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
 * __ClearPageFoo *sets* the bit used for PageFoo.  We reserve a few high and
 * low bits so that an underflow or overflow of page_mapcount() won't be
 * mistaken for a page type value.
 */

#define PAGE_TYPE_BASE	0xf0000000
/* Reserve		0x0000007f to catch underflows of page_mapcount */
#define PAGE_MAPCOUNT_RESERVE	-128
#define PG_buddy	0x00000080
#define PG_offline	0x00000100
#define PG_kmemcg	0x00000200
#define PG_table	0x00000400
#define PG_guard	0x00000800
#define PageType(page, flag)						\
	((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)

static inline int page_has_type(struct page *page)
{
	return (int)page->page_type < PAGE_MAPCOUNT_RESERVE;
}
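/*
 * Illustrative worked example (not part of this header): page_type starts
 * life as -1 (0xffffffff).  __SetPageBuddy() clears PG_buddy, giving
 * 0xffffff7f, and the test
 *
 *	(0xffffff7f & (PAGE_TYPE_BASE | PG_buddy)) == PAGE_TYPE_BASE
 *
 * holds because the masked value is exactly 0xf0000000, so PageBuddy()
 * reads true.  The reserved low 0x7f values mean a small page_mapcount()
 * underflow cannot disturb the 0xf0000000 top bits and masquerade as a
 * type value.
 */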
#define PAGE_TYPE_OPS(uname, lname)					\
static __always_inline int Page##uname(struct page *page)		\
{									\
	return PageType(page, PG_##lname);				\
}									\
static __always_inline void __SetPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!PageType(page, 0), page);			\
	page->page_type &= ~PG_##lname;					\
}									\
static __always_inline void __ClearPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!Page##uname(page), page);			\
	page->page_type |= PG_##lname;					\
}
/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 */
PAGE_TYPE_OPS(Buddy, buddy)

/*
 * PageOffline() indicates that the page is logically offline although the
 * containing section is online. (e.g. inflated in a balloon driver or
 * not onlined when onlining the section).
 * The content of these pages is effectively stale. Such pages should not
 * be touched (read/write/dump/save) except by their owner.
 */
PAGE_TYPE_OPS(Offline, offline)

/*
 * If kmemcg is enabled, the buddy allocator will set PageKmemcg() on
 * pages allocated with __GFP_ACCOUNT. It gets cleared on page free.
 */
PAGE_TYPE_OPS(Kmemcg, kmemcg)

/*
 * Marks pages in use as page tables.
 */
PAGE_TYPE_OPS(Table, table)

/*
 * Marks guardpages used with debug_pagealloc.
 */
PAGE_TYPE_OPS(Guard, guard)
extern bool is_free_buddy_page(struct page *page);

__PAGEFLAG(Isolated, isolated, PF_ANY);
/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline int PageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	return PageActive(page);
}

static inline void SetPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	SetPageActive(page);
}

static inline void __ClearPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	__ClearPageActive(page);
}

static inline void ClearPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	ClearPageActive(page);
}
#ifdef CONFIG_MMU
#define __PG_MLOCKED		(1UL << PG_mlocked)
#else
#define __PG_MLOCKED		0
#endif
/*
 * Flags checked when a page is freed.  Pages being freed should not have
 * these flags set.  If they are, there is a problem.
 */
#define PAGE_FLAGS_CHECK_AT_FREE				\
	(1UL << PG_lru		| 1UL << PG_locked	|	\
	 1UL << PG_private	| 1UL << PG_private_2	|	\
	 1UL << PG_writeback	| 1UL << PG_reserved	|	\
	 1UL << PG_slab		| 1UL << PG_active	|	\
	 1UL << PG_unevictable	| __PG_MLOCKED)
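/*
 * Illustrative sketch (not part of this header): the allocator's free path
 * applies this mask roughly like
 *
 *	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE))
 *		bad_page(page);
 *
 * (see the free-path checks in mm/page_alloc.c).  A page still marked e.g.
 * PG_locked or PG_writeback at free time indicates a use-after-free or a
 * missed I/O completion.
 */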
/*
 * Flags checked when a page is prepped for return by the page allocator.
 * Pages being prepped should not have these flags set.  If they are set,
 * there has been a kernel bug or struct page corruption.
 *
 * __PG_HWPOISON is exceptional because it needs to be kept beyond page's
 * alloc-free cycle to prevent from reusing the page.
 */
#define PAGE_FLAGS_CHECK_AT_PREP	\
	(((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)
#define PAGE_FLAGS_PRIVATE				\
	(1UL << PG_private | 1UL << PG_private_2)
/**
 * page_has_private - Determine if page has private stuff
 * @page: The page to be checked
 *
 * Determine if a page has private stuff, indicating that release routines
 * should be invoked upon it.
 */
static inline int page_has_private(struct page *page)
{
	return !!(page->flags & PAGE_FLAGS_PRIVATE);
}
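/*
 * Illustrative sketch (not part of this header): truncation and reclaim use
 * this predicate before dropping a page, roughly:
 *
 *	if (page_has_private(page) &&
 *	    !try_to_release_page(page, GFP_KERNEL))
 *		return;
 *
 * i.e. the filesystem gets a chance (via its releasepage hook) to drop
 * buffers or fscache state before the page can be freed.
 */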
#undef PF_ANY
#undef PF_HEAD
#undef PF_ONLY_HEAD
#undef PF_NO_TAIL
#undef PF_NO_COMPOUND
#endif /* !__GENERATING_BOUNDS_H */

#endif /* PAGE_FLAGS_H */