/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Macros for manipulating and testing page->flags
 */

#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mmdebug.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <generated/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages. The "struct page" of such a page
 * should in general not be touched (e.g. set dirty) except by its owner.
 * Pages marked as PG_reserved include:
 * - Pages part of the kernel image (including vDSO) and similar (e.g. BIOS,
 *   initrd, HW tables)
 * - Pages reserved or allocated early during boot (before the page allocator
 *   was initialized). This includes (depending on the architecture) the
 *   initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much
 *   much more. Once (if ever) freed, PG_reserved is cleared and they will
 *   be given to the page allocator.
 * - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying
 *   to read/write these pages might end badly. Don't touch!
 * - The zero page(s)
 * - Pages not added to the page allocator when onlining a section because
 *   they were excluded via the online_page_callback() or because they are
 *   PG_hwpoison.
 * - Pages allocated in the context of kexec/kdump (loaded kernel image,
 *   control pages, vmcoreinfo)
 * - MMIO/DMA pages. Some architectures don't allow ioremap of pages that are
 *   not marked PG_reserved (as they might be in use by somebody else who does
 *   not respect the caching strategy).
 * - Pages part of an offline section (struct pages of offline sections should
 *   not be trusted as they will be initialized when first onlined).
 * - MCA pages on ia64
 * - Pages holding CPU notes for POWER Firmware Assisted Dump
 * - Device memory (e.g. PMEM, DAX, HMM)
 * Some PG_reserved pages will be excluded from the hibernation image.
 * PG_reserved does in general not hinder anybody from dumping or swapping
 * and is no longer required for remap_pfn_range(). ioremap might require it.
 * Consequently, PG_reserved for a page mapped into user space can indicate
 * the zero page, the vDSO, MMIO pages or device memory.
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). It can be used by
 * private allocations for their own purposes.
 *
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_swapbacked is set when a page uses swap as a backing storage. These are
 * usually PageAnon or shmem pages but please note that even anonymous pages
 * might lose their PG_swapbacked flag when they simply can be dropped (e.g. as
 * a result of MADV_FREE).
 *
 * PG_uptodate tells whether the page's contents are valid. When a read
 * completes, the page becomes uptodate, unless a disk I/O error happened.
 *
 * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
 * PG_arch_1 is an architecture specific page state bit. The generic code
 * guarantees that this bit is cleared for a page when it first is entered into
 * the page cache.
 *
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * data with incorrect ECC bits that triggered a machine check. Accessing is
 * not safe since it may cause another machine check. Don't touch!
 */

/*
 * Don't use the pageflags directly.  Use the PageFoo macros.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *               (NR_PAGEFLAGS)
 *
 * The fields area is reserved for fields mapping zone, node (for NUMA) and
 * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
 * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 */
enum pageflags {
	PG_locked,		/* Page is locked. Don't touch. */
	PG_referenced,
	PG_uptodate,
	PG_dirty,
	PG_lru,
	PG_active,
	PG_workingset,
	PG_waiters,		/* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
	PG_error,
	PG_slab,
	PG_owner_priv_1,	/* Owner use. If pagecache, fs may use */
	PG_arch_1,
	PG_reserved,
	PG_private,		/* If pagecache, has fs-private data */
	PG_private_2,		/* If pagecache, has fs aux data */
	PG_writeback,		/* Page is under writeback */
	PG_head,		/* A head page */
	PG_mappedtodisk,	/* Has blocks allocated on-disk */
	PG_reclaim,		/* To be reclaimed asap */
	PG_swapbacked,		/* Page is backed by RAM/swap */
	PG_unevictable,		/* Page is "unevictable" */
#ifdef CONFIG_MMU
	PG_mlocked,		/* Page is vma mlocked */
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	PG_uncached,		/* Page has been mapped as uncached */
#endif
#ifdef CONFIG_MEMORY_FAILURE
	PG_hwpoison,		/* hardware poisoned page. Don't touch */
#endif
#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
	PG_young,
	PG_idle,
#endif
#ifdef CONFIG_64BIT
	PG_arch_2,
#endif
#ifdef CONFIG_KASAN_HW_TAGS
	PG_skip_kasan_poison,
#endif
	__NR_PAGEFLAGS,

	/* Filesystems */
	PG_checked = PG_owner_priv_1,

	/* SwapBacked */
	PG_swapcache = PG_owner_priv_1,	/* Swap page: swp_entry_t in private */

	/* Two page bits are conscripted by FS-Cache to maintain local caching
	 * state.  These bits are set on pages belonging to the netfs's inodes
	 * when those inodes are being locally cached.
	 */
	PG_fscache = PG_private_2,	/* page backed by cache */

	/* XEN */
	/* Pinned in Xen as a read-only pagetable page. */
	PG_pinned = PG_owner_priv_1,
	/* Pinned as part of domain save (see xen_mm_pin_all()). */
	PG_savepinned = PG_dirty,
	/* Has a grant mapping of another (foreign) domain's page. */
	PG_foreign = PG_owner_priv_1,
	/* Remapped by swiotlb-xen. */
	PG_xen_remapped = PG_owner_priv_1,

	/* SLOB */
	PG_slob_free = PG_private,

	/* Compound pages. Stored in first tail page's flags */
	PG_double_map = PG_workingset,

	/* non-lru isolated movable page */
	PG_isolated = PG_reclaim,

	/* Only valid for buddy pages. Used to track pages that are reported */
	PG_reported = PG_uptodate,
};

#define PAGEFLAGS_MASK		((1UL << NR_PAGEFLAGS) - 1)

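/*
 * Illustrative note (a sketch, not from the original header): the aliases
 * above share physical bits, so two aliased flags must never be live on the
 * same page at the same time. For example PG_checked, PG_swapcache,
 * PG_pinned and PG_foreign all alias PG_owner_priv_1:
 *
 *	SetPageChecked(page);	// sets the PG_owner_priv_1 bit
 *	PagePinned(page);	// tests that very same bit
 *
 * This is safe only because a filesystem page is never simultaneously a
 * Xen pagetable page or a swap cache page.
 */
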
#ifndef __GENERATING_BOUNDS_H

static inline unsigned long _compound_head(const struct page *page)
{
	unsigned long head = READ_ONCE(page->compound_head);

	if (unlikely(head & 1))
		return head - 1;
	return (unsigned long)page;
}

#define compound_head(page)	((typeof(page))_compound_head(page))

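/*
 * Illustrative sketch (not from the original header): bit 0 of
 * page->compound_head doubles as the tail-page marker, so for a tail page
 * the field holds the head page pointer plus one:
 *
 *	unsigned long head = READ_ONCE(tail->compound_head);
 *	head & 1			// 1: this is a tail page
 *	(struct page *)(head - 1)	// the head page itself
 *
 * For a non-tail page bit 0 is clear (the field overlays a list pointer,
 * which is always even), and compound_head() returns the page unchanged.
 */
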
static __always_inline int PageTail(struct page *page)
{
	return READ_ONCE(page->compound_head) & 1;
}

static __always_inline int PageCompound(struct page *page)
{
	return test_bit(PG_head, &page->flags) || PageTail(page);
}

#define	PAGE_POISON_PATTERN	-1l
static inline int PagePoisoned(const struct page *page)
{
	return page->flags == PAGE_POISON_PATTERN;
}

#ifdef CONFIG_DEBUG_VM
void page_init_poison(struct page *page, size_t size);
#else
static inline void page_init_poison(struct page *page, size_t size)
{
}
#endif

/*
 * Page flags policies wrt compound pages
 *
 * PF_POISONED_CHECK
 *     check if this struct page is poisoned/uninitialized
 *
 * PF_ANY:
 *     the page flag is relevant for small, head and tail pages.
 *
 * PF_HEAD:
 *     for compound pages, all operations related to the page flag are
 *     applied to the head page.
 *
 * PF_ONLY_HEAD:
 *     for compound pages, callers only ever operate on the head page.
 *
 * PF_NO_TAIL:
 *     modifications of the page flag must be done on small or head pages,
 *     checks can be done on tail pages too.
 *
 * PF_NO_COMPOUND:
 *     the page flag is not relevant for compound pages.
 *
 * PF_SECOND:
 *     the page flag is stored in the first tail page.
 */
#define PF_POISONED_CHECK(page) ({					\
		VM_BUG_ON_PGFLAGS(PagePoisoned(page), page);		\
		page; })
#define PF_ANY(page, enforce)	PF_POISONED_CHECK(page)
#define PF_HEAD(page, enforce)	PF_POISONED_CHECK(compound_head(page))
#define PF_ONLY_HEAD(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(PageTail(page), page);		\
		PF_POISONED_CHECK(page); })
#define PF_NO_TAIL(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);	\
		PF_POISONED_CHECK(compound_head(page)); })
#define PF_NO_COMPOUND(page, enforce) ({				\
		VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page);	\
		PF_POISONED_CHECK(page); })
#define PF_SECOND(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(!PageHead(page), page);		\
		PF_POISONED_CHECK(&page[1]); })

/*
 * Macros to create function definitions for page flags
 */
#define TESTPAGEFLAG(uname, lname, policy)				\
static __always_inline int Page##uname(struct page *page)		\
	{ return test_bit(PG_##lname, &policy(page, 0)->flags); }

#define SETPAGEFLAG(uname, lname, policy)				\
static __always_inline void SetPage##uname(struct page *page)		\
	{ set_bit(PG_##lname, &policy(page, 1)->flags); }

#define CLEARPAGEFLAG(uname, lname, policy)				\
static __always_inline void ClearPage##uname(struct page *page)	\
	{ clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define __SETPAGEFLAG(uname, lname, policy)				\
static __always_inline void __SetPage##uname(struct page *page)	\
	{ __set_bit(PG_##lname, &policy(page, 1)->flags); }

#define __CLEARPAGEFLAG(uname, lname, policy)				\
static __always_inline void __ClearPage##uname(struct page *page)	\
	{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTSETFLAG(uname, lname, policy)				\
static __always_inline int TestSetPage##uname(struct page *page)	\
	{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTCLEARFLAG(uname, lname, policy)				\
static __always_inline int TestClearPage##uname(struct page *page)	\
	{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define PAGEFLAG(uname, lname, policy)					\
	TESTPAGEFLAG(uname, lname, policy)				\
	SETPAGEFLAG(uname, lname, policy)				\
	CLEARPAGEFLAG(uname, lname, policy)

#define __PAGEFLAG(uname, lname, policy)				\
	TESTPAGEFLAG(uname, lname, policy)				\
	__SETPAGEFLAG(uname, lname, policy)				\
	__CLEARPAGEFLAG(uname, lname, policy)

#define TESTSCFLAG(uname, lname, policy)				\
	TESTSETFLAG(uname, lname, policy)				\
	TESTCLEARFLAG(uname, lname, policy)

#define TESTPAGEFLAG_FALSE(uname)					\
static inline int Page##uname(const struct page *page) { return 0; }

#define SETPAGEFLAG_NOOP(uname)						\
static inline void SetPage##uname(struct page *page) {  }

#define CLEARPAGEFLAG_NOOP(uname)					\
static inline void ClearPage##uname(struct page *page) {  }

#define __CLEARPAGEFLAG_NOOP(uname)					\
static inline void __ClearPage##uname(struct page *page) {  }

#define TESTSETFLAG_FALSE(uname)					\
static inline int TestSetPage##uname(struct page *page) { return 0; }

#define TESTCLEARFLAG_FALSE(uname)					\
static inline int TestClearPage##uname(struct page *page) { return 0; }

#define PAGEFLAG_FALSE(uname) TESTPAGEFLAG_FALSE(uname)			\
	SETPAGEFLAG_NOOP(uname) CLEARPAGEFLAG_NOOP(uname)

#define TESTSCFLAG_FALSE(uname)						\
	TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname)

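/*
 * Illustrative expansion (a sketch, not from the original header): with the
 * generators above, PAGEFLAG(Dirty, dirty, PF_HEAD) produces roughly:
 *
 *	static __always_inline int PageDirty(struct page *page)
 *	{ return test_bit(PG_dirty, &PF_HEAD(page, 0)->flags); }
 *
 *	static __always_inline void SetPageDirty(struct page *page)
 *	{ set_bit(PG_dirty, &PF_HEAD(page, 1)->flags); }
 *
 *	static __always_inline void ClearPageDirty(struct page *page)
 *	{ clear_bit(PG_dirty, &PF_HEAD(page, 1)->flags); }
 *
 * where the PF_HEAD policy redirects each operation to the head page of a
 * compound page (after the poison check).
 */
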
__PAGEFLAG(Locked, locked, PF_NO_TAIL)
PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) __CLEARPAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL)
PAGEFLAG(Referenced, referenced, PF_HEAD)
	TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
	__SETPAGEFLAG(Referenced, referenced, PF_HEAD)
PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
	__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
	TESTCLEARFLAG(LRU, lru, PF_HEAD)
PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
	TESTCLEARFLAG(Active, active, PF_HEAD)
PAGEFLAG(Workingset, workingset, PF_HEAD)
	TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
__PAGEFLAG(Slab, slab, PF_NO_TAIL)
__PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND)	/* Used by some filesystems */

/* Xen */
PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
	TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);
PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
	TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)

PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)

/*
 * Private page markings that may be used by the filesystem that owns the page
 * for its own purposes.
 * - PG_private and PG_private_2 cause releasepage() and co to be invoked
 */
PAGEFLAG(Private, private, PF_ANY)
PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
	TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)

/*
 * Only test-and-set operations exist for PG_writeback. The unconditional
 * operators are risky: they bypass page accounting.
 */
TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL)
	TESTSCFLAG(Writeback, writeback, PF_NO_TAIL)
PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)

/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
	TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND)
	TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND)

#ifdef CONFIG_HIGHMEM
/*
 * Must use a macro here due to header dependency issues. page_zone() is not
 * available at this point.
 */
#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
#else
PAGEFLAG_FALSE(HighMem)
#endif

#ifdef CONFIG_SWAP
static __always_inline int PageSwapCache(struct page *page)
{
#ifdef CONFIG_THP_SWAP
	page = compound_head(page);
#endif
	return PageSwapBacked(page) && test_bit(PG_swapcache, &page->flags);
}
SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(SwapCache)
#endif

PAGEFLAG(Unevictable, unevictable, PF_HEAD)
	__CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
	TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)

#ifdef CONFIG_MMU
PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	__CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(Mlocked) __CLEARPAGEFLAG_NOOP(Mlocked)
	TESTSCFLAG_FALSE(Mlocked)
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
#else
PAGEFLAG_FALSE(Uncached)
#endif

#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
extern bool take_page_off_buddy(struct page *page);
#else
PAGEFLAG_FALSE(HWPoison)
#define __PG_HWPOISON 0
#endif

#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
TESTPAGEFLAG(Young, young, PF_ANY)
SETPAGEFLAG(Young, young, PF_ANY)
TESTCLEARFLAG(Young, young, PF_ANY)
PAGEFLAG(Idle, idle, PF_ANY)
#endif

#ifdef CONFIG_KASAN_HW_TAGS
PAGEFLAG(SkipKASanPoison, skip_kasan_poison, PF_HEAD)
#else
PAGEFLAG_FALSE(SkipKASanPoison)
#endif

/*
 * PageReported() is used to track reported free pages within the Buddy
 * allocator. We can use the non-atomic version of the test and set
 * operations as both should be shielded with the zone lock to prevent
 * any possible races on the setting or clearing of the bit.
 */
__PAGEFLAG(Reported, reported, PF_NO_COMPOUND)

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
 * bit; and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page.  See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-lru movable
 * pages, and then page->mapping points to a struct address_space.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 */
#define PAGE_MAPPING_ANON	0x1
#define PAGE_MAPPING_MOVABLE	0x2
#define PAGE_MAPPING_KSM	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)

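/*
 * Illustrative sketch (not from the original header): the low two bits of
 * page->mapping encode what the pointer means, so a hypothetical decoder
 * might look like:
 *
 *	unsigned long m = (unsigned long)page->mapping;
 *
 *	if ((m & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_KSM)
 *		...	// KSM: private stable-tree structure
 *	else if (m & PAGE_MAPPING_ANON)
 *		...	// anonymous: anon_vma
 *	else if (m & PAGE_MAPPING_MOVABLE)
 *		...	// non-lru movable: address_space
 *	else
 *		...	// pagecache: address_space
 */
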
static __always_inline int PageMappingFlags(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline int PageAnon(struct page *page)
{
	page = compound_head(page);
	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}

static __always_inline int __PageMovable(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_MOVABLE;
}

#ifdef CONFIG_KSM
/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static __always_inline int PageKsm(struct page *page)
{
	page = compound_head(page);
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_KSM;
}
#else
TESTPAGEFLAG_FALSE(Ksm)
#endif

u64 stable_page_flags(struct page *page);

static inline int PageUptodate(struct page *page)
{
	int ret;
	page = compound_head(page);
	ret = test_bit(PG_uptodate, &(page)->flags);
	/*
	 * Must ensure that the data we read out of the page is loaded
	 * _after_ we've loaded page->flags to check for PageUptodate.
	 * We can skip the barrier if the page is not uptodate, because
	 * we wouldn't be reading anything from it.
	 *
	 * See SetPageUptodate() for the other side of the story.
	 */
	if (ret)
		smp_rmb();

	return ret;
}

static __always_inline void __SetPageUptodate(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	smp_wmb();
	__set_bit(PG_uptodate, &page->flags);
}

static __always_inline void SetPageUptodate(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	/*
	 * Memory barrier must be issued before setting the PG_uptodate bit,
	 * so that all previous stores issued in order to bring the page
	 * uptodate are actually visible before PageUptodate becomes true.
	 */
	smp_wmb();
	set_bit(PG_uptodate, &page->flags);
}

CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)

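/*
 * Illustrative sketch (not from the original header): the smp_wmb()/smp_rmb()
 * pair above orders the page contents against PG_uptodate. A hypothetical
 * writer/reader pair (use() is a stand-in) would be:
 *
 *	// writer: fill the page, then publish
 *	memcpy(page_address(page), src, PAGE_SIZE);
 *	SetPageUptodate(page);		// smp_wmb() before set_bit()
 *
 *	// reader: observe the flag, then it is safe to read the data
 *	if (PageUptodate(page))		// smp_rmb() after test_bit()
 *		use(page_address(page));
 */
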
int test_clear_page_writeback(struct page *page);
int __test_set_page_writeback(struct page *page, bool keep_write);

#define test_set_page_writeback(page)			\
	__test_set_page_writeback(page, false)
#define test_set_page_writeback_keepwrite(page)	\
	__test_set_page_writeback(page, true)

static inline void set_page_writeback(struct page *page)
{
	test_set_page_writeback(page);
}

static inline void set_page_writeback_keepwrite(struct page *page)
{
	test_set_page_writeback_keepwrite(page);
}

__PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY)

static __always_inline void set_compound_head(struct page *page, struct page *head)
{
	WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}

static __always_inline void clear_compound_head(struct page *page)
{
	WRITE_ONCE(page->compound_head, 0);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
	BUG_ON(!PageHead(page));
	ClearPageHead(page);
}
#endif

#define PG_head_mask ((1UL << PG_head))

#ifdef CONFIG_HUGETLB_PAGE
int PageHuge(struct page *page);
int PageHeadHuge(struct page *page);
#else
TESTPAGEFLAG_FALSE(Huge)
TESTPAGEFLAG_FALSE(HeadHuge)
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * PageHuge() only returns true for hugetlbfs pages, but not for
 * normal or transparent huge pages.
 *
 * PageTransHuge() returns true for both transparent huge and
 * hugetlbfs pages, but not normal pages. PageTransHuge() can only be
 * called in the core VM paths where hugetlbfs pages can't exist.
 */
static inline int PageTransHuge(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	return PageHead(page);
}

/*
 * PageTransCompound returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransCompound(struct page *page)
{
	return PageCompound(page);
}

/*
 * PageTransCompoundMap is the same as PageTransCompound, but it also
 * guarantees the primary MMU has the entire compound page mapped
 * through pmd_trans_huge, which in turn guarantees the secondary MMUs
 * can also map the entire compound page. This allows the secondary
 * MMUs to call get_user_pages() only once for each compound page and
 * to immediately map the entire compound page with a single secondary
 * MMU fault. If there will be a pmd split later, the secondary MMUs
 * will get an update through the MMU notifier invalidation through
 * split_huge_pmd().
 *
 * Unlike PageTransCompound, this is safe to be called only while
 * split_huge_pmd() cannot run from under us, like if protected by the
 * MMU notifier, otherwise it may result in page->_mapcount check false
 * positives.
 *
 * We have to treat page cache THP differently since every subpage of it
 * would get _mapcount inc'ed once it is PMD mapped. But it may be PTE
 * mapped in the current process, so we compare the subpage's _mapcount
 * with the compound_mapcount to filter out the PTE-mapped case.
 */
static inline int PageTransCompoundMap(struct page *page)
{
	struct page *head;

	if (!PageTransCompound(page))
		return 0;

	if (PageAnon(page))
		return atomic_read(&page->_mapcount) < 0;

	head = compound_head(page);
	/* File THP is PMD mapped and not PTE mapped */
	return atomic_read(&page->_mapcount) ==
	       atomic_read(compound_mapcount_ptr(head));
}

/*
 * PageTransTail returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransTail(struct page *page)
{
	return PageTail(page);
}

/*
 * PageDoubleMap indicates that the compound page is mapped with PTEs as well
 * as PMDs.
 *
 * This is required for optimization of rmap operations for THP: we can
 * postpone per small page mapcount accounting (and its overhead from atomic
 * operations) until the first PMD split.
 *
 * For the page PageDoubleMap means that ->_mapcount in all sub-pages is
 * offset up by one. This reference will go away with the last
 * compound_mapcount.
 *
 * See also __split_huge_pmd_locked() and page_remove_anon_compound_rmap().
 */
PAGEFLAG(DoubleMap, double_map, PF_SECOND)
	TESTSCFLAG(DoubleMap, double_map, PF_SECOND)
#else
TESTPAGEFLAG_FALSE(TransHuge)
TESTPAGEFLAG_FALSE(TransCompound)
TESTPAGEFLAG_FALSE(TransCompoundMap)
TESTPAGEFLAG_FALSE(TransTail)
PAGEFLAG_FALSE(DoubleMap)
	TESTSCFLAG_FALSE(DoubleMap)
#endif

/*
 * Check if a page is currently marked HWPoisoned. Note that this check is
 * best effort only and inherently racy: there is no way to synchronize with
 * failing hardware.
 */
static inline bool is_page_hwpoison(struct page *page)
{
	if (PageHWPoison(page))
		return true;
	return PageHuge(page) && PageHWPoison(compound_head(page));
}

/*
 * For pages that are never mapped to userspace (and aren't PageSlab),
 * page_type may be used.  Because it is initialised to -1, we invert the
 * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and
 * __ClearPageFoo *sets* the bit used for PageFoo.  We reserve a few high and
 * low bits so that an underflow or overflow of page_mapcount() won't be
 * mistaken for a page type value.
 */

#define PAGE_TYPE_BASE	0xf0000000
/* Reserve		0x0000007f to catch underflows of page_mapcount */
#define PAGE_MAPCOUNT_RESERVE	-128
#define PG_buddy	0x00000080
#define PG_offline	0x00000100
#define PG_table	0x00000200
#define PG_guard	0x00000400

#define PageType(page, flag)						\
	((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)

static inline int page_has_type(struct page *page)
{
	return (int)page->page_type < PAGE_MAPCOUNT_RESERVE;
}

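/*
 * Illustrative sketch (not from the original header): with page_type
 * initialised to -1 (0xffffffff), marking a free page as buddy *clears*
 * PG_buddy:
 *
 *	page->page_type == 0xffffffff	// no type set
 *	__SetPageBuddy(page);		// page_type &= ~PG_buddy -> 0xffffff7f
 *	PageBuddy(page);		// (page_type & 0xf0000080) == 0xf0000000
 *	__ClearPageBuddy(page);		// page_type |= PG_buddy  -> 0xffffffff
 */
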
#define PAGE_TYPE_OPS(uname, lname)					\
static __always_inline int Page##uname(struct page *page)		\
{									\
	return PageType(page, PG_##lname);				\
}									\
static __always_inline void __SetPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!PageType(page, 0), page);			\
	page->page_type &= ~PG_##lname;					\
}									\
static __always_inline void __ClearPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!Page##uname(page), page);			\
	page->page_type |= PG_##lname;					\
}

/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 */
PAGE_TYPE_OPS(Buddy, buddy)

/*
 * PageOffline() indicates that the page is logically offline although the
 * containing section is online. (e.g. inflated in a balloon driver or
 * not onlined when onlining the section).
 * The content of these pages is effectively stale. Such pages should not
 * be touched (read/write/dump/save) except by their owner.
 *
 * If a driver wants to allow offlining of unmovable PageOffline() pages
 * without putting them back to the buddy, it can do so via the memory
 * notifier by decrementing the reference count in MEM_GOING_OFFLINE and
 * incrementing the reference count in MEM_CANCEL_OFFLINE. When offlining,
 * the PageOffline() pages (now with a reference count of zero) are treated
 * like free pages, allowing the containing memory block to get offlined.
 * A driver that relies on this feature is aware that re-onlining the memory
 * block will require re-setting the pages PageOffline() and not giving them
 * to the buddy via online_page_callback_t.
 *
 * There are drivers that mark a page PageOffline() and expect there won't be
 * any further access to page content. PFN walkers that read content of random
 * pages should check PageOffline() and synchronize with such drivers using
 * page_offline_freeze()/page_offline_thaw().
 */
PAGE_TYPE_OPS(Offline, offline)

extern void page_offline_freeze(void);
extern void page_offline_thaw(void);
extern void page_offline_begin(void);
extern void page_offline_end(void);

/*
 * Marks pages in use as page tables.
 */
PAGE_TYPE_OPS(Table, table)

/*
 * Marks guardpages used with debug_pagealloc.
 */
PAGE_TYPE_OPS(Guard, guard)

extern bool is_free_buddy_page(struct page *page);

__PAGEFLAG(Isolated, isolated, PF_ANY);

/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline int PageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	return PageActive(page);
}

static inline void SetPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	SetPageActive(page);
}

static inline void __ClearPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	__ClearPageActive(page);
}

static inline void ClearPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	ClearPageActive(page);
}

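/*
 * Illustrative sketch (not from the original header): PG_active has no LRU
 * meaning for slab pages, so it can be safely reused as the pfmemalloc
 * marker. A slab allocator might tag a freshly allocated page roughly as:
 *
 *	if (page_is_pfmemalloc(page))		// came from the reserves
 *		SetPageSlabPfmemalloc(page);	// i.e. SetPageActive(page)
 */
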
#ifdef CONFIG_MMU
#define __PG_MLOCKED		(1UL << PG_mlocked)
#else
#define __PG_MLOCKED		0
#endif

/*
 * Flags checked when a page is freed.  Pages being freed should not have
 * these flags set.  If they are, there is a problem.
 */
#define PAGE_FLAGS_CHECK_AT_FREE				\
	(1UL << PG_lru		| 1UL << PG_locked	|	\
	 1UL << PG_private	| 1UL << PG_private_2	|	\
	 1UL << PG_writeback	| 1UL << PG_reserved	|	\
	 1UL << PG_slab		| 1UL << PG_active	|	\
	 1UL << PG_unevictable	| __PG_MLOCKED)

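/*
 * Illustrative sketch (not from the original header): the allocator's free
 * path applies this mask roughly as:
 *
 *	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE))
 *		bad_page(page);	// a flag that must be clear is still set
 *
 * (bad_page() here stands in for the allocator's internal error report.)
 */
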
/*
 * Flags checked when a page is prepped for return by the page allocator.
 * Pages being prepped should not have these flags set.  If they are set,
 * there has been a kernel bug or struct page corruption.
 *
 * __PG_HWPOISON is exceptional because it needs to be kept beyond page's
 * alloc-free cycle to prevent from reusing the page.
 */
#define PAGE_FLAGS_CHECK_AT_PREP	\
	(PAGEFLAGS_MASK & ~__PG_HWPOISON)

#define PAGE_FLAGS_PRIVATE				\
	(1UL << PG_private | 1UL << PG_private_2)
/**
 * page_has_private - Determine if page has private stuff
 * @page: The page to be checked
 *
 * Determine if a page has private stuff, indicating that release routines
 * should be invoked upon it.
 */
static inline int page_has_private(struct page *page)
{
	return !!(page->flags & PAGE_FLAGS_PRIVATE);
}

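/*
 * Illustrative sketch (not from the original header): reclaim-style callers
 * typically gate release routines on this test, roughly:
 *
 *	if (page_has_private(page) &&
 *	    !try_to_release_page(page, GFP_KERNEL))
 *		...	// fs still holds private state; skip this page
 */
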
#undef PF_ANY
#undef PF_HEAD
#undef PF_ONLY_HEAD
#undef PF_NO_TAIL
#undef PF_NO_COMPOUND
#undef PF_SECOND
#endif /* !__GENERATING_BOUNDS_H */

#endif /* PAGE_FLAGS_H */