/*
 * Macros for manipulating and testing page->flags
 */

#ifndef PAGE_FLAGS_H
#define PAGE_FLAGS_H

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/mmdebug.h>
#ifndef __GENERATING_BOUNDS_H
#include <linux/mm_types.h>
#include <generated/bounds.h>
#endif /* !__GENERATING_BOUNDS_H */

/*
 * Various page->flags bits:
 *
 * PG_reserved is set for special pages, which can never be swapped out. Some
 * of them might not even exist (e.g. empty_bad_page)...
 *
 * The PG_private bitflag is set on pagecache pages if they contain filesystem
 * specific data (which is normally at page->private). It can also be used by
 * private allocations for their own purposes.
 *
 * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
 * and cleared when writeback _starts_ or when read _completes_. PG_writeback
 * is set before writeback starts and cleared when it finishes.
 *
 * PG_locked also pins a page in pagecache, and blocks truncation of the file
 * while it is held.
 *
 * page_waitqueue(page) is a wait queue of all tasks waiting for the page
 * to become unlocked.
 *
 * PG_uptodate tells whether the page's contents are valid. When a read
 * completes, the page becomes uptodate, unless a disk I/O error happened.
 *
 * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
 * file-backed pagecache (see mm/vmscan.c).
 *
 * PG_error is set to indicate that an I/O error occurred on this page.
 *
 * PG_arch_1 is an architecture specific page state bit. The generic code
 * guarantees that this bit is cleared for a page when it first is entered into
 * the page cache.
 *
 * PG_highmem pages are not permanently mapped into the kernel virtual address
 * space, they need to be kmapped separately for doing IO on the pages. The
 * struct page itself (which holds these flag bits) is always mapped into
 * kernel address space...
 *
 * PG_hwpoison indicates that a page got corrupted in hardware and contains
 * data with incorrect ECC bits that triggered a machine check. Accessing is
 * not safe since it may cause another machine check. Don't touch!
 */

/*
 * Don't use the *_dontuse flags. Use the macros. Otherwise you'll break
 * locked- and dirty-page accounting.
 *
 * The page flags field is split into two parts, the main flags area
 * which extends from the low bits upwards, and the fields area which
 * extends from the high bits downwards.
 *
 *  | FIELD | ... | FLAGS |
 *  N-1           ^       0
 *               (NR_PAGEFLAGS)
 *
 * The fields area is reserved for fields mapping zone, node (for NUMA) and
 * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
 * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
 */
enum pageflags {
	PG_locked,		/* Page is locked. Don't touch. */
	PG_error,
	PG_referenced,
	PG_uptodate,
	PG_dirty,
	PG_lru,
	PG_active,
	PG_slab,
	PG_owner_priv_1,	/* Owner use. If pagecache, fs may use */
	PG_arch_1,
	PG_reserved,
	PG_private,		/* If pagecache, has fs-private data */
	PG_private_2,		/* If pagecache, has fs aux data */
	PG_writeback,		/* Page is under writeback */
	PG_head,		/* A head page */
	PG_swapcache,		/* Swap page: swp_entry_t in private */
	PG_mappedtodisk,	/* Has blocks allocated on-disk */
	PG_reclaim,		/* To be reclaimed asap */
	PG_swapbacked,		/* Page is backed by RAM/swap */
	PG_unevictable,		/* Page is "unevictable" */
#ifdef CONFIG_MMU
	PG_mlocked,		/* Page is vma mlocked */
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
	PG_uncached,		/* Page has been mapped as uncached */
#endif
#ifdef CONFIG_MEMORY_FAILURE
	PG_hwpoison,		/* hardware poisoned page. Don't touch */
#endif
#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
	PG_young,
	PG_idle,
#endif
	__NR_PAGEFLAGS,

	/* Filesystems */
	PG_checked = PG_owner_priv_1,

	/* Two page bits are conscripted by FS-Cache to maintain local caching
	 * state.  These bits are set on pages belonging to the netfs's inodes
	 * when those inodes are being locally cached.
	 */
	PG_fscache = PG_private_2,	/* page backed by cache */

	/* XEN */
	/* Pinned in Xen as a read-only pagetable page. */
	PG_pinned = PG_owner_priv_1,
	/* Pinned as part of domain save (see xen_mm_pin_all()). */
	PG_savepinned = PG_dirty,
	/* Has a grant mapping of another (foreign) domain's page. */
	PG_foreign = PG_owner_priv_1,

	/* SLOB */
	PG_slob_free = PG_private,

	/* Compound pages. Stored in first tail page's flags */
	PG_double_map = PG_private_2,

	/* non-lru isolated movable page */
	PG_isolated = PG_reclaim,
};

#ifndef __GENERATING_BOUNDS_H

struct page;	/* forward declaration */

static inline struct page *compound_head(struct page *page)
{
	unsigned long head = READ_ONCE(page->compound_head);

	if (unlikely(head & 1))
		return (struct page *) (head - 1);
	return page;
}

static __always_inline int PageTail(struct page *page)
{
	return READ_ONCE(page->compound_head) & 1;
}

static __always_inline int PageCompound(struct page *page)
{
	return test_bit(PG_head, &page->flags) || PageTail(page);
}

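/*
 * Illustrative sketch (not a kernel helper): the three functions above rely
 * on bit 0 of page->compound_head acting as a tag.  For a tail page it holds
 * the head page's address with bit 0 set; for head and order-0 pages it is 0.
 * A hypothetical debug helper could decode it like this:
 *
 *	static inline void dump_compound_state(struct page *page)
 *	{
 *		unsigned long head = READ_ONCE(page->compound_head);
 *
 *		if (head & 1)
 *			pr_info("tail, head at %p\n", (void *)(head - 1));
 *		else if (test_bit(PG_head, &page->flags))
 *			pr_info("compound head\n");
 *		else
 *			pr_info("small (order-0) page\n");
 *	}
 */
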
/*
 * Page flags policies wrt compound pages
 *
 * PF_ANY:
 *     the page flag is relevant for small, head and tail pages.
 *
 * PF_HEAD:
 *     for compound pages, all operations related to the page flag are
 *     applied to the head page.
 *
 * PF_NO_TAIL:
 *     modifications of the page flag must be done on small or head pages,
 *     checks can be done on tail pages too.
 *
 * PF_NO_COMPOUND:
 *     the page flag is not relevant for compound pages.
 */
#define PF_ANY(page, enforce)	page
#define PF_HEAD(page, enforce)	compound_head(page)
#define PF_NO_TAIL(page, enforce) ({					\
		VM_BUG_ON_PGFLAGS(enforce && PageTail(page), page);	\
		compound_head(page);})
#define PF_NO_COMPOUND(page, enforce) ({				\
		VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page);	\
		page;})

/*
 * Macros to create function definitions for page flags
 */
#define TESTPAGEFLAG(uname, lname, policy)				\
static __always_inline int Page##uname(struct page *page)		\
	{ return test_bit(PG_##lname, &policy(page, 0)->flags); }

#define SETPAGEFLAG(uname, lname, policy)				\
static __always_inline void SetPage##uname(struct page *page)		\
	{ set_bit(PG_##lname, &policy(page, 1)->flags); }

#define CLEARPAGEFLAG(uname, lname, policy)				\
static __always_inline void ClearPage##uname(struct page *page)	\
	{ clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define __SETPAGEFLAG(uname, lname, policy)				\
static __always_inline void __SetPage##uname(struct page *page)	\
	{ __set_bit(PG_##lname, &policy(page, 1)->flags); }

#define __CLEARPAGEFLAG(uname, lname, policy)				\
static __always_inline void __ClearPage##uname(struct page *page)	\
	{ __clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTSETFLAG(uname, lname, policy)				\
static __always_inline int TestSetPage##uname(struct page *page)	\
	{ return test_and_set_bit(PG_##lname, &policy(page, 1)->flags); }

#define TESTCLEARFLAG(uname, lname, policy)				\
static __always_inline int TestClearPage##uname(struct page *page)	\
	{ return test_and_clear_bit(PG_##lname, &policy(page, 1)->flags); }

#define PAGEFLAG(uname, lname, policy)					\
	TESTPAGEFLAG(uname, lname, policy)				\
	SETPAGEFLAG(uname, lname, policy)				\
	CLEARPAGEFLAG(uname, lname, policy)

#define __PAGEFLAG(uname, lname, policy)				\
	TESTPAGEFLAG(uname, lname, policy)				\
	__SETPAGEFLAG(uname, lname, policy)				\
	__CLEARPAGEFLAG(uname, lname, policy)

#define TESTSCFLAG(uname, lname, policy)				\
	TESTSETFLAG(uname, lname, policy)				\
	TESTCLEARFLAG(uname, lname, policy)

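/*
 * Illustrative expansion, for orientation only: with the macros above, a
 * single line such as PAGEFLAG(Dirty, dirty, PF_HEAD) generates roughly:
 *
 *	static __always_inline int PageDirty(struct page *page)
 *	{ return test_bit(PG_dirty, &compound_head(page)->flags); }
 *	static __always_inline void SetPageDirty(struct page *page)
 *	{ set_bit(PG_dirty, &compound_head(page)->flags); }
 *	static __always_inline void ClearPageDirty(struct page *page)
 *	{ clear_bit(PG_dirty, &compound_head(page)->flags); }
 *
 * i.e. the policy argument decides how the passed page is mapped to the
 * page whose flags word is actually touched.
 */
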
#define TESTPAGEFLAG_FALSE(uname)					\
static inline int Page##uname(const struct page *page) { return 0; }

#define SETPAGEFLAG_NOOP(uname)						\
static inline void SetPage##uname(struct page *page) { }

#define CLEARPAGEFLAG_NOOP(uname)					\
static inline void ClearPage##uname(struct page *page) { }

#define __CLEARPAGEFLAG_NOOP(uname)					\
static inline void __ClearPage##uname(struct page *page) { }

#define TESTSETFLAG_FALSE(uname)					\
static inline int TestSetPage##uname(struct page *page) { return 0; }

#define TESTCLEARFLAG_FALSE(uname)					\
static inline int TestClearPage##uname(struct page *page) { return 0; }

#define PAGEFLAG_FALSE(uname) TESTPAGEFLAG_FALSE(uname)			\
	SETPAGEFLAG_NOOP(uname) CLEARPAGEFLAG_NOOP(uname)

#define TESTSCFLAG_FALSE(uname)						\
	TESTSETFLAG_FALSE(uname) TESTCLEARFLAG_FALSE(uname)

__PAGEFLAG(Locked, locked, PF_NO_TAIL)
PAGEFLAG(Error, error, PF_NO_COMPOUND) TESTCLEARFLAG(Error, error, PF_NO_COMPOUND)
PAGEFLAG(Referenced, referenced, PF_HEAD)
	TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
	__SETPAGEFLAG(Referenced, referenced, PF_HEAD)
PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
	__CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
	TESTCLEARFLAG(Active, active, PF_HEAD)
__PAGEFLAG(Slab, slab, PF_NO_TAIL)
__PAGEFLAG(SlobFree, slob_free, PF_NO_TAIL)
PAGEFLAG(Checked, checked, PF_NO_COMPOUND)	/* Used by some filesystems */

/* Xen */
PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
	TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);

PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
	__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)
	__SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL)

/*
 * Private page markings that may be used by the filesystem that owns the page
 * for its own purposes.
 * - PG_private and PG_private_2 cause releasepage() and co to be invoked
 */
PAGEFLAG(Private, private, PF_ANY) __SETPAGEFLAG(Private, private, PF_ANY)
	__CLEARPAGEFLAG(Private, private, PF_ANY)
PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY)
PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY)
	TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY)

/*
 * Only test-and-set variants exist for PG_writeback. The unconditional
 * operators are risky: they bypass page accounting.
 */
TESTPAGEFLAG(Writeback, writeback, PF_NO_COMPOUND)
	TESTSCFLAG(Writeback, writeback, PF_NO_COMPOUND)
PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)

/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
	TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND)
	TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND)

#ifdef CONFIG_HIGHMEM
/*
 * Must use a macro here due to header dependency issues. page_zone() is not
 * available at this point.
 */
#define PageHighMem(__p) is_highmem_idx(page_zonenum(__p))
#else
PAGEFLAG_FALSE(HighMem)
#endif

#ifdef CONFIG_SWAP
PAGEFLAG(SwapCache, swapcache, PF_NO_COMPOUND)
#else
PAGEFLAG_FALSE(SwapCache)
#endif

PAGEFLAG(Unevictable, unevictable, PF_HEAD)
	__CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
	TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)

#ifdef CONFIG_MMU
PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	__CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
	TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL)
#else
PAGEFLAG_FALSE(Mlocked) __CLEARPAGEFLAG_NOOP(Mlocked)
	TESTSCFLAG_FALSE(Mlocked)
#endif

#ifdef CONFIG_ARCH_USES_PG_UNCACHED
PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND)
#else
PAGEFLAG_FALSE(Uncached)
#endif

#ifdef CONFIG_MEMORY_FAILURE
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
#else
PAGEFLAG_FALSE(HWPoison)
#define __PG_HWPOISON 0
#endif

#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
TESTPAGEFLAG(Young, young, PF_ANY)
SETPAGEFLAG(Young, young, PF_ANY)
TESTCLEARFLAG(Young, young, PF_ANY)
PAGEFLAG(Idle, idle, PF_ANY)
#endif

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
 * bit; and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page.  See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-lru movable
 * pages; page->mapping then points to a struct address_space.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 */
#define PAGE_MAPPING_ANON	0x1
#define PAGE_MAPPING_MOVABLE	0x2
#define PAGE_MAPPING_KSM	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
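
/*
 * Illustrative decode of the two low bits (a sketch, not a kernel helper):
 *
 *	unsigned long m = (unsigned long)page->mapping & PAGE_MAPPING_FLAGS;
 *
 *	m == 0                    : pagecache, struct address_space *
 *	m == PAGE_MAPPING_ANON    : anonymous, struct anon_vma * (see rmap.h)
 *	m == PAGE_MAPPING_MOVABLE : non-lru movable, struct address_space *
 *	m == PAGE_MAPPING_KSM     : KSM stable tree node (see ksm.h)
 */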

static __always_inline int PageMappingFlags(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
}

static __always_inline int PageAnon(struct page *page)
{
	page = compound_head(page);
	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}

static __always_inline int __PageMovable(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_MOVABLE;
}

#ifdef CONFIG_KSM
/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static __always_inline int PageKsm(struct page *page)
{
	page = compound_head(page);
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				PAGE_MAPPING_KSM;
}
#else
TESTPAGEFLAG_FALSE(Ksm)
#endif

u64 stable_page_flags(struct page *page);

static inline int PageUptodate(struct page *page)
{
	int ret;
	page = compound_head(page);
	ret = test_bit(PG_uptodate, &(page)->flags);
	/*
	 * Must ensure that the data we read out of the page is loaded
	 * _after_ we've loaded page->flags to check for PageUptodate.
	 * We can skip the barrier if the page is not uptodate, because
	 * we wouldn't be reading anything from it.
	 *
	 * See SetPageUptodate() for the other side of the story.
	 */
	if (ret)
		smp_rmb();

	return ret;
}

static __always_inline void __SetPageUptodate(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	smp_wmb();
	__set_bit(PG_uptodate, &page->flags);
}

static __always_inline void SetPageUptodate(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	/*
	 * Memory barrier must be issued before setting the PG_uptodate bit,
	 * so that all previous stores issued in order to bring the page
	 * uptodate are actually visible before PageUptodate becomes true.
	 */
	smp_wmb();
	set_bit(PG_uptodate, &page->flags);
}

CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)
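
/*
 * Illustrative sketch of how the two barriers pair up (a hypothetical
 * read-completion path, not a real kernel function):
 *
 *	static void my_read_done(struct page *page, void *buf, size_t len)
 *	{
 *		memcpy(page_address(page), buf, len);
 *		SetPageUptodate(page);
 *	}
 *
 * The smp_wmb() in SetPageUptodate() orders the data stores before the
 * PG_uptodate store; a reader that sees PageUptodate() return true is then
 * guaranteed, via the smp_rmb() above, to observe the copied data.
 */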

int test_clear_page_writeback(struct page *page);
int __test_set_page_writeback(struct page *page, bool keep_write);

#define test_set_page_writeback(page)			\
	__test_set_page_writeback(page, false)
#define test_set_page_writeback_keepwrite(page)	\
	__test_set_page_writeback(page, true)

static inline void set_page_writeback(struct page *page)
{
	test_set_page_writeback(page);
}

static inline void set_page_writeback_keepwrite(struct page *page)
{
	test_set_page_writeback_keepwrite(page);
}

__PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY)

static __always_inline void set_compound_head(struct page *page, struct page *head)
{
	WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}

static __always_inline void clear_compound_head(struct page *page)
{
	WRITE_ONCE(page->compound_head, 0);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ClearPageCompound(struct page *page)
{
	BUG_ON(!PageHead(page));
	ClearPageHead(page);
}
#endif

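/*
 * Illustrative sketch of set_compound_head() in use, simplified from what
 * prep_compound_page() in mm/page_alloc.c does for an order-2 allocation
 * (loop variable hypothetical, bookkeeping omitted):
 *
 *	__SetPageHead(page);
 *	for (i = 1; i < (1 << 2); i++)
 *		set_compound_head(&page[i], page);
 */
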
#define PG_head_mask ((1UL << PG_head))

#ifdef CONFIG_HUGETLB_PAGE
int PageHuge(struct page *page);
int PageHeadHuge(struct page *page);
bool page_huge_active(struct page *page);
#else
TESTPAGEFLAG_FALSE(Huge)
TESTPAGEFLAG_FALSE(HeadHuge)

static inline bool page_huge_active(struct page *page)
{
	return false;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * PageHuge() only returns true for hugetlbfs pages, but not for
 * normal or transparent huge pages.
 *
 * PageTransHuge() returns true for both transparent huge and
 * hugetlbfs pages, but not normal pages.  PageTransHuge() can only be
 * called in the core VM paths where hugetlbfs pages can't exist.
 */
static inline int PageTransHuge(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	return PageHead(page);
}

/*
 * PageTransCompound returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransCompound(struct page *page)
{
	return PageCompound(page);
}

/*
 * PageTransCompoundMap is the same as PageTransCompound, but it also
 * guarantees the primary MMU has the entire compound page mapped
 * through pmd_trans_huge, which in turn guarantees the secondary MMUs
 * can also map the entire compound page. This allows the secondary
 * MMUs to call get_user_pages() only once for each compound page and
 * to immediately map the entire compound page with a single secondary
 * MMU fault. If there will be a pmd split later, the secondary MMUs
 * will get an update through the MMU notifier invalidation through
 * split_huge_pmd().
 *
 * Unlike PageTransCompound, this is safe to be called only while
 * split_huge_pmd() cannot run from under us, like if protected by the
 * MMU notifier, otherwise it may result in page->_mapcount < 0 false
 * positives.
 */
static inline int PageTransCompoundMap(struct page *page)
{
	return PageTransCompound(page) && atomic_read(&page->_mapcount) < 0;
}

/*
 * PageTransTail returns true for both transparent huge pages
 * and hugetlbfs pages, so it should only be called when it's known
 * that hugetlbfs pages aren't involved.
 */
static inline int PageTransTail(struct page *page)
{
	return PageTail(page);
}

/*
 * PageDoubleMap indicates that the compound page is mapped with PTEs as well
 * as PMDs.
 *
 * This is required for optimization of rmap operations for THP: we can
 * postpone per small page mapcount accounting (and its overhead from atomic
 * operations) until the first PMD split.
 *
 * When PageDoubleMap is set, ->_mapcount in all sub-pages is offset up by one.
 * This reference will go away with the last compound_mapcount.
 *
 * See also __split_huge_pmd_locked() and page_remove_anon_compound_rmap().
 */
static inline int PageDoubleMap(struct page *page)
{
	return PageHead(page) && test_bit(PG_double_map, &page[1].flags);
}

static inline void SetPageDoubleMap(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	set_bit(PG_double_map, &page[1].flags);
}

static inline void ClearPageDoubleMap(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	clear_bit(PG_double_map, &page[1].flags);
}

static inline int TestSetPageDoubleMap(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	return test_and_set_bit(PG_double_map, &page[1].flags);
}

static inline int TestClearPageDoubleMap(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	return test_and_clear_bit(PG_double_map, &page[1].flags);
}

#else
TESTPAGEFLAG_FALSE(TransHuge)
TESTPAGEFLAG_FALSE(TransCompound)
TESTPAGEFLAG_FALSE(TransCompoundMap)
TESTPAGEFLAG_FALSE(TransTail)
PAGEFLAG_FALSE(DoubleMap)
	TESTSETFLAG_FALSE(DoubleMap)
	TESTCLEARFLAG_FALSE(DoubleMap)
#endif

/*
 * For pages that are never mapped to userspace, page->_mapcount may be
 * used for storing extra information about page type. Any value used
 * for this purpose must be <= -2, but it's better to start not too close
 * to -2 so that an underflow of the page_mapcount() won't be mistaken
 * for a special page.
 */
#define PAGE_MAPCOUNT_OPS(uname, lname)					\
static __always_inline int Page##uname(struct page *page)		\
{									\
	return atomic_read(&page->_mapcount) ==				\
				PAGE_##lname##_MAPCOUNT_VALUE;		\
}									\
static __always_inline void __SetPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);	\
	atomic_set(&page->_mapcount, PAGE_##lname##_MAPCOUNT_VALUE);	\
}									\
static __always_inline void __ClearPage##uname(struct page *page)	\
{									\
	VM_BUG_ON_PAGE(!Page##uname(page), page);			\
	atomic_set(&page->_mapcount, -1);				\
}

/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 */
#define PAGE_BUDDY_MAPCOUNT_VALUE	(-128)
PAGE_MAPCOUNT_OPS(Buddy, BUDDY)

/*
 * PageBalloon() is set on pages that are on the balloon page list
 * (see mm/balloon_compaction.c).
 */
#define PAGE_BALLOON_MAPCOUNT_VALUE	(-256)
PAGE_MAPCOUNT_OPS(Balloon, BALLOON)

/*
 * If kmemcg is enabled, the buddy allocator will set PageKmemcg() on
 * pages allocated with __GFP_ACCOUNT. It gets cleared on page free.
 */
#define PAGE_KMEMCG_MAPCOUNT_VALUE	(-512)
PAGE_MAPCOUNT_OPS(Kmemcg, KMEMCG)

extern bool is_free_buddy_page(struct page *page);

__PAGEFLAG(Isolated, isolated, PF_ANY);

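/*
 * Illustrative expansion, for orientation only: PAGE_MAPCOUNT_OPS(Buddy,
 * BUDDY) above generates roughly:
 *
 *	static __always_inline int PageBuddy(struct page *page)
 *	{
 *		return atomic_read(&page->_mapcount) ==
 *					PAGE_BUDDY_MAPCOUNT_VALUE;
 *	}
 *
 * together with __SetPageBuddy()/__ClearPageBuddy(), which move _mapcount
 * between -1 (the ordinary "unmapped" state) and -128.
 */
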
/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.  PG_active is reused for this,
 * which is safe because slab pages are never on the LRU.
 */
static inline int PageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	return PageActive(page);
}

static inline void SetPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	SetPageActive(page);
}

static inline void __ClearPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	__ClearPageActive(page);
}

static inline void ClearPageSlabPfmemalloc(struct page *page)
{
	VM_BUG_ON_PAGE(!PageSlab(page), page);
	ClearPageActive(page);
}

#ifdef CONFIG_MMU
#define __PG_MLOCKED		(1UL << PG_mlocked)
#else
#define __PG_MLOCKED		0
#endif

/*
 * Flags checked when a page is freed.  Pages being freed should not have
 * these flags set.  If they are, there is a problem.
 */
#define PAGE_FLAGS_CHECK_AT_FREE \
	(1UL << PG_lru	   | 1UL << PG_locked	 | \
	 1UL << PG_private | 1UL << PG_private_2 | \
	 1UL << PG_writeback | 1UL << PG_reserved | \
	 1UL << PG_slab	   | 1UL << PG_swapcache | 1UL << PG_active | \
	 1UL << PG_unevictable | __PG_MLOCKED)
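
/*
 * Illustrative sketch of how this mask is consumed (the real check lives
 * in the free path of mm/page_alloc.c):
 *
 *	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE))
 *		bad_page(page);
 */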

/*
 * Flags checked when a page is prepped for return by the page allocator.
 * Pages being prepped should not have these flags set.  If they are set,
 * there has been a kernel bug or struct page corruption.
 *
 * __PG_HWPOISON is exceptional because it needs to be kept beyond a page's
 * alloc-free cycle to prevent the page from being reused.
 */
#define PAGE_FLAGS_CHECK_AT_PREP \
	(((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)

#define PAGE_FLAGS_PRIVATE \
	(1UL << PG_private | 1UL << PG_private_2)
/**
 * page_has_private - Determine if page has private stuff
 * @page: The page to be checked
 *
 * Determine if a page has private stuff, indicating that release routines
 * should be invoked upon it.
 */
static inline int page_has_private(struct page *page)
{
	return !!(page->flags & PAGE_FLAGS_PRIVATE);
}
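
/*
 * Illustrative caller (a sketch modelled on the reclaim path, which drops
 * fs-private state before freeing a page):
 *
 *	if (page_has_private(page) &&
 *	    !try_to_release_page(page, GFP_KERNEL))
 *		goto keep_locked;
 */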

#undef PF_ANY
#undef PF_HEAD
#undef PF_NO_TAIL
#undef PF_NO_COMPOUND
#endif /* !__GENERATING_BOUNDS_H */

#endif /* PAGE_FLAGS_H */