mm: introduce wrappers to access mm->nr_ptes
[linux-block.git] / include / linux / mm.h
b2441318 1/* SPDX-License-Identifier: GPL-2.0 */
1da177e4
LT
2#ifndef _LINUX_MM_H
3#define _LINUX_MM_H
4
1da177e4
LT
5#include <linux/errno.h>
6
7#ifdef __KERNEL__
8
309381fe 9#include <linux/mmdebug.h>
1da177e4 10#include <linux/gfp.h>
187f1882 11#include <linux/bug.h>
1da177e4
LT
12#include <linux/list.h>
13#include <linux/mmzone.h>
14#include <linux/rbtree.h>
83aeeada 15#include <linux/atomic.h>
9a11b49a 16#include <linux/debug_locks.h>
5b99cd0e 17#include <linux/mm_types.h>
08677214 18#include <linux/range.h>
c6f6b596 19#include <linux/pfn.h>
3565fce3 20#include <linux/percpu-refcount.h>
e9da73d6 21#include <linux/bit_spinlock.h>
b0d40c92 22#include <linux/shrinker.h>
9c599024 23#include <linux/resource.h>
e30825f1 24#include <linux/page_ext.h>
8025e5dd 25#include <linux/err.h>
fe896d18 26#include <linux/page_ref.h>
7b2d55d2 27#include <linux/memremap.h>
1da177e4
LT
28
29struct mempolicy;
30struct anon_vma;
bf181b9f 31struct anon_vma_chain;
4e950f6f 32struct file_ra_state;
e8edc6e0 33struct user_struct;
4e950f6f 34struct writeback_control;
682aa8e1 35struct bdi_writeback;
1da177e4 36
597b7305
MH
37void init_mm_internals(void);
38
fccc9987 39#ifndef CONFIG_NEED_MULTIPLE_NODES /* Don't use mapnrs, do it properly */
1da177e4 40extern unsigned long max_mapnr;
fccc9987
JL
41
42static inline void set_max_mapnr(unsigned long limit)
43{
44 max_mapnr = limit;
45}
46#else
47static inline void set_max_mapnr(unsigned long limit) { }
1da177e4
LT
48#endif
49
4481374c 50extern unsigned long totalram_pages;
1da177e4 51extern void * high_memory;
1da177e4
LT
52extern int page_cluster;
53
54#ifdef CONFIG_SYSCTL
55extern int sysctl_legacy_va_layout;
56#else
57#define sysctl_legacy_va_layout 0
58#endif
59
d07e2259
DC
60#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
61extern const int mmap_rnd_bits_min;
62extern const int mmap_rnd_bits_max;
63extern int mmap_rnd_bits __read_mostly;
64#endif
65#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
66extern const int mmap_rnd_compat_bits_min;
67extern const int mmap_rnd_compat_bits_max;
68extern int mmap_rnd_compat_bits __read_mostly;
69#endif
70
1da177e4
LT
71#include <asm/page.h>
72#include <asm/pgtable.h>
73#include <asm/processor.h>
1da177e4 74
79442ed1
TC
75#ifndef __pa_symbol
76#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
77#endif
78
1dff8083
AB
79#ifndef page_to_virt
80#define page_to_virt(x) __va(PFN_PHYS(page_to_pfn(x)))
81#endif
82
568c5fe5
LA
83#ifndef lm_alias
84#define lm_alias(x) __va(__pa_symbol(x))
85#endif
86
593befa6
DD
87/*
88 * To prevent common memory management code establishing
89 * a zero page mapping on a read fault.
90 * This macro should be defined within <asm/pgtable.h>.
91 * s390 does this to prevent multiplexing of hardware bits
92 * related to the physical page in case of virtualization.
93 */
94#ifndef mm_forbids_zeropage
95#define mm_forbids_zeropage(X) (0)
96#endif
97
ea606cf5
AR
98/*
99 * Default maximum number of active map areas; this limits the number of vmas
100 * per mm struct. Users can override this number via sysctl, but there is a
101 * caveat.
102 *
103 * When a program's coredump is generated in ELF format, one section is created
104 * per vma. In ELF, the number of sections is stored in an unsigned short,
105 * so it must stay below 65535 when the coredump is generated. Because the
106 * kernel adds some informative sections to the program image while generating
107 * the coredump, we need some margin. The number of extra sections is
108 * currently 1-3 and depends on the arch, so we use "5" as a safe margin here.
109 *
110 * ELF extended numbering allows more than 65535 sections, so the 16-bit bound
111 * is not a hard limit any more, although some userspace tools can be
112 * surprised by that.
113 */
114#define MAPCOUNT_ELF_CORE_MARGIN (5)
115#define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
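/*
 * Worked example of the margin above: ELF stores e_shnum in an unsigned
 * short, so with the 5-section safety margin DEFAULT_MAX_MAP_COUNT evaluates
 * to 65535 - 5 = 65530 map areas per mm.
 */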
116
117extern int sysctl_max_map_count;
118
c9b1d098 119extern unsigned long sysctl_user_reserve_kbytes;
4eeab4f5 120extern unsigned long sysctl_admin_reserve_kbytes;
c9b1d098 121
49f0ce5f
JM
122extern int sysctl_overcommit_memory;
123extern int sysctl_overcommit_ratio;
124extern unsigned long sysctl_overcommit_kbytes;
125
126extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *,
127 size_t *, loff_t *);
128extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
129 size_t *, loff_t *);
130
1da177e4
LT
131#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
132
27ac792c
AR
133/* to align the pointer to the (next) page boundary */
134#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
135
0fa73b86 136/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
1061b0d2 137#define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
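/*
 * For example, with 4 KiB pages PAGE_ALIGN(0x1234) evaluates to 0x2000, while
 * PAGE_ALIGNED(0x2000) is true and PAGE_ALIGNED(0x1234) is not.
 */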
0fa73b86 138
1da177e4
LT
139/*
140 * Linux kernel virtual memory manager primitives.
141 * The idea being to have a "virtual" mm in the same way
142 * we have a virtual fs - giving a cleaner interface to the
143 * mm details, and allowing different kinds of memory mappings
144 * (from shared memory to executable loading to arbitrary
145 * mmap() functions).
146 */
147
c43692e8
CL
148extern struct kmem_cache *vm_area_cachep;
149
1da177e4 150#ifndef CONFIG_MMU
8feae131
DH
151extern struct rb_root nommu_region_tree;
152extern struct rw_semaphore nommu_region_sem;
1da177e4
LT
153
154extern unsigned int kobjsize(const void *objp);
155#endif
156
157/*
605d9288 158 * vm_flags in vm_area_struct, see mm_types.h.
bcf66917 159 * When changing, update also include/trace/events/mmflags.h
1da177e4 160 */
cc2383ec
KK
161#define VM_NONE 0x00000000
162
1da177e4
LT
163#define VM_READ 0x00000001 /* currently active flags */
164#define VM_WRITE 0x00000002
165#define VM_EXEC 0x00000004
166#define VM_SHARED 0x00000008
167
7e2cff42 168/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
1da177e4
LT
169#define VM_MAYREAD 0x00000010 /* limits for mprotect() etc */
170#define VM_MAYWRITE 0x00000020
171#define VM_MAYEXEC 0x00000040
172#define VM_MAYSHARE 0x00000080
173
174#define VM_GROWSDOWN 0x00000100 /* general info on the segment */
16ba6f81 175#define VM_UFFD_MISSING 0x00000200 /* missing pages tracking */
6aab341e 176#define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */
1da177e4 177#define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */
16ba6f81 178#define VM_UFFD_WP 0x00001000 /* wrprotect pages tracking */
1da177e4 179
1da177e4
LT
180#define VM_LOCKED 0x00002000
181#define VM_IO 0x00004000 /* Memory mapped I/O or similar */
182
183 /* Used by sys_madvise() */
184#define VM_SEQ_READ 0x00008000 /* App will access data sequentially */
185#define VM_RAND_READ 0x00010000 /* App will not benefit from clustered reads */
186
187#define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */
188#define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */
de60f5f1 189#define VM_LOCKONFAULT 0x00080000 /* Lock the pages covered when they are faulted in */
1da177e4 190#define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */
cdfd4325 191#define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */
1da177e4 192#define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
cc2383ec 193#define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
d2cd9ede 194#define VM_WIPEONFORK 0x02000000 /* Wipe VMA contents in child. */
0103bd16 195#define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
d00806b1 196
d9104d1c
CG
197#ifdef CONFIG_MEM_SOFT_DIRTY
198# define VM_SOFTDIRTY 0x08000000 /* Not soft dirty clean area */
199#else
200# define VM_SOFTDIRTY 0
201#endif
202
b379d790 203#define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
cc2383ec
KK
204#define VM_HUGEPAGE 0x20000000 /* MADV_HUGEPAGE marked this vma */
205#define VM_NOHUGEPAGE 0x40000000 /* MADV_NOHUGEPAGE marked this vma */
f8af4da3 206#define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
1da177e4 207
63c17fb8
DH
208#ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
209#define VM_HIGH_ARCH_BIT_0 32 /* bit only usable on 64-bit architectures */
210#define VM_HIGH_ARCH_BIT_1 33 /* bit only usable on 64-bit architectures */
211#define VM_HIGH_ARCH_BIT_2 34 /* bit only usable on 64-bit architectures */
212#define VM_HIGH_ARCH_BIT_3 35 /* bit only usable on 64-bit architectures */
df3735c5 213#define VM_HIGH_ARCH_BIT_4 36 /* bit only usable on 64-bit architectures */
63c17fb8
DH
214#define VM_HIGH_ARCH_0 BIT(VM_HIGH_ARCH_BIT_0)
215#define VM_HIGH_ARCH_1 BIT(VM_HIGH_ARCH_BIT_1)
216#define VM_HIGH_ARCH_2 BIT(VM_HIGH_ARCH_BIT_2)
217#define VM_HIGH_ARCH_3 BIT(VM_HIGH_ARCH_BIT_3)
df3735c5 218#define VM_HIGH_ARCH_4 BIT(VM_HIGH_ARCH_BIT_4)
63c17fb8
DH
219#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */
220
cc2383ec
KK
221#if defined(CONFIG_X86)
222# define VM_PAT VM_ARCH_1 /* PAT reserves whole VMA at once (x86) */
8f62c883
DH
223#if defined (CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS)
224# define VM_PKEY_SHIFT VM_HIGH_ARCH_BIT_0
225# define VM_PKEY_BIT0 VM_HIGH_ARCH_0 /* A protection key is a 4-bit value */
226# define VM_PKEY_BIT1 VM_HIGH_ARCH_1
227# define VM_PKEY_BIT2 VM_HIGH_ARCH_2
228# define VM_PKEY_BIT3 VM_HIGH_ARCH_3
229#endif
cc2383ec
KK
230#elif defined(CONFIG_PPC)
231# define VM_SAO VM_ARCH_1 /* Strong Access Ordering (powerpc) */
232#elif defined(CONFIG_PARISC)
233# define VM_GROWSUP VM_ARCH_1
9ca52ed9
JH
234#elif defined(CONFIG_METAG)
235# define VM_GROWSUP VM_ARCH_1
cc2383ec
KK
236#elif defined(CONFIG_IA64)
237# define VM_GROWSUP VM_ARCH_1
238#elif !defined(CONFIG_MMU)
239# define VM_MAPPED_COPY VM_ARCH_1 /* T if mapped copy of data (nommu mmap) */
240#endif
241
df3735c5 242#if defined(CONFIG_X86_INTEL_MPX)
4aae7e43 243/* MPX specific bounds table or bounds directory */
fa87b91c 244# define VM_MPX VM_HIGH_ARCH_4
df3735c5
RR
245#else
246# define VM_MPX VM_NONE
4aae7e43
QR
247#endif
248
cc2383ec
KK
249#ifndef VM_GROWSUP
250# define VM_GROWSUP VM_NONE
251#endif
252
a8bef8ff
MG
253/* Bits set in the VMA until the stack is in its final location */
254#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ)
255
1da177e4
LT
256#ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
257#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
258#endif
259
260#ifdef CONFIG_STACK_GROWSUP
30bdbb78 261#define VM_STACK VM_GROWSUP
1da177e4 262#else
30bdbb78 263#define VM_STACK VM_GROWSDOWN
1da177e4
LT
264#endif
265
30bdbb78
KK
266#define VM_STACK_FLAGS (VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
267
b291f000 268/*
78f11a25
AA
269 * Special vmas that are non-mergable, non-mlock()able.
270 * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
b291f000 271 */
9050d7eb 272#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
b291f000 273
a0715cc2
AT
274/* This mask defines which mm->def_flags a process can inherit its parent */
275#define VM_INIT_DEF_MASK VM_NOHUGEPAGE
276
de60f5f1
EM
277/* This mask is used to clear all the VMA flags used by mlock */
278#define VM_LOCKED_CLEAR_MASK (~(VM_LOCKED | VM_LOCKONFAULT))
279
1da177e4
LT
280/*
281 * mapping from the currently active vm_flags protection bits (the
282 * low four bits) to a page protection mask..
283 */
284extern pgprot_t protection_map[16];
285
d0217ac0 286#define FAULT_FLAG_WRITE 0x01 /* Fault was a write access */
9b4bdd2f
KS
287#define FAULT_FLAG_MKWRITE 0x02 /* Fault was mkwrite of existing pte */
288#define FAULT_FLAG_ALLOW_RETRY 0x04 /* Retry fault if blocking */
289#define FAULT_FLAG_RETRY_NOWAIT 0x08 /* Don't drop mmap_sem and wait when retrying */
290#define FAULT_FLAG_KILLABLE 0x10 /* The fault task is in SIGKILL killable region */
291#define FAULT_FLAG_TRIED 0x20 /* Second try */
292#define FAULT_FLAG_USER 0x40 /* The fault originated in userspace */
1b2ee126 293#define FAULT_FLAG_REMOTE 0x80 /* faulting for non current tsk/mm */
d61172b4 294#define FAULT_FLAG_INSTRUCTION 0x100 /* The fault was during an instruction fetch */
d0217ac0 295
282a8e03
RZ
296#define FAULT_FLAG_TRACE \
297 { FAULT_FLAG_WRITE, "WRITE" }, \
298 { FAULT_FLAG_MKWRITE, "MKWRITE" }, \
299 { FAULT_FLAG_ALLOW_RETRY, "ALLOW_RETRY" }, \
300 { FAULT_FLAG_RETRY_NOWAIT, "RETRY_NOWAIT" }, \
301 { FAULT_FLAG_KILLABLE, "KILLABLE" }, \
302 { FAULT_FLAG_TRIED, "TRIED" }, \
303 { FAULT_FLAG_USER, "USER" }, \
304 { FAULT_FLAG_REMOTE, "REMOTE" }, \
305 { FAULT_FLAG_INSTRUCTION, "INSTRUCTION" }
306
54cb8821 307/*
d0217ac0 308 * vm_fault is filled by the pagefault handler and passed to the vma's
83c54070
NP
309 * ->fault function. The vma's ->fault is responsible for returning a bitmask
310 * of VM_FAULT_xxx flags that give details about how the fault was handled.
54cb8821 311 *
c20cd45e
MH
312 * MM layer fills up gfp_mask for page allocations but fault handler might
313 * alter it if its implementation requires a different allocation context.
314 *
9b4bdd2f 315 * pgoff should be used in favour of virtual_address, if possible.
54cb8821 316 */
d0217ac0 317struct vm_fault {
82b0f8c3 318 struct vm_area_struct *vma; /* Target VMA */
d0217ac0 319 unsigned int flags; /* FAULT_FLAG_xxx flags */
c20cd45e 320 gfp_t gfp_mask; /* gfp mask to be used for allocations */
d0217ac0 321 pgoff_t pgoff; /* Logical page offset based on vma */
82b0f8c3 322 unsigned long address; /* Faulting virtual address */
82b0f8c3 323 pmd_t *pmd; /* Pointer to pmd entry matching
2994302b 324 * the 'address' */
a2d58167
DJ
325 pud_t *pud; /* Pointer to pud entry matching
326 * the 'address'
327 */
2994302b 328 pte_t orig_pte; /* Value of PTE at the time of fault */
d0217ac0 329
3917048d
JK
330 struct page *cow_page; /* Page handler may use for COW fault */
331 struct mem_cgroup *memcg; /* Cgroup cow_page belongs to */
d0217ac0 332 struct page *page; /* ->fault handlers should return a
83c54070 333 * page here, unless VM_FAULT_NOPAGE
d0217ac0 334 * is set (which is also implied by
83c54070 335 * VM_FAULT_ERROR).
d0217ac0 336 */
82b0f8c3 337 /* These three entries are valid only while holding ptl lock */
bae473a4
KS
338 pte_t *pte; /* Pointer to pte entry matching
339 * the 'address'. NULL if the page
340 * table hasn't been allocated.
341 */
342 spinlock_t *ptl; /* Page table lock.
343 * Protects pte page table if 'pte'
344 * is not NULL, otherwise pmd.
345 */
7267ec00
KS
346 pgtable_t prealloc_pte; /* Pre-allocated pte page table.
347 * vm_ops->map_pages() calls
348 * alloc_set_pte() from atomic context.
349 * do_fault_around() pre-allocates
350 * page table to avoid allocation from
351 * atomic context.
352 */
54cb8821 353};
1da177e4 354
c791ace1
DJ
355/* page entry size for vm->huge_fault() */
356enum page_entry_size {
357 PE_SIZE_PTE = 0,
358 PE_SIZE_PMD,
359 PE_SIZE_PUD,
360};
361
1da177e4
LT
362/*
363 * These are the virtual MM functions - opening of an area, closing and
364 * unmapping it (needed to keep files on disk up-to-date etc), pointer
365 * to the functions called when a no-page or a wp-page exception occurs.
366 */
367struct vm_operations_struct {
368 void (*open)(struct vm_area_struct * area);
369 void (*close)(struct vm_area_struct * area);
5477e70a 370 int (*mremap)(struct vm_area_struct * area);
11bac800 371 int (*fault)(struct vm_fault *vmf);
c791ace1 372 int (*huge_fault)(struct vm_fault *vmf, enum page_entry_size pe_size);
82b0f8c3 373 void (*map_pages)(struct vm_fault *vmf,
bae473a4 374 pgoff_t start_pgoff, pgoff_t end_pgoff);
9637a5ef
DH
375
376 /* notification that a previously read-only page is about to become
377 * writable, if an error is returned it will cause a SIGBUS */
11bac800 378 int (*page_mkwrite)(struct vm_fault *vmf);
28b2ee20 379
dd906184 380 /* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
11bac800 381 int (*pfn_mkwrite)(struct vm_fault *vmf);
dd906184 382
28b2ee20
RR
383 /* called by access_process_vm when get_user_pages() fails, typically
384 * for use by special VMAs that can switch between memory and hardware
385 */
386 int (*access)(struct vm_area_struct *vma, unsigned long addr,
387 void *buf, int len, int write);
78d683e8
AL
388
389 /* Called by the /proc/PID/maps code to ask the vma whether it
390 * has a special name. Returning non-NULL will also cause this
391 * vma to be dumped unconditionally. */
392 const char *(*name)(struct vm_area_struct *vma);
393
1da177e4 394#ifdef CONFIG_NUMA
a6020ed7
LS
395 /*
396 * set_policy() op must add a reference to any non-NULL @new mempolicy
397 * to hold the policy upon return. Caller should pass NULL @new to
398 * remove a policy and fall back to surrounding context--i.e. do not
399 * install a MPOL_DEFAULT policy, nor the task or system default
400 * mempolicy.
401 */
1da177e4 402 int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
a6020ed7
LS
403
404 /*
405 * get_policy() op must add reference [mpol_get()] to any policy at
406 * (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure
407 * in mm/mempolicy.c will do this automatically.
408 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
409 * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
410 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
411 * must return NULL--i.e., do not "fallback" to task or system default
412 * policy.
413 */
1da177e4
LT
414 struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
415 unsigned long addr);
416#endif
667a0a06
DV
417 /*
418 * Called by vm_normal_page() for special PTEs to find the
419 * page for @addr. This is useful if the default behavior
420 * (using pte_page()) would not find the correct page.
421 */
422 struct page *(*find_special_page)(struct vm_area_struct *vma,
423 unsigned long addr);
1da177e4
LT
424};
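/*
 * Illustrative sketch, not part of this header: a minimal vm_operations_struct
 * for a driver that services faults from a single pre-allocated page.
 * "example_page" is a hypothetical driver-private page; everything else only
 * uses the interfaces declared above.
 */
#if 0	/* example only, not compiled */
extern struct page *example_page;	/* hypothetical driver-private page */

static int example_vm_fault(struct vm_fault *vmf)
{
	get_page(example_page);		/* the handler must hold a reference */
	vmf->page = example_page;	/* handed back to the fault path */
	return 0;			/* no VM_FAULT_* error bits set */
}

static const struct vm_operations_struct example_vm_ops = {
	.fault	= example_vm_fault,
};
#endif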
425
426struct mmu_gather;
427struct inode;
428
349aef0b
AM
429#define page_private(page) ((page)->private)
430#define set_page_private(page, v) ((page)->private = (v))
4c21e2f2 431
5c7fb56e
DW
432#if !defined(__HAVE_ARCH_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
433static inline int pmd_devmap(pmd_t pmd)
434{
435 return 0;
436}
a00cc7d9
MW
437static inline int pud_devmap(pud_t pud)
438{
439 return 0;
440}
b59f65fa
KS
441static inline int pgd_devmap(pgd_t pgd)
442{
443 return 0;
444}
5c7fb56e
DW
445#endif
446
1da177e4
LT
447/*
448 * FIXME: take this include out, include page-flags.h in
449 * files which need it (119 of them)
450 */
451#include <linux/page-flags.h>
71e3aac0 452#include <linux/huge_mm.h>
1da177e4
LT
453
454/*
455 * Methods to modify the page usage count.
456 *
457 * What counts for a page usage:
458 * - cache mapping (page->mapping)
459 * - private data (page->private)
460 * - page mapped in a task's page tables, each mapping
461 * is counted separately
462 *
463 * Also, many kernel routines increase the page count before a critical
464 * routine so they can be sure the page doesn't go away from under them.
1da177e4
LT
465 */
466
467/*
da6052f7 468 * Drop a ref, return true if the refcount fell to zero (the page has no users)
1da177e4 469 */
7c8ee9a8
NP
470static inline int put_page_testzero(struct page *page)
471{
fe896d18
JK
472 VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
473 return page_ref_dec_and_test(page);
7c8ee9a8 474}
1da177e4
LT
475
476/*
7c8ee9a8
NP
477 * Try to grab a ref unless the page has a refcount of zero, return false if
478 * that is the case.
8e0861fa
AK
479 * This can be called when MMU is off so it must not access
480 * any of the virtual mappings.
1da177e4 481 */
7c8ee9a8
NP
482static inline int get_page_unless_zero(struct page *page)
483{
fe896d18 484 return page_ref_add_unless(page, 1, 0);
7c8ee9a8 485}
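/*
 * Illustrative sketch, not part of this header: the usual speculative-reference
 * pattern built on get_page_unless_zero() - take a reference only if the page
 * still has users, inspect it, then drop the reference again.
 */
#if 0	/* example only, not compiled */
static bool example_try_inspect(struct page *page)
{
	if (!get_page_unless_zero(page))
		return false;		/* page was already being freed */
	/* ... the page cannot be freed while we hold the reference ... */
	put_page(page);
	return true;
}
#endif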
1da177e4 486
53df8fdc 487extern int page_is_ram(unsigned long pfn);
124fe20d
DW
488
489enum {
490 REGION_INTERSECTS,
491 REGION_DISJOINT,
492 REGION_MIXED,
493};
494
1c29f25b
TK
495int region_intersects(resource_size_t offset, size_t size, unsigned long flags,
496 unsigned long desc);
53df8fdc 497
48667e7a 498/* Support for virtually mapped pages */
b3bdda02
CL
499struct page *vmalloc_to_page(const void *addr);
500unsigned long vmalloc_to_pfn(const void *addr);
48667e7a 501
0738c4bb
PM
502/*
503 * Determine if an address is within the vmalloc range
504 *
505 * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
506 * is no special casing required.
507 */
bb00a789 508static inline bool is_vmalloc_addr(const void *x)
9e2779fa 509{
0738c4bb 510#ifdef CONFIG_MMU
9e2779fa
CL
511 unsigned long addr = (unsigned long)x;
512
513 return addr >= VMALLOC_START && addr < VMALLOC_END;
0738c4bb 514#else
bb00a789 515 return false;
8ca3ed87 516#endif
0738c4bb 517}
81ac3ad9
KH
518#ifdef CONFIG_MMU
519extern int is_vmalloc_or_module_addr(const void *x);
520#else
934831d0 521static inline int is_vmalloc_or_module_addr(const void *x)
81ac3ad9
KH
522{
523 return 0;
524}
525#endif
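/*
 * Illustrative sketch, not part of this header: freeing a buffer that may be
 * either kmalloc- or vmalloc-backed by testing the address - effectively what
 * kvfree(), declared below, does for you. Assumes <linux/slab.h> and
 * <linux/vmalloc.h> for kfree()/vfree().
 */
#if 0	/* example only, not compiled */
static void example_free_buffer(void *buf)
{
	if (is_vmalloc_addr(buf))
		vfree(buf);
	else
		kfree(buf);
}
#endif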
9e2779fa 526
a7c3e901
MH
527extern void *kvmalloc_node(size_t size, gfp_t flags, int node);
528static inline void *kvmalloc(size_t size, gfp_t flags)
529{
530 return kvmalloc_node(size, flags, NUMA_NO_NODE);
531}
532static inline void *kvzalloc_node(size_t size, gfp_t flags, int node)
533{
534 return kvmalloc_node(size, flags | __GFP_ZERO, node);
535}
536static inline void *kvzalloc(size_t size, gfp_t flags)
537{
538 return kvmalloc(size, flags | __GFP_ZERO);
539}
540
752ade68
MH
541static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
542{
543 if (size != 0 && n > SIZE_MAX / size)
544 return NULL;
545
546 return kvmalloc(n * size, flags);
547}
548
39f1f78d
AV
549extern void kvfree(const void *addr);
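/*
 * Illustrative sketch, not part of this header: allocating a possibly large,
 * zeroed table with the kvmalloc helpers above and releasing it with kvfree(),
 * which copes with both kmalloc- and vmalloc-backed memory.
 */
#if 0	/* example only, not compiled */
static unsigned long *example_alloc_table(size_t nr_entries)
{
	unsigned long *table;

	table = kvmalloc_array(nr_entries, sizeof(*table),
			       GFP_KERNEL | __GFP_ZERO);
	/* ... use table[0..nr_entries-1], then kvfree(table) ... */
	return table;
}
#endif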
550
53f9263b
KS
551static inline atomic_t *compound_mapcount_ptr(struct page *page)
552{
553 return &page[1].compound_mapcount;
554}
555
556static inline int compound_mapcount(struct page *page)
557{
5f527c2b 558 VM_BUG_ON_PAGE(!PageCompound(page), page);
53f9263b
KS
559 page = compound_head(page);
560 return atomic_read(compound_mapcount_ptr(page)) + 1;
561}
562
70b50f94
AA
563/*
564 * The atomic page->_mapcount, starts from -1: so that transitions
565 * both from it and to it can be tracked, using atomic_inc_and_test
566 * and atomic_add_negative(-1).
567 */
22b751c3 568static inline void page_mapcount_reset(struct page *page)
70b50f94
AA
569{
570 atomic_set(&(page)->_mapcount, -1);
571}
572
b20ce5e0
KS
573int __page_mapcount(struct page *page);
574
70b50f94
AA
575static inline int page_mapcount(struct page *page)
576{
1d148e21 577 VM_BUG_ON_PAGE(PageSlab(page), page);
53f9263b 578
b20ce5e0
KS
579 if (unlikely(PageCompound(page)))
580 return __page_mapcount(page);
581 return atomic_read(&page->_mapcount) + 1;
582}
583
584#ifdef CONFIG_TRANSPARENT_HUGEPAGE
585int total_mapcount(struct page *page);
6d0a07ed 586int page_trans_huge_mapcount(struct page *page, int *total_mapcount);
b20ce5e0
KS
587#else
588static inline int total_mapcount(struct page *page)
589{
590 return page_mapcount(page);
70b50f94 591}
6d0a07ed
AA
592static inline int page_trans_huge_mapcount(struct page *page,
593 int *total_mapcount)
594{
595 int mapcount = page_mapcount(page);
596 if (total_mapcount)
597 *total_mapcount = mapcount;
598 return mapcount;
599}
b20ce5e0 600#endif
70b50f94 601
b49af68f
CL
602static inline struct page *virt_to_head_page(const void *x)
603{
604 struct page *page = virt_to_page(x);
ccaafd7f 605
1d798ca3 606 return compound_head(page);
b49af68f
CL
607}
608
ddc58f27
KS
609void __put_page(struct page *page);
610
1d7ea732 611void put_pages_list(struct list_head *pages);
1da177e4 612
8dfcc9ba 613void split_page(struct page *page, unsigned int order);
8dfcc9ba 614
33f2ef89
AW
615/*
616 * Compound pages have a destructor function. Provide a
617 * prototype for that function and accessor functions.
f1e61557 618 * These are _only_ valid on the head of a compound page.
33f2ef89 619 */
f1e61557
KS
620typedef void compound_page_dtor(struct page *);
621
622/* Keep the enum in sync with compound_page_dtors array in mm/page_alloc.c */
623enum compound_dtor_id {
624 NULL_COMPOUND_DTOR,
625 COMPOUND_PAGE_DTOR,
626#ifdef CONFIG_HUGETLB_PAGE
627 HUGETLB_PAGE_DTOR,
9a982250
KS
628#endif
629#ifdef CONFIG_TRANSPARENT_HUGEPAGE
630 TRANSHUGE_PAGE_DTOR,
f1e61557
KS
631#endif
632 NR_COMPOUND_DTORS,
633};
634extern compound_page_dtor * const compound_page_dtors[];
33f2ef89
AW
635
636static inline void set_compound_page_dtor(struct page *page,
f1e61557 637 enum compound_dtor_id compound_dtor)
33f2ef89 638{
f1e61557
KS
639 VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page);
640 page[1].compound_dtor = compound_dtor;
33f2ef89
AW
641}
642
643static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
644{
f1e61557
KS
645 VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page);
646 return compound_page_dtors[page[1].compound_dtor];
33f2ef89
AW
647}
648
d00181b9 649static inline unsigned int compound_order(struct page *page)
d85f3385 650{
6d777953 651 if (!PageHead(page))
d85f3385 652 return 0;
e4b294c2 653 return page[1].compound_order;
d85f3385
CL
654}
655
f1e61557 656static inline void set_compound_order(struct page *page, unsigned int order)
d85f3385 657{
e4b294c2 658 page[1].compound_order = order;
d85f3385
CL
659}
660
9a982250
KS
661void free_compound_page(struct page *page);
662
3dece370 663#ifdef CONFIG_MMU
14fd403f
AA
664/*
665 * Do pte_mkwrite, but only if the vma says VM_WRITE. We do this when
666 * servicing faults for write access. In the normal case, we always want
667 * pte_mkwrite. But get_user_pages can cause write faults for mappings
668 * that do not have writing enabled, when used by access_process_vm.
669 */
670static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
671{
672 if (likely(vma->vm_flags & VM_WRITE))
673 pte = pte_mkwrite(pte);
674 return pte;
675}
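/*
 * Illustrative sketch, not part of this header: how a fault path typically
 * combines mk_pte(), pte_mkdirty() and maybe_mkwrite() so that the pte only
 * becomes writable when the vma actually allows writing.
 */
#if 0	/* example only, not compiled */
static pte_t example_build_pte(struct page *page, struct vm_area_struct *vma,
			       bool write_fault)
{
	pte_t entry = mk_pte(page, vma->vm_page_prot);

	if (write_fault)
		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
	return entry;
}
#endif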
8c6e50b0 676
82b0f8c3 677int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
7267ec00 678 struct page *page);
9118c0cb 679int finish_fault(struct vm_fault *vmf);
66a6197c 680int finish_mkwrite_fault(struct vm_fault *vmf);
3dece370 681#endif
14fd403f 682
1da177e4
LT
683/*
684 * Multiple processes may "see" the same page. E.g. for untouched
685 * mappings of /dev/null, all processes see the same page full of
686 * zeroes, and text pages of executables and shared libraries have
687 * only one copy in memory, at most, normally.
688 *
689 * For the non-reserved pages, page_count(page) denotes a reference count.
7e871b6c
PBG
690 * page_count() == 0 means the page is free. page->lru is then used for
691 * freelist management in the buddy allocator.
da6052f7 692 * page_count() > 0 means the page has been allocated.
1da177e4 693 *
da6052f7
NP
694 * Pages are allocated by the slab allocator in order to provide memory
695 * to kmalloc and kmem_cache_alloc. In this case, the management of the
696 * page, and the fields in 'struct page' are the responsibility of mm/slab.c
697 * unless a particular usage is carefully commented. (the responsibility of
698 * freeing the kmalloc memory is the caller's, of course).
1da177e4 699 *
da6052f7
NP
700 * A page may be used by anyone else who does a __get_free_page().
701 * In this case, page_count still tracks the references, and should only
702 * be used through the normal accessor functions. The top bits of page->flags
703 * and page->virtual store page management information, but all other fields
704 * are unused and could be used privately, carefully. The management of this
705 * page is the responsibility of the one who allocated it, and those who have
706 * subsequently been given references to it.
707 *
708 * The other pages (we may call them "pagecache pages") are completely
1da177e4
LT
709 * managed by the Linux memory manager: I/O, buffers, swapping etc.
710 * The following discussion applies only to them.
711 *
da6052f7
NP
712 * A pagecache page contains an opaque `private' member, which belongs to the
713 * page's address_space. Usually, this is the address of a circular list of
714 * the page's disk buffers. PG_private must be set to tell the VM to call
715 * into the filesystem to release these pages.
1da177e4 716 *
da6052f7
NP
717 * A page may belong to an inode's memory mapping. In this case, page->mapping
718 * is the pointer to the inode, and page->index is the file offset of the page,
ea1754a0 719 * in units of PAGE_SIZE.
1da177e4 720 *
da6052f7
NP
721 * If pagecache pages are not associated with an inode, they are said to be
722 * anonymous pages. These may become associated with the swapcache, and in that
723 * case PG_swapcache is set, and page->private is an offset into the swapcache.
1da177e4 724 *
da6052f7
NP
725 * In either case (swapcache or inode backed), the pagecache itself holds one
726 * reference to the page. Setting PG_private should also increment the
727 * refcount. Each user mapping also has a reference to the page.
1da177e4 728 *
da6052f7
NP
729 * The pagecache pages are stored in a per-mapping radix tree, which is
730 * rooted at mapping->page_tree, and indexed by offset.
731 * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
732 * lists, we instead now tag pages as dirty/writeback in the radix tree.
1da177e4 733 *
da6052f7 734 * All pagecache pages may be subject to I/O:
1da177e4
LT
735 * - inode pages may need to be read from disk,
736 * - inode pages which have been modified and are MAP_SHARED may need
da6052f7
NP
737 * to be written back to the inode on disk,
738 * - anonymous pages (including MAP_PRIVATE file mappings) which have been
739 * modified may need to be swapped out to swap space and (later) to be read
740 * back into memory.
1da177e4
LT
741 */
742
743/*
744 * The zone field is never updated after free_area_init_core()
745 * sets it, so none of the operations on it need to be atomic.
1da177e4 746 */
348f8b6c 747
90572890 748/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
07808b74 749#define SECTIONS_PGOFF ((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
d41dee36
AW
750#define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH)
751#define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH)
90572890 752#define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH)
d41dee36 753
348f8b6c 754/*
25985edc 755 * Define the bit shifts to access each section. For non-existent
348f8b6c
DH
756 * sections we define the shift as 0; that plus a 0 mask ensures
757 * the compiler will optimise away references to them.
758 */
d41dee36
AW
759#define SECTIONS_PGSHIFT (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
760#define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0))
761#define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0))
90572890 762#define LAST_CPUPID_PGSHIFT (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
348f8b6c 763
bce54bbf
WD
764/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
765#ifdef NODE_NOT_IN_PAGE_FLAGS
89689ae7 766#define ZONEID_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT)
bd8029b6
AW
767#define ZONEID_PGOFF ((SECTIONS_PGOFF < ZONES_PGOFF)? \
768 SECTIONS_PGOFF : ZONES_PGOFF)
d41dee36 769#else
89689ae7 770#define ZONEID_SHIFT (NODES_SHIFT + ZONES_SHIFT)
bd8029b6
AW
771#define ZONEID_PGOFF ((NODES_PGOFF < ZONES_PGOFF)? \
772 NODES_PGOFF : ZONES_PGOFF)
89689ae7
CL
773#endif
774
bd8029b6 775#define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0))
348f8b6c 776
9223b419
CL
777#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
778#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
348f8b6c
DH
779#endif
780
d41dee36
AW
781#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1)
782#define NODES_MASK ((1UL << NODES_WIDTH) - 1)
783#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1)
834a964a 784#define LAST_CPUPID_MASK ((1UL << LAST_CPUPID_SHIFT) - 1)
89689ae7 785#define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1)
348f8b6c 786
33dd4e0e 787static inline enum zone_type page_zonenum(const struct page *page)
1da177e4 788{
348f8b6c 789 return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
1da177e4 790}
1da177e4 791
260ae3f7
DW
792#ifdef CONFIG_ZONE_DEVICE
793static inline bool is_zone_device_page(const struct page *page)
794{
795 return page_zonenum(page) == ZONE_DEVICE;
796}
797#else
798static inline bool is_zone_device_page(const struct page *page)
799{
800 return false;
801}
7b2d55d2 802#endif
5042db43 803
6b368cd4 804#if defined(CONFIG_DEVICE_PRIVATE) || defined(CONFIG_DEVICE_PUBLIC)
df6ad698 805void put_zone_device_private_or_public_page(struct page *page);
6b368cd4
JG
806DECLARE_STATIC_KEY_FALSE(device_private_key);
807#define IS_HMM_ENABLED static_branch_unlikely(&device_private_key)
808static inline bool is_device_private_page(const struct page *page);
809static inline bool is_device_public_page(const struct page *page);
810#else /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
df6ad698 811static inline void put_zone_device_private_or_public_page(struct page *page)
5042db43 812{
5042db43 813}
6b368cd4
JG
814#define IS_HMM_ENABLED 0
815static inline bool is_device_private_page(const struct page *page)
816{
817 return false;
818}
819static inline bool is_device_public_page(const struct page *page)
820{
821 return false;
822}
df6ad698 823#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
260ae3f7 824
7b2d55d2 825
3565fce3
DW
826static inline void get_page(struct page *page)
827{
828 page = compound_head(page);
829 /*
830 * Getting a normal page or the head of a compound page
0139aa7b 831 * requires to already have an elevated page->_refcount.
3565fce3 832 */
fe896d18
JK
833 VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page);
834 page_ref_inc(page);
3565fce3
DW
835}
836
837static inline void put_page(struct page *page)
838{
839 page = compound_head(page);
840
7b2d55d2
JG
841 /*
842 * For private device pages we need to catch refcount transition from
843 * 2 to 1, when refcount reach one it means the private device page is
844 * free and we need to inform the device driver through callback. See
845 * include/linux/memremap.h and HMM for details.
846 */
6b368cd4
JG
847 if (IS_HMM_ENABLED && unlikely(is_device_private_page(page) ||
848 unlikely(is_device_public_page(page)))) {
df6ad698 849 put_zone_device_private_or_public_page(page);
7b2d55d2
JG
850 return;
851 }
852
3565fce3
DW
853 if (put_page_testzero(page))
854 __put_page(page);
3565fce3
DW
855}
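/*
 * Illustrative sketch, not part of this header: get_page()/put_page() pairing
 * while temporarily operating on a page; put_page() frees the page when the
 * last reference is dropped (or notifies the device driver for the HMM pages
 * described above).
 */
#if 0	/* example only, not compiled */
static void example_use_page(struct page *page)
{
	get_page(page);			/* keep the page alive */
	/* ... access the page contents ... */
	put_page(page);			/* may free it */
}
#endif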
856
9127ab4f
CS
857#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
858#define SECTION_IN_PAGE_FLAGS
859#endif
860
89689ae7 861/*
7a8010cd
VB
862 * The identification function is mainly used by the buddy allocator for
863 * determining if two pages could be buddies. We are not really identifying
864 * the zone since we could be using the section number id if we do not have
865 * node id available in page flags.
866 * We only guarantee that it will return the same value for two combinable
867 * pages in a zone.
89689ae7 868 */
cb2b95e1
AW
869static inline int page_zone_id(struct page *page)
870{
89689ae7 871 return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
348f8b6c
DH
872}
873
25ba77c1 874static inline int zone_to_nid(struct zone *zone)
89fa3024 875{
d5f541ed
CL
876#ifdef CONFIG_NUMA
877 return zone->node;
878#else
879 return 0;
880#endif
89fa3024
CL
881}
882
89689ae7 883#ifdef NODE_NOT_IN_PAGE_FLAGS
33dd4e0e 884extern int page_to_nid(const struct page *page);
89689ae7 885#else
33dd4e0e 886static inline int page_to_nid(const struct page *page)
d41dee36 887{
89689ae7 888 return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
d41dee36 889}
89689ae7
CL
890#endif
891
57e0a030 892#ifdef CONFIG_NUMA_BALANCING
90572890 893static inline int cpu_pid_to_cpupid(int cpu, int pid)
57e0a030 894{
90572890 895 return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
57e0a030
MG
896}
897
90572890 898static inline int cpupid_to_pid(int cpupid)
57e0a030 899{
90572890 900 return cpupid & LAST__PID_MASK;
57e0a030 901}
b795854b 902
90572890 903static inline int cpupid_to_cpu(int cpupid)
b795854b 904{
90572890 905 return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
b795854b
MG
906}
907
90572890 908static inline int cpupid_to_nid(int cpupid)
b795854b 909{
90572890 910 return cpu_to_node(cpupid_to_cpu(cpupid));
b795854b
MG
911}
912
90572890 913static inline bool cpupid_pid_unset(int cpupid)
57e0a030 914{
90572890 915 return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
b795854b
MG
916}
917
90572890 918static inline bool cpupid_cpu_unset(int cpupid)
b795854b 919{
90572890 920 return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
b795854b
MG
921}
922
8c8a743c
PZ
923static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
924{
925 return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
926}
927
928#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
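/*
 * For illustration: cpu_pid_to_cpupid() packs the (masked) cpu and pid into a
 * single int, and cpupid_to_cpu()/cpupid_to_pid() recover them, e.g.
 *
 *	int cpupid = cpu_pid_to_cpupid(cpu, pid);
 *	cpupid_to_cpu(cpupid) == (cpu & LAST__CPU_MASK);
 *	cpupid_to_pid(cpupid) == (pid & LAST__PID_MASK);
 */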
90572890
PZ
929#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
930static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
b795854b 931{
1ae71d03 932 return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
b795854b 933}
90572890
PZ
934
935static inline int page_cpupid_last(struct page *page)
936{
937 return page->_last_cpupid;
938}
939static inline void page_cpupid_reset_last(struct page *page)
b795854b 940{
1ae71d03 941 page->_last_cpupid = -1 & LAST_CPUPID_MASK;
57e0a030
MG
942}
943#else
90572890 944static inline int page_cpupid_last(struct page *page)
75980e97 945{
90572890 946 return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
75980e97
PZ
947}
948
90572890 949extern int page_cpupid_xchg_last(struct page *page, int cpupid);
75980e97 950
90572890 951static inline void page_cpupid_reset_last(struct page *page)
75980e97 952{
09940a4f 953 page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
75980e97 954}
90572890
PZ
955#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
956#else /* !CONFIG_NUMA_BALANCING */
957static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
57e0a030 958{
90572890 959 return page_to_nid(page); /* XXX */
57e0a030
MG
960}
961
90572890 962static inline int page_cpupid_last(struct page *page)
57e0a030 963{
90572890 964 return page_to_nid(page); /* XXX */
57e0a030
MG
965}
966
90572890 967static inline int cpupid_to_nid(int cpupid)
b795854b
MG
968{
969 return -1;
970}
971
90572890 972static inline int cpupid_to_pid(int cpupid)
b795854b
MG
973{
974 return -1;
975}
976
90572890 977static inline int cpupid_to_cpu(int cpupid)
b795854b
MG
978{
979 return -1;
980}
981
90572890
PZ
982static inline int cpu_pid_to_cpupid(int nid, int pid)
983{
984 return -1;
985}
986
987static inline bool cpupid_pid_unset(int cpupid)
b795854b
MG
988{
989 return 1;
990}
991
90572890 992static inline void page_cpupid_reset_last(struct page *page)
57e0a030
MG
993{
994}
8c8a743c
PZ
995
996static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
997{
998 return false;
999}
90572890 1000#endif /* CONFIG_NUMA_BALANCING */
57e0a030 1001
33dd4e0e 1002static inline struct zone *page_zone(const struct page *page)
89689ae7
CL
1003{
1004 return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
1005}
1006
75ef7184
MG
1007static inline pg_data_t *page_pgdat(const struct page *page)
1008{
1009 return NODE_DATA(page_to_nid(page));
1010}
1011
9127ab4f 1012#ifdef SECTION_IN_PAGE_FLAGS
bf4e8902
DK
1013static inline void set_page_section(struct page *page, unsigned long section)
1014{
1015 page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
1016 page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
1017}
1018
aa462abe 1019static inline unsigned long page_to_section(const struct page *page)
d41dee36
AW
1020{
1021 return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
1022}
308c05e3 1023#endif
d41dee36 1024
2f1b6248 1025static inline void set_page_zone(struct page *page, enum zone_type zone)
348f8b6c
DH
1026{
1027 page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
1028 page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
1029}
2f1b6248 1030
348f8b6c
DH
1031static inline void set_page_node(struct page *page, unsigned long node)
1032{
1033 page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
1034 page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
1da177e4 1035}
89689ae7 1036
2f1b6248 1037static inline void set_page_links(struct page *page, enum zone_type zone,
d41dee36 1038 unsigned long node, unsigned long pfn)
1da177e4 1039{
348f8b6c
DH
1040 set_page_zone(page, zone);
1041 set_page_node(page, node);
9127ab4f 1042#ifdef SECTION_IN_PAGE_FLAGS
d41dee36 1043 set_page_section(page, pfn_to_section_nr(pfn));
bf4e8902 1044#endif
1da177e4
LT
1045}
1046
0610c25d
GT
1047#ifdef CONFIG_MEMCG
1048static inline struct mem_cgroup *page_memcg(struct page *page)
1049{
1050 return page->mem_cgroup;
1051}
55779ec7
JW
1052static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
1053{
1054 WARN_ON_ONCE(!rcu_read_lock_held());
1055 return READ_ONCE(page->mem_cgroup);
1056}
0610c25d
GT
1057#else
1058static inline struct mem_cgroup *page_memcg(struct page *page)
1059{
1060 return NULL;
1061}
55779ec7
JW
1062static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
1063{
1064 WARN_ON_ONCE(!rcu_read_lock_held());
1065 return NULL;
1066}
0610c25d
GT
1067#endif
1068
f6ac2354
CL
1069/*
1070 * Some inline functions in vmstat.h depend on page_zone()
1071 */
1072#include <linux/vmstat.h>
1073
33dd4e0e 1074static __always_inline void *lowmem_page_address(const struct page *page)
1da177e4 1075{
1dff8083 1076 return page_to_virt(page);
1da177e4
LT
1077}
1078
1079#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
1080#define HASHED_PAGE_VIRTUAL
1081#endif
1082
1083#if defined(WANT_PAGE_VIRTUAL)
f92f455f
GU
1084static inline void *page_address(const struct page *page)
1085{
1086 return page->virtual;
1087}
1088static inline void set_page_address(struct page *page, void *address)
1089{
1090 page->virtual = address;
1091}
1da177e4
LT
1092#define page_address_init() do { } while(0)
1093#endif
1094
1095#if defined(HASHED_PAGE_VIRTUAL)
f9918794 1096void *page_address(const struct page *page);
1da177e4
LT
1097void set_page_address(struct page *page, void *virtual);
1098void page_address_init(void);
1099#endif
1100
1101#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
1102#define page_address(page) lowmem_page_address(page)
1103#define set_page_address(page, address) do { } while(0)
1104#define page_address_init() do { } while(0)
1105#endif
1106
e39155ea
KS
1107extern void *page_rmapping(struct page *page);
1108extern struct anon_vma *page_anon_vma(struct page *page);
9800339b 1109extern struct address_space *page_mapping(struct page *page);
1da177e4 1110
f981c595
MG
1111extern struct address_space *__page_file_mapping(struct page *);
1112
1113static inline
1114struct address_space *page_file_mapping(struct page *page)
1115{
1116 if (unlikely(PageSwapCache(page)))
1117 return __page_file_mapping(page);
1118
1119 return page->mapping;
1120}
1121
f6ab1f7f
HY
1122extern pgoff_t __page_file_index(struct page *page);
1123
1da177e4
LT
1124/*
1125 * Return the pagecache index of the passed page. Regular pagecache pages
f6ab1f7f 1126 * use ->index whereas swapcache pages use swp_offset(->private)
1da177e4
LT
1127 */
1128static inline pgoff_t page_index(struct page *page)
1129{
1130 if (unlikely(PageSwapCache(page)))
f6ab1f7f 1131 return __page_file_index(page);
1da177e4
LT
1132 return page->index;
1133}
1134
1aa8aea5 1135bool page_mapped(struct page *page);
bda807d4 1136struct address_space *page_mapping(struct page *page);
1da177e4 1137
2f064f34
MH
1138/*
1139 * Return true only if the page has been allocated with
1140 * ALLOC_NO_WATERMARKS and the low watermark was not
1141 * met implying that the system is under some pressure.
1142 */
1143static inline bool page_is_pfmemalloc(struct page *page)
1144{
1145 /*
1146 * Page index cannot be this large so this must be
1147 * a pfmemalloc page.
1148 */
1149 return page->index == -1UL;
1150}
1151
1152/*
1153 * Only to be called by the page allocator on a freshly allocated
1154 * page.
1155 */
1156static inline void set_page_pfmemalloc(struct page *page)
1157{
1158 page->index = -1UL;
1159}
1160
1161static inline void clear_page_pfmemalloc(struct page *page)
1162{
1163 page->index = 0;
1164}
1165
1da177e4
LT
1166/*
1167 * Different kinds of faults, as returned by handle_mm_fault().
1168 * Used to decide whether a process gets delivered SIGBUS or
1169 * just gets major/minor fault counters bumped up.
1170 */
d0217ac0 1171
83c54070
NP
1172#define VM_FAULT_OOM 0x0001
1173#define VM_FAULT_SIGBUS 0x0002
1174#define VM_FAULT_MAJOR 0x0004
1175#define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */
aa50d3a7
AK
1176#define VM_FAULT_HWPOISON 0x0010 /* Hit poisoned small page */
1177#define VM_FAULT_HWPOISON_LARGE 0x0020 /* Hit poisoned large page. Index encoded in upper bits */
33692f27 1178#define VM_FAULT_SIGSEGV 0x0040
f33ea7f4 1179
83c54070
NP
1180#define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */
1181#define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */
d065bd81 1182#define VM_FAULT_RETRY 0x0400 /* ->fault blocked, must retry */
c0292554 1183#define VM_FAULT_FALLBACK 0x0800 /* huge page fault failed, fall back to small */
b1aa812b 1184#define VM_FAULT_DONE_COW 0x1000 /* ->fault has fully handled COW */
1da177e4 1185
aa50d3a7
AK
1186#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
1187
33692f27
LT
1188#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
1189 VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \
1190 VM_FAULT_FALLBACK)
aa50d3a7 1191
282a8e03
RZ
1192#define VM_FAULT_RESULT_TRACE \
1193 { VM_FAULT_OOM, "OOM" }, \
1194 { VM_FAULT_SIGBUS, "SIGBUS" }, \
1195 { VM_FAULT_MAJOR, "MAJOR" }, \
1196 { VM_FAULT_WRITE, "WRITE" }, \
1197 { VM_FAULT_HWPOISON, "HWPOISON" }, \
1198 { VM_FAULT_HWPOISON_LARGE, "HWPOISON_LARGE" }, \
1199 { VM_FAULT_SIGSEGV, "SIGSEGV" }, \
1200 { VM_FAULT_NOPAGE, "NOPAGE" }, \
1201 { VM_FAULT_LOCKED, "LOCKED" }, \
1202 { VM_FAULT_RETRY, "RETRY" }, \
1203 { VM_FAULT_FALLBACK, "FALLBACK" }, \
1204 { VM_FAULT_DONE_COW, "DONE_COW" }
1205
aa50d3a7
AK
1206/* Encode hstate index for a hwpoisoned large page */
1207#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
1208#define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf)
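/*
 * For illustration: VM_FAULT_SET_HINDEX(2) yields 0x2000, and
 * VM_FAULT_GET_HINDEX(VM_FAULT_HWPOISON_LARGE | 0x2000) recovers 2, i.e. the
 * hstate index rides in bits 12-15 of the fault code.
 */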
d0217ac0 1209
1c0fe6e3
NP
1210/*
1211 * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
1212 */
1213extern void pagefault_out_of_memory(void);
1214
1da177e4
LT
1215#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
1216
ddd588b5 1217/*
7bf02ea2 1218 * Flags passed to show_mem() and show_free_areas() to suppress output in
ddd588b5
DR
1219 * various contexts.
1220 */
4b59e6c4 1221#define SHOW_MEM_FILTER_NODES (0x0001u) /* disallowed nodes */
ddd588b5 1222
9af744d7 1223extern void show_free_areas(unsigned int flags, nodemask_t *nodemask);
1da177e4 1224
7f43add4 1225extern bool can_do_mlock(void);
1da177e4
LT
1226extern int user_shm_lock(size_t, struct user_struct *);
1227extern void user_shm_unlock(size_t, struct user_struct *);
1228
1229/*
1230 * Parameter block passed down to zap_pte_range in exceptional cases.
1231 */
1232struct zap_details {
1da177e4
LT
1233 struct address_space *check_mapping; /* Check page->mapping if set */
1234 pgoff_t first_index; /* Lowest page->index to unmap */
1235 pgoff_t last_index; /* Highest page->index to unmap */
1da177e4
LT
1236};
1237
df6ad698
JG
1238struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
1239 pte_t pte, bool with_public_device);
1240#define vm_normal_page(vma, addr, pte) _vm_normal_page(vma, addr, pte, false)
1241
28093f9f
GS
1242struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
1243 pmd_t pmd);
7e675137 1244
c627f9cc
JS
1245int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
1246 unsigned long size);
14f5ff5d 1247void zap_page_range(struct vm_area_struct *vma, unsigned long address,
ecf1385d 1248 unsigned long size);
4f74d2c8
LT
1249void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
1250 unsigned long start, unsigned long end);
e6473092
MM
1251
1252/**
1253 * mm_walk - callbacks for walk_page_range
a00cc7d9
MW
1254 * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
1255 * this handler should only handle pud_trans_huge() puds.
1256 * the pmd_entry or pte_entry callbacks will be used for
1257 * regular PUDs.
e6473092 1258 * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
03319327
DH
1259 * this handler is required to be able to handle
1260 * pmd_trans_huge() pmds. They may simply choose to
1261 * split_huge_page() instead of handling it explicitly.
e6473092
MM
1262 * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
1263 * @pte_hole: if set, called for each hole at all levels
5dc37642 1264 * @hugetlb_entry: if set, called for each hugetlb entry
fafaa426 1265 * @test_walk: caller specific callback function to determine whether
f7e2355f 1266 * we walk over the current vma or not. Returning 0
fafaa426
NH
1267 * means "do page table walk over the current vma,"
1268 * and a negative one means "abort current page table walk
f7e2355f 1269 * right now." 1 means "skip the current vma."
fafaa426
NH
1270 * @mm: mm_struct representing the target process of page table walk
1271 * @vma: vma currently walked (NULL if walking outside vmas)
1272 * @private: private data for callbacks' usage
e6473092 1273 *
fafaa426 1274 * (see the comment on walk_page_range() for more details)
e6473092
MM
1275 */
1276struct mm_walk {
a00cc7d9
MW
1277 int (*pud_entry)(pud_t *pud, unsigned long addr,
1278 unsigned long next, struct mm_walk *walk);
0f157a5b
AM
1279 int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
1280 unsigned long next, struct mm_walk *walk);
1281 int (*pte_entry)(pte_t *pte, unsigned long addr,
1282 unsigned long next, struct mm_walk *walk);
1283 int (*pte_hole)(unsigned long addr, unsigned long next,
1284 struct mm_walk *walk);
1285 int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
1286 unsigned long addr, unsigned long next,
1287 struct mm_walk *walk);
fafaa426
NH
1288 int (*test_walk)(unsigned long addr, unsigned long next,
1289 struct mm_walk *walk);
2165009b 1290 struct mm_struct *mm;
fafaa426 1291 struct vm_area_struct *vma;
2165009b 1292 void *private;
e6473092
MM
1293};
1294
2165009b
DH
1295int walk_page_range(unsigned long addr, unsigned long end,
1296 struct mm_walk *walk);
900fc5f1 1297int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk);
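/*
 * Illustrative sketch, not part of this header: counting present ptes in a
 * range with walk_page_range(). The caller is expected to hold mmap_sem for
 * the target mm while walking.
 */
#if 0	/* example only, not compiled */
static int example_count_pte(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	unsigned long *nr_present = walk->private;

	if (pte_present(*pte))
		(*nr_present)++;
	return 0;			/* non-zero would abort the walk */
}

static unsigned long example_count_present(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
	unsigned long nr_present = 0;
	struct mm_walk walk = {
		.pte_entry	= example_count_pte,
		.mm		= mm,
		.private	= &nr_present,
	};

	walk_page_range(start, end, &walk);
	return nr_present;
}
#endif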
42b77728 1298void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
3bf5ee95 1299 unsigned long end, unsigned long floor, unsigned long ceiling);
1da177e4
LT
1300int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
1301 struct vm_area_struct *vma);
1da177e4
LT
1302void unmap_mapping_range(struct address_space *mapping,
1303 loff_t const holebegin, loff_t const holelen, int even_cows);
09796395 1304int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
a4d1a885 1305 unsigned long *start, unsigned long *end,
09796395 1306 pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
3b6748e2
JW
1307int follow_pfn(struct vm_area_struct *vma, unsigned long address,
1308 unsigned long *pfn);
d87fe660 1309int follow_phys(struct vm_area_struct *vma, unsigned long address,
1310 unsigned int flags, unsigned long *prot, resource_size_t *phys);
28b2ee20
RR
1311int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
1312 void *buf, int len, int write);
1da177e4
LT
1313
1314static inline void unmap_shared_mapping_range(struct address_space *mapping,
1315 loff_t const holebegin, loff_t const holelen)
1316{
1317 unmap_mapping_range(mapping, holebegin, holelen, 0);
1318}
1319
7caef267 1320extern void truncate_pagecache(struct inode *inode, loff_t new);
2c27c65e 1321extern void truncate_setsize(struct inode *inode, loff_t newsize);
90a80202 1322void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
623e3db9 1323void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
750b4987 1324int truncate_inode_page(struct address_space *mapping, struct page *page);
25718736 1325int generic_error_remove_page(struct address_space *mapping, struct page *page);
83f78668
WF
1326int invalidate_inode_page(struct page *page);
1327
7ee1dd3f 1328#ifdef CONFIG_MMU
dcddffd4
KS
1329extern int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
1330 unsigned int flags);
5c723ba5 1331extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
4a9e1cda
DD
1332 unsigned long address, unsigned int fault_flags,
1333 bool *unlocked);
7ee1dd3f 1334#else
dcddffd4
KS
1335static inline int handle_mm_fault(struct vm_area_struct *vma,
1336 unsigned long address, unsigned int flags)
7ee1dd3f
DH
1337{
1338 /* should never happen if there's no MMU */
1339 BUG();
1340 return VM_FAULT_SIGBUS;
1341}
5c723ba5
PZ
1342static inline int fixup_user_fault(struct task_struct *tsk,
1343 struct mm_struct *mm, unsigned long address,
4a9e1cda 1344 unsigned int fault_flags, bool *unlocked)
5c723ba5
PZ
1345{
1346 /* should never happen if there's no MMU */
1347 BUG();
1348 return -EFAULT;
1349}
7ee1dd3f 1350#endif
f33ea7f4 1351
f307ab6d
LS
1352extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
1353 unsigned int gup_flags);
5ddd36b9 1354extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
6347e8d5 1355 void *buf, int len, unsigned int gup_flags);
84d77d3f
EB
1356extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
1357 unsigned long addr, void *buf, int len, unsigned int gup_flags);
1da177e4 1358
1e987790
DH
1359long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
1360 unsigned long start, unsigned long nr_pages,
9beae1ea 1361 unsigned int gup_flags, struct page **pages,
5b56d49f 1362 struct vm_area_struct **vmas, int *locked);
c12d2da5 1363long get_user_pages(unsigned long start, unsigned long nr_pages,
768ae309 1364 unsigned int gup_flags, struct page **pages,
cde70140 1365 struct vm_area_struct **vmas);
c12d2da5 1366long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
3b913179 1367 unsigned int gup_flags, struct page **pages, int *locked);
c12d2da5 1368long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
c164154f 1369 struct page **pages, unsigned int gup_flags);
d2bf6be8
NP
1370int get_user_pages_fast(unsigned long start, int nr_pages, int write,
1371 struct page **pages);
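/*
 * Illustrative sketch, not part of this header: pinning a few pages of a user
 * buffer with get_user_pages_fast() and releasing them again with put_page().
 */
#if 0	/* example only, not compiled */
static int example_pin_user_buffer(unsigned long uaddr, struct page **pages,
				   int nr_pages)
{
	int pinned = get_user_pages_fast(uaddr, nr_pages, 1 /* write */, pages);

	if (pinned <= 0)
		return pinned ? pinned : -EFAULT;
	/* ... pages[0..pinned-1] are now safe to access ... */
	while (pinned)
		put_page(pages[--pinned]);
	return 0;
}
#endif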
8025e5dd
JK
1372
1373/* Container for pinned pfns / pages */
1374struct frame_vector {
1375 unsigned int nr_allocated; /* Number of frames we have space for */
1376 unsigned int nr_frames; /* Number of frames stored in ptrs array */
1377 bool got_ref; /* Did we pin pages by getting page ref? */
1378 bool is_pfns; /* Does array contain pages or pfns? */
1379 void *ptrs[0]; /* Array of pinned pfns / pages. Use
1380 * pfns_vector_pages() or pfns_vector_pfns()
1381 * for access */
1382};
1383
1384struct frame_vector *frame_vector_create(unsigned int nr_frames);
1385void frame_vector_destroy(struct frame_vector *vec);
1386int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
7f23b350 1387 unsigned int gup_flags, struct frame_vector *vec);
8025e5dd
JK
1388void put_vaddr_frames(struct frame_vector *vec);
1389int frame_vector_to_pages(struct frame_vector *vec);
1390void frame_vector_to_pfns(struct frame_vector *vec);
1391
1392static inline unsigned int frame_vector_count(struct frame_vector *vec)
1393{
1394 return vec->nr_frames;
1395}
1396
1397static inline struct page **frame_vector_pages(struct frame_vector *vec)
1398{
1399 if (vec->is_pfns) {
1400 int err = frame_vector_to_pages(vec);
1401
1402 if (err)
1403 return ERR_PTR(err);
1404 }
1405 return (struct page **)(vec->ptrs);
1406}
1407
1408static inline unsigned long *frame_vector_pfns(struct frame_vector *vec)
1409{
1410 if (!vec->is_pfns)
1411 frame_vector_to_pfns(vec);
1412 return (unsigned long *)(vec->ptrs);
1413}
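/*
 * Illustrative sketch, not part of this header: typical frame_vector usage -
 * create a vector, pin the user range, use the frames, then unpin and destroy.
 * FOLL_WRITE is the gup flag requesting writable mappings.
 */
#if 0	/* example only, not compiled */
static int example_with_frames(unsigned long start, unsigned int nr_frames)
{
	struct frame_vector *vec = frame_vector_create(nr_frames);
	int ret;

	if (!vec)
		return -ENOMEM;
	ret = get_vaddr_frames(start, nr_frames, FOLL_WRITE, vec);
	if (ret >= 0) {
		/* frame_vector_pages(vec) / frame_vector_pfns(vec) ... */
		put_vaddr_frames(vec);
		ret = 0;
	}
	frame_vector_destroy(vec);
	return ret;
}
#endif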
1414
18022c5d
MG
1415struct kvec;
1416int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
1417 struct page **pages);
1418int get_kernel_page(unsigned long start, int write, struct page **pages);
f3e8fccd 1419struct page *get_dump_page(unsigned long addr);
1da177e4 1420
cf9a2ae8 1421extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
d47992f8
LC
1422extern void do_invalidatepage(struct page *page, unsigned int offset,
1423 unsigned int length);
cf9a2ae8 1424
1da177e4 1425int __set_page_dirty_nobuffers(struct page *page);
76719325 1426int __set_page_dirty_no_writeback(struct page *page);
1da177e4
LT
1427int redirty_page_for_writepage(struct writeback_control *wbc,
1428 struct page *page);
62cccb8c 1429void account_page_dirtied(struct page *page, struct address_space *mapping);
c4843a75 1430void account_page_cleaned(struct page *page, struct address_space *mapping,
62cccb8c 1431 struct bdi_writeback *wb);
b3c97528 1432int set_page_dirty(struct page *page);
1da177e4 1433int set_page_dirty_lock(struct page *page);
11f81bec 1434void cancel_dirty_page(struct page *page);
1da177e4 1435int clear_page_dirty_for_io(struct page *page);
b9ea2515 1436
a9090253 1437int get_cmdline(struct task_struct *task, char *buffer, int buflen);
1da177e4 1438
b5330628
ON
1439static inline bool vma_is_anonymous(struct vm_area_struct *vma)
1440{
1441 return !vma->vm_ops;
1442}
1443
b0506e48
MR
1444#ifdef CONFIG_SHMEM
1445/*
1446 * vma_is_shmem() is not inline because it is used only by slow
1447 * paths in userfaultfd.
1448 */
1449bool vma_is_shmem(struct vm_area_struct *vma);
1450#else
1451static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
1452#endif
1453
d17af505 1454int vma_is_stack_for_current(struct vm_area_struct *vma);
b7643757 1455
b6a2fea3
OW
1456extern unsigned long move_page_tables(struct vm_area_struct *vma,
1457 unsigned long old_addr, struct vm_area_struct *new_vma,
38a76013
ML
1458 unsigned long new_addr, unsigned long len,
1459 bool need_rmap_locks);
7da4d641
PZ
1460extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
1461 unsigned long end, pgprot_t newprot,
4b10e7d5 1462 int dirty_accountable, int prot_numa);
b6a2fea3
OW
1463extern int mprotect_fixup(struct vm_area_struct *vma,
1464 struct vm_area_struct **pprev, unsigned long start,
1465 unsigned long end, unsigned long newflags);
1da177e4 1466
465a454f
PZ
1467/*
1468 * __get_user_pages_fast() doesn't attempt to fault and may return a short count.
1469 */
1470int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
1471 struct page **pages);
d559db08
KH
1472/*
1473 * per-process (per-mm_struct) statistics.
1474 */
d559db08
KH
1475static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
1476{
69c97823
KK
1477 long val = atomic_long_read(&mm->rss_stat.count[member]);
1478
1479#ifdef SPLIT_RSS_COUNTING
1480 /*
1481 * The counter is updated asynchronously and may temporarily go negative.
1482 * But a negative value is never what users expect, so clamp it to zero.
1483 */
1484 if (val < 0)
1485 val = 0;
172703b0 1486#endif
69c97823
KK
1487 return (unsigned long)val;
1488}
d559db08
KH
1489
1490static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
1491{
172703b0 1492 atomic_long_add(value, &mm->rss_stat.count[member]);
d559db08
KH
1493}
1494
1495static inline void inc_mm_counter(struct mm_struct *mm, int member)
1496{
172703b0 1497 atomic_long_inc(&mm->rss_stat.count[member]);
d559db08
KH
1498}
1499
1500static inline void dec_mm_counter(struct mm_struct *mm, int member)
1501{
172703b0 1502 atomic_long_dec(&mm->rss_stat.count[member]);
d559db08
KH
1503}
1504
eca56ff9
JM
1505/* Optimized variant when page is already known not to be PageAnon */
1506static inline int mm_counter_file(struct page *page)
1507{
1508 if (PageSwapBacked(page))
1509 return MM_SHMEMPAGES;
1510 return MM_FILEPAGES;
1511}
1512
1513static inline int mm_counter(struct page *page)
1514{
1515 if (PageAnon(page))
1516 return MM_ANONPAGES;
1517 return mm_counter_file(page);
1518}
1519
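/*
 * Illustrative sketch: when a page is mapped into or unmapped from an mm,
 * the matching counter is adjusted through the wrappers above, e.g.
 *
 *	inc_mm_counter(mm, mm_counter(page));	/ * on mapping 'page' * /
 *	dec_mm_counter(mm, mm_counter(page));	/ * on unmapping it * /
 *
 * so that get_mm_rss() below reflects the anon + file + shmem totals.
 */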
d559db08
KH
1520static inline unsigned long get_mm_rss(struct mm_struct *mm)
1521{
1522 return get_mm_counter(mm, MM_FILEPAGES) +
eca56ff9
JM
1523 get_mm_counter(mm, MM_ANONPAGES) +
1524 get_mm_counter(mm, MM_SHMEMPAGES);
d559db08
KH
1525}
1526
1527static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
1528{
1529 return max(mm->hiwater_rss, get_mm_rss(mm));
1530}
1531
1532static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
1533{
1534 return max(mm->hiwater_vm, mm->total_vm);
1535}
1536
1537static inline void update_hiwater_rss(struct mm_struct *mm)
1538{
1539 unsigned long _rss = get_mm_rss(mm);
1540
1541 if ((mm)->hiwater_rss < _rss)
1542 (mm)->hiwater_rss = _rss;
1543}
1544
1545static inline void update_hiwater_vm(struct mm_struct *mm)
1546{
1547 if (mm->hiwater_vm < mm->total_vm)
1548 mm->hiwater_vm = mm->total_vm;
1549}
1550
695f0559
PC
1551static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
1552{
1553 mm->hiwater_rss = get_mm_rss(mm);
1554}
1555
d559db08
KH
1556static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
1557 struct mm_struct *mm)
1558{
1559 unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
1560
1561 if (*maxrss < hiwater_rss)
1562 *maxrss = hiwater_rss;
1563}
1564
53bddb4e 1565#if defined(SPLIT_RSS_COUNTING)
05af2e10 1566void sync_mm_rss(struct mm_struct *mm);
53bddb4e 1567#else
05af2e10 1568static inline void sync_mm_rss(struct mm_struct *mm)
53bddb4e
KH
1569{
1570}
1571#endif
465a454f 1572
3565fce3
DW
1573#ifndef __HAVE_ARCH_PTE_DEVMAP
1574static inline int pte_devmap(pte_t pte)
1575{
1576 return 0;
1577}
1578#endif
1579
6d2329f8 1580int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
d08b3851 1581
25ca1d6c
NK
1582extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
1583 spinlock_t **ptl);
1584static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
1585 spinlock_t **ptl)
1586{
1587 pte_t *ptep;
1588 __cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
1589 return ptep;
1590}
c9cfcddf 1591
c2febafc
KS
1592#ifdef __PAGETABLE_P4D_FOLDED
1593static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
1594 unsigned long address)
1595{
1596 return 0;
1597}
1598#else
1599int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
1600#endif
1601
b4e98d9a 1602#if defined(__PAGETABLE_PUD_FOLDED) || !defined(CONFIG_MMU)
c2febafc 1603static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d,
5f22df00
NP
1604 unsigned long address)
1605{
1606 return 0;
1607}
b4e98d9a
KS
1608
1609static inline unsigned long mm_nr_puds(const struct mm_struct *mm)
1610{
1611 return 0;
1612}
1613
1614static inline void mm_nr_puds_init(struct mm_struct *mm) {}
1615static inline void mm_inc_nr_puds(struct mm_struct *mm) {}
1616static inline void mm_dec_nr_puds(struct mm_struct *mm) {}
1617
5f22df00 1618#else
c2febafc 1619int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);
b4e98d9a
KS
1620
1621static inline void mm_nr_puds_init(struct mm_struct *mm)
1622{
1623 atomic_long_set(&mm->nr_puds, 0);
1624}
1625
1626static inline unsigned long mm_nr_puds(const struct mm_struct *mm)
1627{
1628 return atomic_long_read(&mm->nr_puds);
1629}
1630
1631static inline void mm_inc_nr_puds(struct mm_struct *mm)
1632{
1633 atomic_long_inc(&mm->nr_puds);
1634}
1635
1636static inline void mm_dec_nr_puds(struct mm_struct *mm)
1637{
1638 atomic_long_dec(&mm->nr_puds);
1639}
5f22df00
NP
1640#endif
1641
2d2f5119 1642#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
5f22df00
NP
1643static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
1644 unsigned long address)
1645{
1646 return 0;
1647}
dc6c9a35 1648
2d2f5119
KS
1649static inline void mm_nr_pmds_init(struct mm_struct *mm) {}
1650
b4e98d9a 1651static inline unsigned long mm_nr_pmds(const struct mm_struct *mm)
dc6c9a35
KS
1652{
1653 return 0;
1654}
1655
1656static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
1657static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
1658
5f22df00 1659#else
1bb3630e 1660int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
dc6c9a35 1661
2d2f5119
KS
1662static inline void mm_nr_pmds_init(struct mm_struct *mm)
1663{
1664 atomic_long_set(&mm->nr_pmds, 0);
1665}
1666
b4e98d9a 1667static inline unsigned long mm_nr_pmds(const struct mm_struct *mm)
dc6c9a35
KS
1668{
1669 return atomic_long_read(&mm->nr_pmds);
1670}
1671
1672static inline void mm_inc_nr_pmds(struct mm_struct *mm)
1673{
1674 atomic_long_inc(&mm->nr_pmds);
1675}
1676
1677static inline void mm_dec_nr_pmds(struct mm_struct *mm)
1678{
1679 atomic_long_dec(&mm->nr_pmds);
1680}
5f22df00
NP
1681#endif
1682
c4812909
KS
1683#ifdef CONFIG_MMU
1684static inline void mm_nr_ptes_init(struct mm_struct *mm)
1685{
1686 atomic_long_set(&mm->nr_ptes, 0);
1687}
1688
1689static inline unsigned long mm_nr_ptes(const struct mm_struct *mm)
1690{
1691 return atomic_long_read(&mm->nr_ptes);
1692}
1693
1694static inline void mm_inc_nr_ptes(struct mm_struct *mm)
1695{
1696 atomic_long_inc(&mm->nr_ptes);
1697}
1698
1699static inline void mm_dec_nr_ptes(struct mm_struct *mm)
1700{
1701 atomic_long_dec(&mm->nr_ptes);
1702}
1703#else
1704static inline void mm_nr_ptes_init(struct mm_struct *mm) {}
1705
1706static inline unsigned long mm_nr_ptes(const struct mm_struct *mm)
1707{
1708 return 0;
1709}
1710
1711static inline void mm_inc_nr_ptes(struct mm_struct *mm) {}
1712static inline void mm_dec_nr_ptes(struct mm_struct *mm) {}
1713#endif
1714
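/*
 * Illustrative sketch: page table (de)population paths are expected to use
 * the wrappers above rather than touching mm->nr_ptes directly, e.g.
 *
 *	if (installed_new_pte_page)
 *		mm_inc_nr_ptes(mm);
 *	...
 *	pte_free(mm, pte_page);
 *	mm_dec_nr_ptes(mm);
 *
 * On !CONFIG_MMU kernels the wrappers compile away and mm_nr_ptes() is 0.
 */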
3ed3a4f0 1715int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address);
1bb3630e
HD
1716int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);
1717
1da177e4
LT
1718/*
1719 * The following ifdef is needed to get the 4level-fixup.h header to work.
1720 * Remove it when 4level-fixup.h has been removed.
1721 */
1bb3630e 1722#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
505a60e2
KS
1723
1724#ifndef __ARCH_HAS_5LEVEL_HACK
c2febafc
KS
1725static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
1726 unsigned long address)
1727{
1728 return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ?
1729 NULL : p4d_offset(pgd, address);
1730}
1731
1732static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
1733 unsigned long address)
1da177e4 1734{
c2febafc
KS
1735 return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ?
1736 NULL : pud_offset(p4d, address);
1da177e4 1737}
505a60e2 1738#endif /* !__ARCH_HAS_5LEVEL_HACK */
1da177e4
LT
1739
1740static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
1741{
1bb3630e
HD
1742 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
1743 NULL: pmd_offset(pud, address);
1da177e4 1744}
1bb3630e
HD
1745#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
1746
57c1ffce 1747#if USE_SPLIT_PTE_PTLOCKS
597d795a 1748#if ALLOC_SPLIT_PTLOCKS
b35f1819 1749void __init ptlock_cache_init(void);
539edb58
PZ
1750extern bool ptlock_alloc(struct page *page);
1751extern void ptlock_free(struct page *page);
1752
1753static inline spinlock_t *ptlock_ptr(struct page *page)
1754{
1755 return page->ptl;
1756}
597d795a 1757#else /* ALLOC_SPLIT_PTLOCKS */
b35f1819
KS
1758static inline void ptlock_cache_init(void)
1759{
1760}
1761
49076ec2
KS
1762static inline bool ptlock_alloc(struct page *page)
1763{
49076ec2
KS
1764 return true;
1765}
539edb58 1766
49076ec2
KS
1767static inline void ptlock_free(struct page *page)
1768{
49076ec2
KS
1769}
1770
1771static inline spinlock_t *ptlock_ptr(struct page *page)
1772{
539edb58 1773 return &page->ptl;
49076ec2 1774}
597d795a 1775#endif /* ALLOC_SPLIT_PTLOCKS */
49076ec2
KS
1776
1777static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
1778{
1779 return ptlock_ptr(pmd_page(*pmd));
1780}
1781
1782static inline bool ptlock_init(struct page *page)
1783{
1784 /*
1785 * prep_new_page() initializes page->private (and therefore page->ptl)
1786 * with 0. Make sure nobody took it into use in between.
1787 *
1788 * That can happen if an arch tries to use slab for page table allocation:
1d798ca3 1789 * slab code uses page->slab_cache, which shares storage with page->ptl.
49076ec2 1790 */
309381fe 1791 VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
49076ec2
KS
1792 if (!ptlock_alloc(page))
1793 return false;
1794 spin_lock_init(ptlock_ptr(page));
1795 return true;
1796}
1797
1798/* Reset page->mapping so free_pages_check won't complain. */
1799static inline void pte_lock_deinit(struct page *page)
1800{
1801 page->mapping = NULL;
1802 ptlock_free(page);
1803}
1804
57c1ffce 1805#else /* !USE_SPLIT_PTE_PTLOCKS */
4c21e2f2
HD
1806/*
1807 * We use mm->page_table_lock to guard all pagetable pages of the mm.
1808 */
49076ec2
KS
1809static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
1810{
1811 return &mm->page_table_lock;
1812}
b35f1819 1813static inline void ptlock_cache_init(void) {}
49076ec2
KS
1814static inline bool ptlock_init(struct page *page) { return true; }
1815static inline void pte_lock_deinit(struct page *page) {}
57c1ffce 1816#endif /* USE_SPLIT_PTE_PTLOCKS */
4c21e2f2 1817
b35f1819
KS
1818static inline void pgtable_init(void)
1819{
1820 ptlock_cache_init();
1821 pgtable_cache_init();
1822}
1823
390f44e2 1824static inline bool pgtable_page_ctor(struct page *page)
2f569afd 1825{
706874e9
VD
1826 if (!ptlock_init(page))
1827 return false;
2f569afd 1828 inc_zone_page_state(page, NR_PAGETABLE);
706874e9 1829 return true;
2f569afd
MS
1830}
1831
1832static inline void pgtable_page_dtor(struct page *page)
1833{
1834 pte_lock_deinit(page);
1835 dec_zone_page_state(page, NR_PAGETABLE);
1836}
1837
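/*
 * Illustrative sketch (hypothetical arch pte page allocator): the ctor/dtor
 * pair above brackets the lifetime of a page table page, setting up the
 * split ptlock and the NR_PAGETABLE statistics.
 *
 *	struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
 *
 *	if (page && !pgtable_page_ctor(page)) {
 *		__free_page(page);
 *		page = NULL;
 *	}
 *	/ * ... later, when tearing the table down ... * /
 *	pgtable_page_dtor(page);
 *	__free_page(page);
 */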
c74df32c
HD
1838#define pte_offset_map_lock(mm, pmd, address, ptlp) \
1839({ \
4c21e2f2 1840 spinlock_t *__ptl = pte_lockptr(mm, pmd); \
c74df32c
HD
1841 pte_t *__pte = pte_offset_map(pmd, address); \
1842 *(ptlp) = __ptl; \
1843 spin_lock(__ptl); \
1844 __pte; \
1845})
1846
1847#define pte_unmap_unlock(pte, ptl) do { \
1848 spin_unlock(ptl); \
1849 pte_unmap(pte); \
1850} while (0)
1851
3ed3a4f0
KS
1852#define pte_alloc(mm, pmd, address) \
1853 (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd, address))
1854
1855#define pte_alloc_map(mm, pmd, address) \
1856 (pte_alloc(mm, pmd, address) ? NULL : pte_offset_map(pmd, address))
1bb3630e 1857
c74df32c 1858#define pte_alloc_map_lock(mm, pmd, address, ptlp) \
3ed3a4f0
KS
1859 (pte_alloc(mm, pmd, address) ? \
1860 NULL : pte_offset_map_lock(mm, pmd, address, ptlp))
c74df32c 1861
1bb3630e 1862#define pte_alloc_kernel(pmd, address) \
8ac1f832 1863 ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
1bb3630e 1864 NULL: pte_offset_kernel(pmd, address))
1da177e4 1865
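/*
 * Illustrative sketch of how the allocation helpers above compose when
 * populating a single user address (error handling trimmed, names are
 * hypothetical):
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	p4d_t *p4d = p4d_alloc(mm, pgd, addr);
 *	pud_t *pud = p4d ? pud_alloc(mm, p4d, addr) : NULL;
 *	pmd_t *pmd = pud ? pmd_alloc(mm, pud, addr) : NULL;
 *	spinlock_t *ptl;
 *	pte_t *pte = pmd ? pte_alloc_map_lock(mm, pmd, addr, &ptl) : NULL;
 *
 *	if (pte) {
 *		/ * ... set_pte_at(mm, addr, pte, entry) under ptl ... * /
 *		pte_unmap_unlock(pte, ptl);
 *	}
 */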
e009bb30
KS
1866#if USE_SPLIT_PMD_PTLOCKS
1867
634391ac
MS
1868static struct page *pmd_to_page(pmd_t *pmd)
1869{
1870 unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
1871 return virt_to_page((void *)((unsigned long) pmd & mask));
1872}
1873
e009bb30
KS
1874static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
1875{
634391ac 1876 return ptlock_ptr(pmd_to_page(pmd));
e009bb30
KS
1877}
1878
1879static inline bool pgtable_pmd_page_ctor(struct page *page)
1880{
e009bb30
KS
1881#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1882 page->pmd_huge_pte = NULL;
1883#endif
49076ec2 1884 return ptlock_init(page);
e009bb30
KS
1885}
1886
1887static inline void pgtable_pmd_page_dtor(struct page *page)
1888{
1889#ifdef CONFIG_TRANSPARENT_HUGEPAGE
309381fe 1890 VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
e009bb30 1891#endif
49076ec2 1892 ptlock_free(page);
e009bb30
KS
1893}
1894
634391ac 1895#define pmd_huge_pte(mm, pmd) (pmd_to_page(pmd)->pmd_huge_pte)
e009bb30
KS
1896
1897#else
1898
9a86cb7b
KS
1899static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
1900{
1901 return &mm->page_table_lock;
1902}
1903
e009bb30
KS
1904static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; }
1905static inline void pgtable_pmd_page_dtor(struct page *page) {}
1906
c389a250 1907#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
9a86cb7b 1908
e009bb30
KS
1909#endif
1910
9a86cb7b
KS
1911static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
1912{
1913 spinlock_t *ptl = pmd_lockptr(mm, pmd);
1914 spin_lock(ptl);
1915 return ptl;
1916}
1917
a00cc7d9
MW
1918/*
1919 * No scalability reason to split PUD locks yet, but follow the same pattern
1920 * as the PMD locks to make it easier if we decide to. The VM should not be
1921 * considered ready to switch to split PUD locks yet; there may be places
1922 * which need to be converted from page_table_lock.
1923 */
1924static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud)
1925{
1926 return &mm->page_table_lock;
1927}
1928
1929static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
1930{
1931 spinlock_t *ptl = pud_lockptr(mm, pud);
1932
1933 spin_lock(ptl);
1934 return ptl;
1935}
62906027 1936
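/*
 * Illustrative sketch: huge-pmd code takes the per-pmd lock around any
 * inspection or update of a pmd entry, e.g.
 *
 *	spinlock_t *ptl = pmd_lock(mm, pmd);
 *
 *	if (pmd_trans_huge(*pmd)) {
 *		/ * ... operate on the huge pmd ... * /
 *	}
 *	spin_unlock(ptl);
 */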
a00cc7d9 1937extern void __init pagecache_init(void);
1da177e4 1938extern void free_area_init(unsigned long * zones_size);
9109fb7b
JW
1939extern void free_area_init_node(int nid, unsigned long * zones_size,
1940 unsigned long zone_start_pfn, unsigned long *zholes_size);
49a7f04a
DH
1941extern void free_initmem(void);
1942
69afade7
JL
1943/*
1944 * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK)
1945 * into the buddy system. The freed pages will be poisoned with pattern
dbe67df4 1946 * "poison" if it is within the range [0, UCHAR_MAX].
69afade7
JL
1947 * Returns the number of pages freed into the buddy system.
1948 */
11199692 1949extern unsigned long free_reserved_area(void *start, void *end,
69afade7 1950 int poison, char *s);
c3d5f5f0 1951
cfa11e08
JL
1952#ifdef CONFIG_HIGHMEM
1953/*
1954 * Free a highmem page into the buddy system, adjusting totalhigh_pages
1955 * and totalram_pages.
1956 */
1957extern void free_highmem_page(struct page *page);
1958#endif
69afade7 1959
c3d5f5f0 1960extern void adjust_managed_page_count(struct page *page, long count);
7ee3d4e8 1961extern void mem_init_print_info(const char *str);
69afade7 1962
4b50bcc7 1963extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end);
92923ca3 1964
69afade7
JL
1965/* Free the reserved page into the buddy system, so it gets managed. */
1966static inline void __free_reserved_page(struct page *page)
1967{
1968 ClearPageReserved(page);
1969 init_page_count(page);
1970 __free_page(page);
1971}
1972
1973static inline void free_reserved_page(struct page *page)
1974{
1975 __free_reserved_page(page);
1976 adjust_managed_page_count(page, 1);
1977}
1978
1979static inline void mark_page_reserved(struct page *page)
1980{
1981 SetPageReserved(page);
1982 adjust_managed_page_count(page, -1);
1983}
1984
1985/*
1986 * Default method to free all the __init memory into the buddy system.
dbe67df4
JL
1987 * The freed pages will be poisoned with pattern "poison" if it is within
1988 * the range [0, UCHAR_MAX].
1989 * Returns the number of pages freed into the buddy system.
69afade7
JL
1990 */
1991static inline unsigned long free_initmem_default(int poison)
1992{
1993 extern char __init_begin[], __init_end[];
1994
11199692 1995 return free_reserved_area(&__init_begin, &__init_end,
69afade7
JL
1996 poison, "unused kernel");
1997}
1998
7ee3d4e8
JL
1999static inline unsigned long get_num_physpages(void)
2000{
2001 int nid;
2002 unsigned long phys_pages = 0;
2003
2004 for_each_online_node(nid)
2005 phys_pages += node_present_pages(nid);
2006
2007 return phys_pages;
2008}
2009
0ee332c1 2010#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
c713216d 2011/*
0ee332c1 2012 * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its
c713216d
MG
2013 * zones, allocate the backing mem_map and account for memory holes in a more
2014 * architecture independent manner. This is a substitute for creating the
2015 * zone_sizes[] and zholes_size[] arrays and passing them to
2016 * free_area_init_node()
2017 *
2018 * An architecture is expected to register ranges of page frames backed by
0ee332c1 2019 * physical memory with memblock_add[_node]() before calling
c713216d
MG
2020 * free_area_init_nodes(), passing in the PFN each zone ends at. For basic
2021 * usage, an architecture is expected to do something like:
2022 *
2023 * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
2024 * max_highmem_pfn};
2025 * for_each_valid_physical_page_range()
0ee332c1 2026 * memblock_add_node(base, size, nid)
c713216d
MG
2027 * free_area_init_nodes(max_zone_pfns);
2028 *
0ee332c1
TH
2029 * free_bootmem_with_active_regions() calls free_bootmem_node() for each
2030 * registered physical page range. Similarly
2031 * sparse_memory_present_with_active_regions() calls memory_present() for
2032 * each range when SPARSEMEM is enabled.
c713216d
MG
2033 *
2034 * See mm/page_alloc.c for more information on each function exposed by
0ee332c1 2035 * CONFIG_HAVE_MEMBLOCK_NODE_MAP.
c713216d
MG
2036 */
2037extern void free_area_init_nodes(unsigned long *max_zone_pfn);
1e01979c 2038unsigned long node_map_pfn_alignment(void);
32996250
YL
2039unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
2040 unsigned long end_pfn);
c713216d
MG
2041extern unsigned long absent_pages_in_range(unsigned long start_pfn,
2042 unsigned long end_pfn);
2043extern void get_pfn_range_for_nid(unsigned int nid,
2044 unsigned long *start_pfn, unsigned long *end_pfn);
2045extern unsigned long find_min_pfn_with_active_regions(void);
c713216d
MG
2046extern void free_bootmem_with_active_regions(int nid,
2047 unsigned long max_low_pfn);
2048extern void sparse_memory_present_with_active_regions(int nid);
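/*
 * Expanding the usage sketched in the comment above into hypothetical arch
 * setup code (zone names depend on the architecture's Kconfig; base, size,
 * nid and the max_*_pfn values are whatever the platform discovered):
 *
 *	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0 };
 *
 *	/ * register all RAM ranges first * /
 *	memblock_add_node(base, size, nid);
 *	...
 *	max_zone_pfns[ZONE_DMA]    = max_dma_pfn;
 *	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 *	free_area_init_nodes(max_zone_pfns);
 */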
f2dbcfa7 2049
0ee332c1 2050#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
f2dbcfa7 2051
0ee332c1 2052#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
f2dbcfa7 2053 !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
8a942fde
MG
2054static inline int __early_pfn_to_nid(unsigned long pfn,
2055 struct mminit_pfnnid_cache *state)
f2dbcfa7
KH
2056{
2057 return 0;
2058}
2059#else
2060/* please see mm/page_alloc.c */
2061extern int __meminit early_pfn_to_nid(unsigned long pfn);
f2dbcfa7 2062/* there is a per-arch backend function. */
8a942fde
MG
2063extern int __meminit __early_pfn_to_nid(unsigned long pfn,
2064 struct mminit_pfnnid_cache *state);
f2dbcfa7
KH
2065#endif
2066
0e0b864e 2067extern void set_dma_reserve(unsigned long new_dma_reserve);
a2f3aa02
DH
2068extern void memmap_init_zone(unsigned long, int, unsigned long,
2069 unsigned long, enum memmap_context);
bc75d33f 2070extern void setup_per_zone_wmarks(void);
1b79acc9 2071extern int __meminit init_per_zone_wmark_min(void);
1da177e4 2072extern void mem_init(void);
8feae131 2073extern void __init mmap_init(void);
9af744d7 2074extern void show_mem(unsigned int flags, nodemask_t *nodemask);
d02bd27b 2075extern long si_mem_available(void);
1da177e4
LT
2076extern void si_meminfo(struct sysinfo * val);
2077extern void si_meminfo_node(struct sysinfo *val, int nid);
f6f34b43
SD
2078#ifdef __HAVE_ARCH_RESERVED_KERNEL_PAGES
2079extern unsigned long arch_reserved_kernel_pages(void);
2080#endif
1da177e4 2081
a8e99259
MH
2082extern __printf(3, 4)
2083void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);
a238ab5b 2084
e7c8d5c9 2085extern void setup_per_cpu_pageset(void);
e7c8d5c9 2086
112067f0 2087extern void zone_pcp_update(struct zone *zone);
340175b7 2088extern void zone_pcp_reset(struct zone *zone);
112067f0 2089
75f7ad8e
PS
2090/* page_alloc.c */
2091extern int min_free_kbytes;
795ae7a0 2092extern int watermark_scale_factor;
75f7ad8e 2093
8feae131 2094/* nommu.c */
33e5d769 2095extern atomic_long_t mmap_pages_allocated;
7e660872 2096extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
8feae131 2097
6b2dbba8 2098/* interval_tree.c */
6b2dbba8 2099void vma_interval_tree_insert(struct vm_area_struct *node,
f808c13f 2100 struct rb_root_cached *root);
9826a516
ML
2101void vma_interval_tree_insert_after(struct vm_area_struct *node,
2102 struct vm_area_struct *prev,
f808c13f 2103 struct rb_root_cached *root);
6b2dbba8 2104void vma_interval_tree_remove(struct vm_area_struct *node,
f808c13f
DB
2105 struct rb_root_cached *root);
2106struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root_cached *root,
6b2dbba8
ML
2107 unsigned long start, unsigned long last);
2108struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
2109 unsigned long start, unsigned long last);
2110
2111#define vma_interval_tree_foreach(vma, root, start, last) \
2112 for (vma = vma_interval_tree_iter_first(root, start, last); \
2113 vma; vma = vma_interval_tree_iter_next(vma, start, last))
1da177e4 2114
bf181b9f 2115void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
f808c13f 2116 struct rb_root_cached *root);
bf181b9f 2117void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
f808c13f
DB
2118 struct rb_root_cached *root);
2119struct anon_vma_chain *
2120anon_vma_interval_tree_iter_first(struct rb_root_cached *root,
2121 unsigned long start, unsigned long last);
bf181b9f
ML
2122struct anon_vma_chain *anon_vma_interval_tree_iter_next(
2123 struct anon_vma_chain *node, unsigned long start, unsigned long last);
ed8ea815
ML
2124#ifdef CONFIG_DEBUG_VM_RB
2125void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
2126#endif
bf181b9f
ML
2127
2128#define anon_vma_interval_tree_foreach(avc, root, start, last) \
2129 for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
2130 avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))
2131
1da177e4 2132/* mmap.c */
34b4e4aa 2133extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
e86f15ee
AA
2134extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
2135 unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
2136 struct vm_area_struct *expand);
2137static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
2138 unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
2139{
2140 return __vma_adjust(vma, start, end, pgoff, insert, NULL);
2141}
1da177e4
LT
2142extern struct vm_area_struct *vma_merge(struct mm_struct *,
2143 struct vm_area_struct *prev, unsigned long addr, unsigned long end,
2144 unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
19a809af 2145 struct mempolicy *, struct vm_userfaultfd_ctx);
1da177e4 2146extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
def5efe0
DR
2147extern int __split_vma(struct mm_struct *, struct vm_area_struct *,
2148 unsigned long addr, int new_below);
2149extern int split_vma(struct mm_struct *, struct vm_area_struct *,
2150 unsigned long addr, int new_below);
1da177e4
LT
2151extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
2152extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
2153 struct rb_node **, struct rb_node *);
a8fb5618 2154extern void unlink_file_vma(struct vm_area_struct *);
1da177e4 2155extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
38a76013
ML
2156 unsigned long addr, unsigned long len, pgoff_t pgoff,
2157 bool *need_rmap_locks);
1da177e4 2158extern void exit_mmap(struct mm_struct *);
925d1c40 2159
9c599024
CG
2160static inline int check_data_rlimit(unsigned long rlim,
2161 unsigned long new,
2162 unsigned long start,
2163 unsigned long end_data,
2164 unsigned long start_data)
2165{
2166 if (rlim < RLIM_INFINITY) {
2167 if (((new - start) + (end_data - start_data)) > rlim)
2168 return -ENOSPC;
2169 }
2170
2171 return 0;
2172}
2173
7906d00c
AA
2174extern int mm_take_all_locks(struct mm_struct *mm);
2175extern void mm_drop_all_locks(struct mm_struct *mm);
2176
38646013
JS
2177extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
2178extern struct file *get_mm_exe_file(struct mm_struct *mm);
cd81a917 2179extern struct file *get_task_exe_file(struct task_struct *task);
925d1c40 2180
84638335
KK
2181extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
2182extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);
2183
2eefd878
DS
2184extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
2185 const struct vm_special_mapping *sm);
3935ed6a
SS
2186extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
2187 unsigned long addr, unsigned long len,
a62c34bd
AL
2188 unsigned long flags,
2189 const struct vm_special_mapping *spec);
2190/* This is an obsolete alternative to _install_special_mapping. */
fa5dc22f
RM
2191extern int install_special_mapping(struct mm_struct *mm,
2192 unsigned long addr, unsigned long len,
2193 unsigned long flags, struct page **pages);
1da177e4
LT
2194
2195extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
2196
0165ab44 2197extern unsigned long mmap_region(struct file *file, unsigned long addr,
897ab3e0
MR
2198 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
2199 struct list_head *uf);
1fcfd8db 2200extern unsigned long do_mmap(struct file *file, unsigned long addr,
bebeb3d6 2201 unsigned long len, unsigned long prot, unsigned long flags,
897ab3e0
MR
2202 vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
2203 struct list_head *uf);
2204extern int do_munmap(struct mm_struct *, unsigned long, size_t,
2205 struct list_head *uf);
1da177e4 2206
1fcfd8db
ON
2207static inline unsigned long
2208do_mmap_pgoff(struct file *file, unsigned long addr,
2209 unsigned long len, unsigned long prot, unsigned long flags,
897ab3e0
MR
2210 unsigned long pgoff, unsigned long *populate,
2211 struct list_head *uf)
1fcfd8db 2212{
897ab3e0 2213 return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate, uf);
1fcfd8db
ON
2214}
2215
bebeb3d6
ML
2216#ifdef CONFIG_MMU
2217extern int __mm_populate(unsigned long addr, unsigned long len,
2218 int ignore_errors);
2219static inline void mm_populate(unsigned long addr, unsigned long len)
2220{
2221 /* Ignore errors */
2222 (void) __mm_populate(addr, len, 1);
2223}
2224#else
2225static inline void mm_populate(unsigned long addr, unsigned long len) {}
2226#endif
2227
e4eb1ff6 2228/* These take the mm semaphore themselves */
5d22fc25 2229extern int __must_check vm_brk(unsigned long, unsigned long);
16e72e9b 2230extern int __must_check vm_brk_flags(unsigned long, unsigned long, unsigned long);
bfce281c 2231extern int vm_munmap(unsigned long, size_t);
9fbeb5ab 2232extern unsigned long __must_check vm_mmap(struct file *, unsigned long,
6be5ceb0
LT
2233 unsigned long, unsigned long,
2234 unsigned long, unsigned long);
1da177e4 2235
db4fbfb9
ML
2236struct vm_unmapped_area_info {
2237#define VM_UNMAPPED_AREA_TOPDOWN 1
2238 unsigned long flags;
2239 unsigned long length;
2240 unsigned long low_limit;
2241 unsigned long high_limit;
2242 unsigned long align_mask;
2243 unsigned long align_offset;
2244};
2245
2246extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
2247extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
2248
2249/*
2250 * Search for an unmapped address range.
2251 *
2252 * We are looking for a range that:
2253 * - does not intersect with any VMA;
2254 * - is contained within the [low_limit, high_limit) interval;
2255 * - is at least the desired size;
2256 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask).
2257 */
2258static inline unsigned long
2259vm_unmapped_area(struct vm_unmapped_area_info *info)
2260{
cdd7875e 2261 if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
db4fbfb9 2262 return unmapped_area_topdown(info);
cdd7875e
BP
2263 else
2264 return unmapped_area(info);
db4fbfb9
ML
2265}
2266
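/*
 * Illustrative sketch of an arch_get_unmapped_area()-style caller (the
 * limits and alignment are hypothetical):
 *
 *	struct vm_unmapped_area_info info;
 *
 *	info.flags = 0;				/ * bottom-up search * /
 *	info.length = len;
 *	info.low_limit = mm->mmap_base;
 *	info.high_limit = TASK_SIZE;
 *	info.align_mask = 0;
 *	info.align_offset = 0;
 *	return vm_unmapped_area(&info);
 */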
85821aab 2267/* truncate.c */
1da177e4 2268extern void truncate_inode_pages(struct address_space *, loff_t);
d7339071
HR
2269extern void truncate_inode_pages_range(struct address_space *,
2270 loff_t lstart, loff_t lend);
91b0abe3 2271extern void truncate_inode_pages_final(struct address_space *);
1da177e4
LT
2272
2273/* generic vm_area_ops exported for stackable file systems */
11bac800 2274extern int filemap_fault(struct vm_fault *vmf);
82b0f8c3 2275extern void filemap_map_pages(struct vm_fault *vmf,
bae473a4 2276 pgoff_t start_pgoff, pgoff_t end_pgoff);
11bac800 2277extern int filemap_page_mkwrite(struct vm_fault *vmf);
1da177e4
LT
2278
2279/* mm/page-writeback.c */
2b69c828 2280int __must_check write_one_page(struct page *page);
1cf6e7d8 2281void task_dirty_inc(struct task_struct *tsk);
1da177e4
LT
2282
2283/* readahead.c */
2284#define VM_MAX_READAHEAD 128 /* kbytes */
2285#define VM_MIN_READAHEAD 16 /* kbytes (includes current page) */
1da177e4 2286
1da177e4 2287int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
7361f4d8 2288 pgoff_t offset, unsigned long nr_to_read);
cf914a7d
RR
2289
2290void page_cache_sync_readahead(struct address_space *mapping,
2291 struct file_ra_state *ra,
2292 struct file *filp,
2293 pgoff_t offset,
2294 unsigned long size);
2295
2296void page_cache_async_readahead(struct address_space *mapping,
2297 struct file_ra_state *ra,
2298 struct file *filp,
2299 struct page *pg,
2300 pgoff_t offset,
2301 unsigned long size);
2302
1be7107f 2303extern unsigned long stack_guard_gap;
d05f3169 2304/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
46dea3d0 2305extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
d05f3169
MH
2306
2307 /* CONFIG_STACK_GROWSUP still needs to grow downwards in some places */
2308extern int expand_downwards(struct vm_area_struct *vma,
2309 unsigned long address);
8ca3eb08 2310#if VM_GROWSUP
46dea3d0 2311extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
8ca3eb08 2312#else
fee7e49d 2313 #define expand_upwards(vma, address) (0)
9ab88515 2314#endif
1da177e4
LT
2315
2316/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
2317extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
2318extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
2319 struct vm_area_struct **pprev);
2320
2321/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
2322 NULL if none. Assume start_addr < end_addr. */
2323static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
2324{
2325 struct vm_area_struct * vma = find_vma(mm,start_addr);
2326
2327 if (vma && end_addr <= vma->vm_start)
2328 vma = NULL;
2329 return vma;
2330}
2331
1be7107f
HD
2332static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
2333{
2334 unsigned long vm_start = vma->vm_start;
2335
2336 if (vma->vm_flags & VM_GROWSDOWN) {
2337 vm_start -= stack_guard_gap;
2338 if (vm_start > vma->vm_start)
2339 vm_start = 0;
2340 }
2341 return vm_start;
2342}
2343
2344static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
2345{
2346 unsigned long vm_end = vma->vm_end;
2347
2348 if (vma->vm_flags & VM_GROWSUP) {
2349 vm_end += stack_guard_gap;
2350 if (vm_end < vma->vm_end)
2351 vm_end = -PAGE_SIZE;
2352 }
2353 return vm_end;
2354}
2355
1da177e4
LT
2356static inline unsigned long vma_pages(struct vm_area_struct *vma)
2357{
2358 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
2359}
2360
640708a2
PE
2361/* Look up the first VMA which exactly matches the interval vm_start ... vm_end */
2362static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
2363 unsigned long vm_start, unsigned long vm_end)
2364{
2365 struct vm_area_struct *vma = find_vma(mm, vm_start);
2366
2367 if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
2368 vma = NULL;
2369
2370 return vma;
2371}
2372
bad849b3 2373#ifdef CONFIG_MMU
804af2cf 2374pgprot_t vm_get_page_prot(unsigned long vm_flags);
64e45507 2375void vma_set_page_prot(struct vm_area_struct *vma);
bad849b3
DH
2376#else
2377static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
2378{
2379 return __pgprot(0);
2380}
64e45507
PF
2381static inline void vma_set_page_prot(struct vm_area_struct *vma)
2382{
2383 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2384}
bad849b3
DH
2385#endif
2386
5877231f 2387#ifdef CONFIG_NUMA_BALANCING
4b10e7d5 2388unsigned long change_prot_numa(struct vm_area_struct *vma,
b24f53a0
LS
2389 unsigned long start, unsigned long end);
2390#endif
2391
deceb6cd 2392struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
deceb6cd
HD
2393int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
2394 unsigned long pfn, unsigned long size, pgprot_t);
a145dd41 2395int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
e0dc0d8f
NP
2396int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2397 unsigned long pfn);
1745cbc5
AL
2398int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
2399 unsigned long pfn, pgprot_t pgprot);
423bad60 2400int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
01c8f1c4 2401 pfn_t pfn);
b2770da6
RZ
2402int vm_insert_mixed_mkwrite(struct vm_area_struct *vma, unsigned long addr,
2403 pfn_t pfn);
b4cbb197
LT
2404int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
2405
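/*
 * Illustrative sketch: a driver ->mmap() handler mapping a physical region
 * into userspace with remap_pfn_range() (my_phys_addr is hypothetical):
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		unsigned long size = vma->vm_end - vma->vm_start;
 *
 *		return remap_pfn_range(vma, vma->vm_start,
 *				       my_phys_addr >> PAGE_SHIFT,
 *				       size, vma->vm_page_prot);
 *	}
 */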
deceb6cd 2406
240aadee
ML
2407struct page *follow_page_mask(struct vm_area_struct *vma,
2408 unsigned long address, unsigned int foll_flags,
2409 unsigned int *page_mask);
2410
2411static inline struct page *follow_page(struct vm_area_struct *vma,
2412 unsigned long address, unsigned int foll_flags)
2413{
2414 unsigned int unused_page_mask;
2415 return follow_page_mask(vma, address, foll_flags, &unused_page_mask);
2416}
2417
deceb6cd
HD
2418#define FOLL_WRITE 0x01 /* check pte is writable */
2419#define FOLL_TOUCH 0x02 /* mark page accessed */
2420#define FOLL_GET 0x04 /* do get_page on page */
8e4b9a60 2421#define FOLL_DUMP 0x08 /* give error on hole if it would be zero */
58fa879e 2422#define FOLL_FORCE 0x10 /* get_user_pages read/write w/o permission */
318b275f
GN
2423#define FOLL_NOWAIT 0x20 /* if a disk transfer is needed, start the IO
2424 * and return without waiting upon it */
84d33df2 2425#define FOLL_POPULATE 0x40 /* fault in page */
500d65d4 2426#define FOLL_SPLIT 0x80 /* don't return transhuge pages, split them */
69ebb83e 2427#define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */
0b9d7052 2428#define FOLL_NUMA 0x200 /* force NUMA hinting page fault */
5117b3b8 2429#define FOLL_MIGRATION 0x400 /* wait for page to replace migration entry */
234b239b 2430#define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */
de60f5f1 2431#define FOLL_MLOCK 0x1000 /* lock present pages */
1e987790 2432#define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */
19be0eaf 2433#define FOLL_COW 0x4000 /* internal GUP flag */
1da177e4 2434
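/*
 * Illustrative sketch: gup_flags are OR-ed together by callers, e.g. pinning
 * pages for a write while overriding protections could look like
 *
 *	long nr = get_user_pages_unlocked(start, nr_pages, pages,
 *					  FOLL_WRITE | FOLL_FORCE);
 *
 * while follow_page(vma, addr, FOLL_GET) looks up (and takes a reference on)
 * a single already-present page, with mmap_sem held by the caller.
 */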
9a291a7c
JM
2435static inline int vm_fault_to_errno(int vm_fault, int foll_flags)
2436{
2437 if (vm_fault & VM_FAULT_OOM)
2438 return -ENOMEM;
2439 if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
2440 return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT;
2441 if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
2442 return -EFAULT;
2443 return 0;
2444}
2445
2f569afd 2446typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
aee16b3c
JF
2447 void *data);
2448extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
2449 unsigned long size, pte_fn_t fn, void *data);
2450
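/*
 * Illustrative sketch: apply_to_page_range() walks (allocating as needed)
 * the page tables and invokes the callback on every pte in the range. A
 * hypothetical callback installing a fixed entry could look like:
 *
 *	static int set_my_pte(pte_t *pte, pgtable_t token, unsigned long addr,
 *			      void *data)
 *	{
 *		pte_t entry = *(pte_t *)data;
 *
 *		set_pte_at(&init_mm, addr, pte, entry);
 *		return 0;
 *	}
 *
 *	apply_to_page_range(&init_mm, start, size, set_my_pte, &entry);
 */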
1da177e4 2451
8823b1db
LA
2452#ifdef CONFIG_PAGE_POISONING
2453extern bool page_poisoning_enabled(void);
2454extern void kernel_poison_pages(struct page *page, int numpages, int enable);
1414c7f4 2455extern bool page_is_poisoned(struct page *page);
8823b1db
LA
2456#else
2457static inline bool page_poisoning_enabled(void) { return false; }
2458static inline void kernel_poison_pages(struct page *page, int numpages,
2459 int enable) { }
1414c7f4 2460static inline bool page_is_poisoned(struct page *page) { return false; }
8823b1db
LA
2461#endif
2462
12d6f21e 2463#ifdef CONFIG_DEBUG_PAGEALLOC
031bc574
JK
2464extern bool _debug_pagealloc_enabled;
2465extern void __kernel_map_pages(struct page *page, int numpages, int enable);
2466
2467static inline bool debug_pagealloc_enabled(void)
2468{
2469 return _debug_pagealloc_enabled;
2470}
2471
2472static inline void
2473kernel_map_pages(struct page *page, int numpages, int enable)
2474{
2475 if (!debug_pagealloc_enabled())
2476 return;
2477
2478 __kernel_map_pages(page, numpages, enable);
2479}
8a235efa
RW
2480#ifdef CONFIG_HIBERNATION
2481extern bool kernel_page_present(struct page *page);
40b44137
JK
2482#endif /* CONFIG_HIBERNATION */
2483#else /* CONFIG_DEBUG_PAGEALLOC */
1da177e4 2484static inline void
9858db50 2485kernel_map_pages(struct page *page, int numpages, int enable) {}
8a235efa
RW
2486#ifdef CONFIG_HIBERNATION
2487static inline bool kernel_page_present(struct page *page) { return true; }
40b44137
JK
2488#endif /* CONFIG_HIBERNATION */
2489static inline bool debug_pagealloc_enabled(void)
2490{
2491 return false;
2492}
2493#endif /* CONFIG_DEBUG_PAGEALLOC */
1da177e4 2494
a6c19dfe 2495#ifdef __HAVE_ARCH_GATE_AREA
31db58b3 2496extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
a6c19dfe
AL
2497extern int in_gate_area_no_mm(unsigned long addr);
2498extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
1da177e4 2499#else
a6c19dfe
AL
2500static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
2501{
2502 return NULL;
2503}
2504static inline int in_gate_area_no_mm(unsigned long addr) { return 0; }
2505static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
2506{
2507 return 0;
2508}
1da177e4
LT
2509#endif /* __HAVE_ARCH_GATE_AREA */
2510
44a70ade
MH
2511extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);
2512
146732ce
JT
2513#ifdef CONFIG_SYSCTL
2514extern int sysctl_drop_caches;
8d65af78 2515int drop_caches_sysctl_handler(struct ctl_table *, int,
9d0243bc 2516 void __user *, size_t *, loff_t *);
146732ce
JT
2517#endif
2518
cb731d6c
VD
2519void drop_slab(void);
2520void drop_slab_node(int nid);
9d0243bc 2521
7a9166e3
LY
2522#ifndef CONFIG_MMU
2523#define randomize_va_space 0
2524#else
a62eaf15 2525extern int randomize_va_space;
7a9166e3 2526#endif
a62eaf15 2527
045e72ac 2528const char * arch_vma_name(struct vm_area_struct *vma);
03252919 2529void print_vma_addr(char *prefix, unsigned long rip);
e6e5494c 2530
9bdac914
YL
2531void sparse_mem_maps_populate_node(struct page **map_map,
2532 unsigned long pnum_begin,
2533 unsigned long pnum_end,
2534 unsigned long map_count,
2535 int nodeid);
2536
98f3cfc1 2537struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
29c71111 2538pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
c2febafc
KS
2539p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
2540pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
29c71111
AW
2541pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
2542pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
8f6aac41 2543void *vmemmap_alloc_block(unsigned long size, int node);
4b94ffdc
DW
2544struct vmem_altmap;
2545void *__vmemmap_alloc_block_buf(unsigned long size, int node,
2546 struct vmem_altmap *altmap);
2547static inline void *vmemmap_alloc_block_buf(unsigned long size, int node)
2548{
2549 return __vmemmap_alloc_block_buf(size, node, NULL);
2550}
2551
8f6aac41 2552void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
0aad818b
JW
2553int vmemmap_populate_basepages(unsigned long start, unsigned long end,
2554 int node);
2555int vmemmap_populate(unsigned long start, unsigned long end, int node);
c2b91e2e 2556void vmemmap_populate_print_last(void);
0197518c 2557#ifdef CONFIG_MEMORY_HOTPLUG
0aad818b 2558void vmemmap_free(unsigned long start, unsigned long end);
0197518c 2559#endif
46723bfa 2560void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
15670bfe 2561 unsigned long nr_pages);
6a46079c 2562
82ba011b
AK
2563enum mf_flags {
2564 MF_COUNT_INCREASED = 1 << 0,
7329bbeb 2565 MF_ACTION_REQUIRED = 1 << 1,
6751ed65 2566 MF_MUST_KILL = 1 << 2,
cf870c70 2567 MF_SOFT_OFFLINE = 1 << 3,
82ba011b 2568};
cd42f4a3 2569extern int memory_failure(unsigned long pfn, int trapno, int flags);
ea8f5fb8 2570extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
847ce401 2571extern int unpoison_memory(unsigned long pfn);
ead07f6a 2572extern int get_hwpoison_page(struct page *page);
4e41a30c 2573#define put_hwpoison_page(page) put_page(page)
6a46079c
AK
2574extern int sysctl_memory_failure_early_kill;
2575extern int sysctl_memory_failure_recovery;
facb6011 2576extern void shake_page(struct page *p, int access);
293c07e3 2577extern atomic_long_t num_poisoned_pages;
facb6011 2578extern int soft_offline_page(struct page *page, int flags);
6a46079c 2579
cc637b17
XX
2580
2581/*
2582 * Error handlers for various types of pages.
2583 */
cc3e2af4 2584enum mf_result {
cc637b17
XX
2585 MF_IGNORED, /* Error: cannot be handled */
2586 MF_FAILED, /* Error: handling failed */
2587 MF_DELAYED, /* Will be handled later */
2588 MF_RECOVERED, /* Successfully recovered */
2589};
2590
2591enum mf_action_page_type {
2592 MF_MSG_KERNEL,
2593 MF_MSG_KERNEL_HIGH_ORDER,
2594 MF_MSG_SLAB,
2595 MF_MSG_DIFFERENT_COMPOUND,
2596 MF_MSG_POISONED_HUGE,
2597 MF_MSG_HUGE,
2598 MF_MSG_FREE_HUGE,
2599 MF_MSG_UNMAP_FAILED,
2600 MF_MSG_DIRTY_SWAPCACHE,
2601 MF_MSG_CLEAN_SWAPCACHE,
2602 MF_MSG_DIRTY_MLOCKED_LRU,
2603 MF_MSG_CLEAN_MLOCKED_LRU,
2604 MF_MSG_DIRTY_UNEVICTABLE_LRU,
2605 MF_MSG_CLEAN_UNEVICTABLE_LRU,
2606 MF_MSG_DIRTY_LRU,
2607 MF_MSG_CLEAN_LRU,
2608 MF_MSG_TRUNCATED_LRU,
2609 MF_MSG_BUDDY,
2610 MF_MSG_BUDDY_2ND,
2611 MF_MSG_UNKNOWN,
2612};
2613
47ad8475
AA
2614#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
2615extern void clear_huge_page(struct page *page,
c79b57e4 2616 unsigned long addr_hint,
47ad8475
AA
2617 unsigned int pages_per_huge_page);
2618extern void copy_user_huge_page(struct page *dst, struct page *src,
2619 unsigned long addr, struct vm_area_struct *vma,
2620 unsigned int pages_per_huge_page);
fa4d75c1
MK
2621extern long copy_huge_page_from_user(struct page *dst_page,
2622 const void __user *usr_src,
810a56b9
MK
2623 unsigned int pages_per_huge_page,
2624 bool allow_pagefault);
47ad8475
AA
2625#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
2626
e30825f1 2627extern struct page_ext_operations debug_guardpage_ops;
e30825f1 2628
c0a32fc5
SG
2629#ifdef CONFIG_DEBUG_PAGEALLOC
2630extern unsigned int _debug_guardpage_minorder;
e30825f1 2631extern bool _debug_guardpage_enabled;
c0a32fc5
SG
2632
2633static inline unsigned int debug_guardpage_minorder(void)
2634{
2635 return _debug_guardpage_minorder;
2636}
2637
e30825f1
JK
2638static inline bool debug_guardpage_enabled(void)
2639{
2640 return _debug_guardpage_enabled;
2641}
2642
c0a32fc5
SG
2643static inline bool page_is_guard(struct page *page)
2644{
e30825f1
JK
2645 struct page_ext *page_ext;
2646
2647 if (!debug_guardpage_enabled())
2648 return false;
2649
2650 page_ext = lookup_page_ext(page);
0bb2fd13
YS
2651 if (unlikely(!page_ext))
2652 return false;
2653
e30825f1 2654 return test_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
c0a32fc5
SG
2655}
2656#else
2657static inline unsigned int debug_guardpage_minorder(void) { return 0; }
e30825f1 2658static inline bool debug_guardpage_enabled(void) { return false; }
c0a32fc5
SG
2659static inline bool page_is_guard(struct page *page) { return false; }
2660#endif /* CONFIG_DEBUG_PAGEALLOC */
2661
f9872caf
CS
2662#if MAX_NUMNODES > 1
2663void __init setup_nr_node_ids(void);
2664#else
2665static inline void setup_nr_node_ids(void) {}
2666#endif
2667
1da177e4
LT
2668#endif /* __KERNEL__ */
2669#endif /* _LINUX_MM_H */