mm: make alloc_contig_range handle free hugetlb pages
include/linux/hugetlb.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
#include <linux/gfp.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#endif

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct hugepage_subpool {
        spinlock_t lock;
        long count;
        long max_hpages;        /* Maximum huge pages or -1 if no maximum. */
        long used_hpages;       /* Used count against maximum, includes */
                                /* both allocated and reserved pages. */
        struct hstate *hstate;
        long min_hpages;        /* Minimum huge pages or -1 if no minimum. */
        long rsv_hpages;        /* Pages reserved against global pool to */
                                /* satisfy minimum size. */
};

struct resv_map {
        struct kref refs;
        spinlock_t lock;
        struct list_head regions;
        long adds_in_progress;
        struct list_head region_cache;
        long region_cache_count;
#ifdef CONFIG_CGROUP_HUGETLB
        /*
         * On private mappings, the counter to uncharge reservations is stored
         * here. If these fields are 0, then either the mapping is shared, or
         * cgroup accounting is disabled for this resv_map.
         */
        struct page_counter *reservation_counter;
        unsigned long pages_per_hpage;
        struct cgroup_subsys_state *css;
#endif
};

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock.  The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map.  The from and to elements are huge page
 * indices into the associated mapping.  from indicates the starting index
 * of the region.  to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping.  It is important to note that the to element
 * represents the first element past the end of the region.  This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
        struct list_head link;
        long from;
        long to;
#ifdef CONFIG_CGROUP_HUGETLB
        /*
         * On shared mappings, each reserved region appears as a struct
         * file_region in resv_map.  These fields hold the info needed to
         * uncharge each reservation.
         */
        struct page_counter *reservation_counter;
        struct cgroup_subsys_state *css;
#endif
};

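/*
 * Illustrative sketch (not part of the original header): with the
 * [from, to) convention documented above, the number of huge pages a
 * file_region covers is simply to - from; e.g. from == 0, to == 4 is
 * four huge pages. A hypothetical helper making that explicit:
 */
static inline long example_file_region_pages(struct file_region *rg)
{
        return rg->to - rg->from;
}
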
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
        for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
                                              long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
                loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
                loff_t *);
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
                loff_t *);

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
                         struct page **, struct vm_area_struct **,
                         unsigned long *, unsigned long *, long, unsigned int,
                         int *);
void unmap_hugepage_range(struct vm_area_struct *,
                          unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
                          struct vm_area_struct *vma,
                          unsigned long start, unsigned long end,
                          struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
                          unsigned long start, unsigned long end,
                          struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, unsigned int flags);
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
                                struct vm_area_struct *dst_vma,
                                unsigned long dst_addr,
                                unsigned long src_addr,
                                struct page **pagep);
bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
                                                struct vm_area_struct *vma,
                                                vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
                                                long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);

pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
                      unsigned long addr, pud_t *pud);

struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
                       unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
                                unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
                                unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
                              int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
                            unsigned long address, hugepd_t hpd,
                            int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                                pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
                                pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
                             pgd_t *pgd, int flags);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                unsigned long address, unsigned long end, pgprot_t newprot);

bool is_hugetlb_entry_migration(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
        return 0;
}

static inline struct address_space *hugetlb_page_mapping_lock_write(
                                                        struct page *hpage)
{
        return NULL;
}

static inline int huge_pmd_unshare(struct mm_struct *mm,
                                        struct vm_area_struct *vma,
                                        unsigned long *addr, pte_t *ptep)
{
        return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
                                struct vm_area_struct *vma,
                                unsigned long *start, unsigned long *end)
{
}

static inline long follow_hugetlb_page(struct mm_struct *mm,
                        struct vm_area_struct *vma, struct page **pages,
                        struct vm_area_struct **vmas, unsigned long *position,
                        unsigned long *nr_pages, long i, unsigned int flags,
                        int *nonblocking)
{
        BUG();
        return 0;
}

static inline struct page *follow_huge_addr(struct mm_struct *mm,
                                        unsigned long address, int write)
{
        return ERR_PTR(-EINVAL);
}

static inline int copy_hugetlb_page_range(struct mm_struct *dst,
                        struct mm_struct *src, struct vm_area_struct *vma)
{
        BUG();
        return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}

static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
        return 0;
}

static inline void hugetlb_show_meminfo(void)
{
}

static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
                                unsigned long address, hugepd_t hpd, int flags,
                                int pdshift)
{
        return NULL;
}

static inline struct page *follow_huge_pmd(struct mm_struct *mm,
                                unsigned long address, pmd_t *pmd, int flags)
{
        return NULL;
}

static inline struct page *follow_huge_pud(struct mm_struct *mm,
                                unsigned long address, pud_t *pud, int flags)
{
        return NULL;
}

static inline struct page *follow_huge_pgd(struct mm_struct *mm,
                                unsigned long address, pgd_t *pgd, int flags)
{
        return NULL;
}

static inline int prepare_hugepage_range(struct file *file,
                                unsigned long addr, unsigned long len)
{
        return -EINVAL;
}

static inline int pmd_huge(pmd_t pmd)
{
        return 0;
}

static inline int pud_huge(pud_t pud)
{
        return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
                                        unsigned long addr, unsigned long len)
{
        return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
                                unsigned long addr, unsigned long end,
                                unsigned long floor, unsigned long ceiling)
{
        BUG();
}

static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
                                                pte_t *dst_pte,
                                                struct vm_area_struct *dst_vma,
                                                unsigned long dst_addr,
                                                unsigned long src_addr,
                                                struct page **pagep)
{
        BUG();
        return 0;
}

static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
                                        unsigned long sz)
{
        return NULL;
}

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
        return false;
}

static inline void putback_active_hugepage(struct page *page)
{
}

static inline void move_hugetlb_state(struct page *oldpage,
                                        struct page *newpage, int reason)
{
}

static inline unsigned long hugetlb_change_protection(
                        struct vm_area_struct *vma, unsigned long address,
                        unsigned long end, pgprot_t newprot)
{
        return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
                        struct vm_area_struct *vma, unsigned long start,
                        unsigned long end, struct page *ref_page)
{
        BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
                        struct vm_area_struct *vma, unsigned long start,
                        unsigned long end, struct page *ref_page)
{
        BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
                        struct vm_area_struct *vma, unsigned long address,
                        unsigned int flags)
{
        BUG();
        return 0;
}

static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at page global directory. If the arch supports
 * hugepages at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)     0
#endif
#ifndef p4d_huge
#define p4d_huge(x)     0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
        BUG();
        return 0;
}
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
        /*
         * The file will be used as an shm file so shmfs accounting rules
         * apply
         */
        HUGETLB_SHMFS_INODE = 1,
        /*
         * The file is being created on the internal vfs mount and shmfs
         * accounting rules do not apply
         */
        HUGETLB_ANONHUGE_INODE = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
        long    max_inodes;   /* inodes allowed */
        long    free_inodes;  /* inodes free */
        spinlock_t      stat_lock;
        struct hstate *hstate;
        struct hugepage_subpool *spool;
        kuid_t  uid;
        kgid_t  gid;
        umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
        return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
        struct shared_policy policy;
        struct inode vfs_inode;
        unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
        return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
                                struct user_struct **user, int creat_flags,
                                int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
        if (file->f_op == &hugetlbfs_file_operations)
                return true;

        return is_file_shm_hugepages(file);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
        return HUGETLBFS_SB(i->i_sb)->hstate;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)         false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
                struct user_struct **user, int creat_flags,
                int page_size_log)
{
        return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
        return NULL;
}
#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                                        unsigned long len, unsigned long pgoff,
                                        unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

/*
 * hugetlb page specific state flags.  These flags are located in page.private
 * of the hugetlb head page.  Functions created via the below macros should be
 * used to manipulate these flags.
 *
 * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at
 *      allocation time.  Cleared when page is fully instantiated.  Free
 *      routine checks flag to restore a reservation on error paths.
 *      Synchronization:  Examined or modified by code that knows it has
 *      the only reference to page.  i.e. After allocation but before use
 *      or when the page is being freed.
 * HPG_migratable - Set after a newly allocated page is added to the page
 *      cache and/or page tables.  Indicates the page is a candidate for
 *      migration.
 *      Synchronization:  Initially set after new page allocation with no
 *      locking.  When examined and modified during migration processing
 *      (isolate, migrate, putback) the hugetlb_lock is held.
 * HPG_temporary - Set on a page that is temporarily allocated from the buddy
 *      allocator.  Typically used for migration target pages when no pages
 *      are available in the pool.  The hugetlb free page path will
 *      immediately free pages with this flag set to the buddy allocator.
 *      Synchronization: Can be set after huge page allocation from buddy when
 *      code knows it has only reference.  All other examinations and
 *      modifications require hugetlb_lock.
 * HPG_freed - Set when page is on the free lists.
 *      Synchronization: hugetlb_lock held for examination and modification.
 */
enum hugetlb_page_flags {
        HPG_restore_reserve = 0,
        HPG_migratable,
        HPG_temporary,
        HPG_freed,
        __NR_HPAGEFLAGS,
};

/*
 * Macros to create test, set and clear function definitions for
 * hugetlb specific page flags.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define TESTHPAGEFLAG(uname, flname)                            \
static inline int HPage##uname(struct page *page)              \
        { return test_bit(HPG_##flname, &(page->private)); }

#define SETHPAGEFLAG(uname, flname)                             \
static inline void SetHPage##uname(struct page *page)          \
        { set_bit(HPG_##flname, &(page->private)); }

#define CLEARHPAGEFLAG(uname, flname)                           \
static inline void ClearHPage##uname(struct page *page)        \
        { clear_bit(HPG_##flname, &(page->private)); }
#else
#define TESTHPAGEFLAG(uname, flname)                            \
static inline int HPage##uname(struct page *page)              \
        { return 0; }

#define SETHPAGEFLAG(uname, flname)                             \
static inline void SetHPage##uname(struct page *page)          \
        { }

#define CLEARHPAGEFLAG(uname, flname)                           \
static inline void ClearHPage##uname(struct page *page)        \
        { }
#endif

#define HPAGEFLAG(uname, flname)                                \
        TESTHPAGEFLAG(uname, flname)                            \
        SETHPAGEFLAG(uname, flname)                             \
        CLEARHPAGEFLAG(uname, flname)                           \

/*
 * Create functions associated with hugetlb page flags
 */
HPAGEFLAG(RestoreReserve, restore_reserve)
HPAGEFLAG(Migratable, migratable)
HPAGEFLAG(Temporary, temporary)
HPAGEFLAG(Freed, freed)

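/*
 * Illustrative sketch (not part of the original header): for
 * HPAGEFLAG(Temporary, temporary) the macros above expand into
 * HPageTemporary(), SetHPageTemporary() and ClearHPageTemporary(),
 * all operating on bits in the head page's page->private. A
 * hypothetical caller holding the only reference to a freshly
 * allocated huge page might use them like this:
 */
static inline void example_mark_hpage_temporary(struct page *hpage)
{
        if (!HPageTemporary(hpage))
                SetHPageTemporary(hpage);
}
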
#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
        struct mutex resize_lock;
        int next_nid_to_alloc;
        int next_nid_to_free;
        unsigned int order;
        unsigned long mask;
        unsigned long max_huge_pages;
        unsigned long nr_huge_pages;
        unsigned long free_huge_pages;
        unsigned long resv_huge_pages;
        unsigned long surplus_huge_pages;
        unsigned long nr_overcommit_huge_pages;
        struct list_head hugepage_activelist;
        struct list_head hugepage_freelists[MAX_NUMNODES];
        unsigned int nr_huge_pages_node[MAX_NUMNODES];
        unsigned int free_huge_pages_node[MAX_NUMNODES];
        unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
        /* cgroup control files */
        struct cftype cgroup_files_dfl[7];
        struct cftype cgroup_files_legacy[9];
#endif
        char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
        struct list_head list;
        struct hstate *hstate;
};

int isolate_or_dissolve_huge_page(struct page *page);
struct page *alloc_huge_page(struct vm_area_struct *vma,
                                unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
                                nodemask_t *nmask, gfp_t gfp_mask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
                                unsigned long address);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
                        pgoff_t idx);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h);
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

/*
 * hugetlb page subpool pointer located in hpage[1].private
 */
static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
        return (struct hugepage_subpool *)(hpage + 1)->private;
}

static inline void hugetlb_set_page_subpool(struct page *hpage,
                                        struct hugepage_subpool *subpool)
{
        set_page_private(hpage + 1, (unsigned long)subpool);
}

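/*
 * Illustrative sketch (not part of the original header): hugetlb flag
 * bits live in the head page's page->private (see hugetlb_page_flags
 * above) while the subpool pointer uses the first tail page, so the two
 * stores below never clobber each other. A hypothetical caller that
 * owns a freshly allocated huge page might do:
 */
static inline void example_init_hugetlb_private(struct page *hpage,
                                                struct hugepage_subpool *spool)
{
        hugetlb_set_page_subpool(hpage, spool); /* hpage[1].private */
        SetHPageMigratable(hpage);              /* bit in hpage[0].private */
}
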
static inline struct hstate *hstate_file(struct file *f)
{
        return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
        if (!page_size_log)
                return &default_hstate;

        return size_to_hstate(1UL << page_size_log);
}

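/*
 * Illustrative sketch (not part of the original header): page_size_log is
 * the log2 page-size encoding userspace supplies via MAP_HUGE_SHIFT /
 * SHM_HUGE_SHIFT, so a hypothetical 2 MiB request (log2(2 MiB) == 21)
 * resolves to the matching registered hstate, or NULL if none exists:
 */
static inline struct hstate *example_hstate_2mb(void)
{
        return hstate_sizelog(21);
}
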
static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
        return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
        return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
        return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
        return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
        return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
        return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
        return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
        return huge_page_size(h) / 512;
}

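/*
 * Illustrative sketch (not part of the original header): every accessor
 * above is derived from h->order, so the relations below hold for any
 * hstate (e.g. a 2 MiB hstate on x86-64 has order 9, shift 21, 512 base
 * pages and 4096 512-byte blocks).
 */
static inline void example_check_hstate_geometry(struct hstate *h)
{
        VM_BUG_ON(huge_page_shift(h) != huge_page_order(h) + PAGE_SHIFT);
        VM_BUG_ON(huge_page_size(h) !=
                  (unsigned long)pages_per_huge_page(h) << PAGE_SHIFT);
        VM_BUG_ON(blocks_per_huge_page(h) != huge_page_size(h) >> 9);
}
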
#include <asm/hugetlb.h>

#ifndef is_hugepage_only_range
static inline int is_hugepage_only_range(struct mm_struct *mm,
                                        unsigned long addr, unsigned long len)
{
        return 0;
}
#define is_hugepage_only_range is_hugepage_only_range
#endif

#ifndef arch_clear_hugepage_flags
static inline void arch_clear_hugepage_flags(struct page *page) { }
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
#endif

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
                                       struct page *page, int writable)
{
        return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
        VM_BUG_ON_PAGE(!PageHuge(page), page);
        return size_to_hstate(page_size(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
        return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
        return h - hstates;
}

pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
        if (!PageCompound(page))
                return page->index;

        return __basepage_index(page);
}

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
                                    unsigned long end_pfn);

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
        if ((huge_page_shift(h) == PMD_SHIFT) ||
                (huge_page_shift(h) == PUD_SHIFT) ||
                        (huge_page_shift(h) == PGDIR_SHIFT))
                return true;
        else
                return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
        return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
        return arch_hugetlb_migration_supported(h);
}

/*
 * Movability check is different as compared to migration check.
 * It determines whether or not a huge page should be placed on
 * movable zone or not. Movability of any huge page should be
 * required only if huge page size is supported for migration.
 * There won't be any reason for the huge page to be movable if
 * it is not migratable to start with. Also the size of the huge
 * page should be large enough to be placed under a movable zone
 * and still feasible enough to be migratable. Just the presence
 * in movable zone does not make the migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones
 * are migratable they should not be movable because it's not
 * feasible to migrate them from movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
        if (!hugepage_migration_supported(h))
                return false;

        if (hstate_is_gigantic(h))
                return false;
        return true;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
        if (hugepage_movable_supported(h))
                return GFP_HIGHUSER_MOVABLE;
        else
                return GFP_HIGHUSER;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
        gfp_t modified_mask = htlb_alloc_mask(h);

        /* Some callers might want to enforce node */
        modified_mask |= (gfp_mask & __GFP_THISNODE);

        modified_mask |= (gfp_mask & __GFP_NOWARN);

        return modified_mask;
}

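/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * migration-style caller that must stay on one node could derive its gfp
 * mask from the helpers above before calling alloc_huge_page_nodemask():
 */
static inline gfp_t example_thisnode_gfp_mask(struct hstate *h)
{
        return htlb_modify_alloc_mask(h, __GFP_THISNODE);
}
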
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
                                           struct mm_struct *mm, pte_t *pte)
{
        if (huge_page_size(h) == PMD_SIZE)
                return pmd_lockptr(mm, (pmd_t *) pte);
        VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
        return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
        atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
        atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef set_huge_swap_pte_at
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
                                        pte_t *ptep, pte_t pte, unsigned long sz)
{
        set_huge_pte_at(mm, addr, ptep, pte);
}
#endif

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
                                                unsigned long addr, pte_t *ptep)
{
        return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
                                                unsigned long addr, pte_t *ptep,
                                                pte_t old_pte, pte_t pte)
{
        set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif

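/*
 * Illustrative sketch (not part of the original header): the two hooks
 * above form a start/commit pair around a protection change of one huge
 * PTE. With the ptl held, a hypothetical no-op round trip looks like
 * this (hugetlb_change_protection() applies the new protection between
 * the two calls):
 */
static inline void example_huge_prot_roundtrip(struct vm_area_struct *vma,
                                               unsigned long addr, pte_t *ptep)
{
        pte_t old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);

        huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, old_pte);
}
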
#else   /* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline int isolate_or_dissolve_huge_page(struct page *page)
{
        return -ENOMEM;
}

static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
                                           unsigned long addr,
                                           int avoid_reserve)
{
        return NULL;
}

static inline struct page *
alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
                        nodemask_t *nmask, gfp_t gfp_mask)
{
        return NULL;
}

static inline struct page *alloc_huge_page_vma(struct hstate *h,
                                               struct vm_area_struct *vma,
                                               unsigned long address)
{
        return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
        return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
        return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
        return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
        return NULL;
}

static inline struct hstate *page_hstate(struct page *page)
{
        return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
        return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
        return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
        return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
        return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
        return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
        return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
        return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
        return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
        return 0;
}

static inline int hstate_index(struct hstate *h)
{
        return 0;
}

static inline pgoff_t basepage_index(struct page *page)
{
        return page->index;
}

static inline int dissolve_free_huge_page(struct page *page)
{
        return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
                                           unsigned long end_pfn)
{
        return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
        return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
        return false;
}

static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
        return 0;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
        return 0;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
                                           struct mm_struct *mm, pte_t *pte)
{
        return &mm->page_table_lock;
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
                                        pte_t *ptep, pte_t pte, unsigned long sz)
{
}
#endif  /* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
                                        struct mm_struct *mm, pte_t *pte)
{
        spinlock_t *ptl;

        ptl = huge_pte_lockptr(h, mm, pte);
        spin_lock(ptl);
        return ptl;
}

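/*
 * Illustrative sketch (not part of the original header): huge_pte_lock()
 * returns the page-table lock already held, so a hypothetical caller
 * pairs it with spin_unlock() around its access to the huge PTE:
 */
static inline void example_with_huge_pte_locked(struct hstate *h,
                                                struct mm_struct *mm,
                                                pte_t *ptep)
{
        spinlock_t *ptl = huge_pte_lock(h, mm, ptep);

        /* ... examine or update *ptep while ptl is held ... */
        spin_unlock(ptl);
}
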
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
extern void __init hugetlb_cma_check(void);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
static inline __init void hugetlb_cma_check(void)
{
}
#endif

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);

#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
/*
 * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
 * implement this.
 */
#define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
#endif

#endif /* _LINUX_HUGETLB_H */