mm, hugetlb: integrate giga hugetlb more naturally to the allocation path
include/linux/hugetlb.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <asm/pgtable.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
/*
 * Some architectures require a hugepage directory format to support
 * multiple hugepage sizes. For example, a4fe3ce76 "powerpc/mm: Allow
 * more flexible layouts for hugepage pagetables" introduced this on
 * powerpc, allowing for a more flexible hugepage pagetable layout.
 */
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
			      unsigned pdshift, unsigned long end,
			      int write, struct page **pages, int *nr)
{
	return 0;
}
#else
extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
		       unsigned pdshift, unsigned long end,
		       int write, struct page **pages, int *nr);
#endif
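
/*
 * Illustrative sketch (not part of this header): the generic fast-GUP
 * walker is one caller of these hooks. Roughly, when an entry at some
 * page-table level looks like a hugepage directory, it is handed off as:
 *
 *	if (unlikely(is_hugepd(__hugepd(pgd_val(pgd)))))
 *		if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
 *				 PGDIR_SHIFT, next, write, pages, nr))
 *			return 0;
 *
 * On architectures without hugepage directories, the stubs above make
 * this path compile away.
 */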

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both allocated and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
};
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);
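
/*
 * Illustrative sketch: a resv_map is reference-counted via its embedded
 * kref. Assuming a map obtained from resv_map_alloc(), which returns it
 * with one reference held, the usual lifecycle is:
 *
 *	struct resv_map *resv = resv_map_alloc();
 *	if (!resv)
 *		return -ENOMEM;
 *	...
 *	kref_put(&resv->refs, resv_map_release);
 */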

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
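
/*
 * Illustrative sketch: walking every registered huge page size, e.g. to
 * report each pool. Assumes the hstates were set up during boot:
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("%s: order %u\n", h->name, huge_page_order(h));
 */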

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
					      long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);
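
/*
 * Illustrative sketch: hugetlbfs creates one subpool per mount to enforce
 * the "size=" and "min_size=" options, and drops it at unmount time:
 *
 *	struct hugepage_subpool *spool;
 *
 *	spool = hugepage_new_subpool(h, max_hpages, min_hpages);
 *	if (!spool)
 *		return -ENOMEM;
 *	...
 *	hugepage_put_subpool(spool);
 */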

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
				     void __user *, size_t *, loff_t *);
#endif

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
			    unsigned long start, unsigned long end,
			    struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		  unsigned long address, unsigned int flags);
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
			     struct vm_area_struct *dst_vma,
			     unsigned long dst_addr,
			     unsigned long src_addr,
			     struct page **pagep);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
			  struct vm_area_struct *vma,
			  vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
			     long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
			     struct vm_area_struct *vma,
			     struct address_space *mapping,
			     pgoff_t idx, unsigned long address);
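
/*
 * Illustrative sketch: the fault mutex table serializes faults on the
 * same mapping and index. A caller hashes to a slot, then locks it:
 *
 *	u32 hash;
 *
 *	hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */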

pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm,
		      unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
			     pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
			     pgd_t *pgd, int flags);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

bool is_hugetlb_entry_migration(pte_t pte);
#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n)	({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
#define hugetlb_report_node_meminfo(n, buf)	0
static inline void hugetlb_show_meminfo(void)
{
}
#define follow_huge_pd(vma, addr, hpd, flags, pdshift)	NULL
#define follow_huge_pmd(mm, addr, pmd, flags)	NULL
#define follow_huge_pud(mm, addr, pud, flags)	NULL
#define follow_huge_pgd(mm, addr, pgd, flags)	NULL
#define prepare_hugepage_range(file, addr, len)	(-EINVAL)
#define pmd_huge(x)	0
#define pud_huge(x)	0
#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling)	({BUG(); 0; })
#define hugetlb_fault(mm, vma, addr, flags)	({ BUG(); 0; })
#define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
				src_addr, pagep)	({ BUG(); 0; })
#define huge_pte_offset(mm, address, sz)	0

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}
#define putback_active_hugepage(p)	do {} while (0)

static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at page global directory. If an arch supports
 * hugepages at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#define HUGETLB_ANON_FILE	"anon_hugepage"

enum {
	/*
	 * The file will be used as an shm file so shmfs accounting rules
	 * apply
	 */
	HUGETLB_SHMFS_INODE = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply
	 */
	HUGETLB_ANONHUGE_INODE = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;	/* inodes allowed */
	long	free_inodes;	/* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t	uid;
	kgid_t	gid;
	umode_t	mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
	unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags,
				int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}

#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct user_struct **user, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files[5];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
#ifdef CONFIG_HIGHMEM
	phys_addr_t phys;
#endif
};

struct page *alloc_huge_page(struct vm_area_struct *vma,
			     unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
				   unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
				      nodemask_t *nmask);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			   pgoff_t idx);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h);
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_bad_size(void);
void __init hugetlb_add_hstate(unsigned order);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	return size_to_hstate(1UL << page_size_log);
}
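
/*
 * Illustrative sketch: page_size_log typically comes from the high bits
 * of the mmap()/shmget() flags, e.g. MAP_HUGE_2MB encodes 21. So,
 * assuming a 2 MB hstate is configured:
 *
 *	hstate_sizelog(0);	// the default huge page size
 *	hstate_sizelog(21);	// size_to_hstate(1UL << 21), i.e. 2 MB
 *	hstate_sizelog(30);	// the 1 GB hstate, or NULL if absent
 */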

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}
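
/*
 * Worked example: for a 2 MB huge page on x86-64 (PAGE_SHIFT == 12),
 * h->order is 9, so:
 *
 *	huge_page_size(h)  == 4096UL << 9 == 2 MB
 *	huge_page_shift(h) == 9 + 12     == 21
 *	huge_page_mask(h)  == ~(2 MB - 1), i.e. ~0x1fffffUL
 */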

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}
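
/*
 * Example: with MAX_ORDER == 11, a 2 MB hstate (order 9) is an ordinary
 * buddy allocation, while a 1 GB hstate (order 18) is "gigantic" and
 * must be allocated outside the buddy allocator (at boot, or from CMA).
 */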

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}
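
/*
 * Example: blocks here are 512-byte sectors, so a 2 MB huge page spans
 * 2 MB / 512 == 4096 blocks.
 */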

#include <asm/hugetlb.h>

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(PAGE_SIZE << compound_order(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
	if (!PageCompound(page))
		return page->index;

	return __basepage_index(page);
}
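
/*
 * Example: for a base page inside a 2 MB compound page (order 9) whose
 * head is cached at huge-page index 3, __basepage_index() yields
 * 3 * 512 + (page - head), i.e. the offset in 4 KB units.
 */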

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);
static inline bool hugepage_migration_supported(struct hstate *h)
{
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
	if ((huge_page_shift(h) == PMD_SHIFT) ||
		(huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
#else
	return false;
#endif
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif
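
/*
 * Illustrative sketch: callers use this as an early-out guard, e.g. the
 * sysctl handlers above bail out on kernels booted without huge page
 * support:
 *
 *	if (!hugepages_supported())
 *		return -EOPNOTSUPP;
 */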

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef set_huge_swap_pte_at
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
	set_huge_pte_at(mm, addr, ptep, pte);
}
#endif
#else	/* CONFIG_HUGETLB_PAGE */
struct hstate {};
#define alloc_huge_page(v, a, r) NULL
#define alloc_huge_page_node(h, nid) NULL
#define alloc_huge_page_nodemask(h, preferred_nid, nmask) NULL
#define alloc_huge_page_noerr(v, a, r) NULL
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
#define hstate_sizelog(s) NULL
#define hstate_vma(v) NULL
#define hstate_inode(i) NULL
#define page_hstate(page) NULL
#define huge_page_size(h) PAGE_SIZE
#define huge_page_mask(h) PAGE_MASK
#define vma_kernel_pagesize(v) PAGE_SIZE
#define vma_mmu_pagesize(v) PAGE_SIZE
#define huge_page_order(h) 0
#define huge_page_shift(h) PAGE_SHIFT
static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}

static inline int hstate_index(struct hstate *h)
{
	return 0;
}

static inline pgoff_t basepage_index(struct page *page)
{
	return page->index;
}

static inline int dissolve_free_huge_page(struct page *page)
{
	return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
}
#endif	/* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}
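
/*
 * Illustrative sketch: the returned lock guards the huge PTE, so a
 * typical update looks like:
 *
 *	spinlock_t *ptl = huge_pte_lock(h, mm, ptep);
 *	... read or modify the entry at ptep ...
 *	spin_unlock(ptl);
 */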

#endif /* _LINUX_HUGETLB_H */