/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/page_ref.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
#include <linux/gfp.h>
#include <linux/userfaultfd_k.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;
struct node;

#ifndef CONFIG_ARCH_HAS_HUGEPD
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#endif

void free_huge_folio(struct folio *folio);

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/pagemap.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

/*
 * For a HugeTLB page, there is more metadata to save in the struct page than
 * the head struct page can hold, so we have to abuse other tail struct pages
 * to store the metadata.
 */
#define __NR_USED_SUBPAGE 3

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both allocated and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
	struct rw_semaphore rw_sema;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On private mappings, the counter to uncharge reservations is stored
	 * here. If these fields are 0, then either the mapping is shared, or
	 * cgroup accounting is disabled for this resv_map.
	 */
	struct page_counter *reservation_counter;
	unsigned long pages_per_hpage;
	struct cgroup_subsys_state *css;
#endif
};

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock. The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map. The from and to elements are huge page
 * indices into the associated mapping. from indicates the starting index
 * of the region. to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping. It is important to note that the to element
 * represents the first element past the end of the region. This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On shared mappings, each reserved region appears as a struct
	 * file_region in resv_map. These fields hold the info needed to
	 * uncharge each reservation.
	 */
	struct page_counter *reservation_counter;
	struct cgroup_subsys_state *css;
#endif
};

struct hugetlb_vma_lock {
	struct kref refs;
	struct rw_semaphore rw_sema;
	struct vm_area_struct *vma;
};

extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)

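/*
 * Illustrative sketch (not part of the original header): for_each_hstate()
 * visits every registered huge page size; the pr_info() reporting below is
 * a hypothetical use:
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("%s: %lu free\n", h->name, h->free_huge_pages);
 */
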
struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

void hugetlb_dup_vma_private(struct vm_area_struct *vma);
void clear_vma_resv_huge_pages(struct vm_area_struct *vma);
int move_hugetlb_page_tables(struct vm_area_struct *vma,
			     struct vm_area_struct *new_vma,
			     unsigned long old_addr, unsigned long new_addr,
			     unsigned long len);
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
			    struct vm_area_struct *, struct vm_area_struct *);
struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
				      unsigned long address, unsigned int flags,
				      unsigned int *page_mask);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *,
			  zap_flags_t);
void __unmap_hugepage_range(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page, zap_flags_t zap_flags);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
void hugetlb_show_meminfo_node(int nid);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
#ifdef CONFIG_USERFAULTFD
int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
			     struct vm_area_struct *dst_vma,
			     unsigned long dst_addr,
			     unsigned long src_addr,
			     uffd_flags_t flags,
			     struct folio **foliop);
#endif /* CONFIG_USERFAULTFD */
bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
			   struct vm_area_struct *vma,
			   vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
			     long freed);
bool isolate_hugetlb(struct folio *folio, struct list_head *list);
int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison);
int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
				bool *migratable_cleared);
void folio_putback_active_hugetlb(struct folio *folio);
void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);

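/*
 * Illustrative sketch (not part of the original header): fault and
 * truncation paths serialise on a hashed mutex so that concurrent
 * operations on the same (mapping, index) pair cannot race:
 *
 *	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
 *
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	... fault in or remove the page at idx ...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */
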
pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pud_t *pud);

struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages[MAX_NUMNODES];

/* arch callbacks */

#ifndef CONFIG_HIGHPTE
/*
 * pte_offset_huge() and pte_alloc_huge() are helpers for those architectures
 * which may go down to the lowest PTE level in their huge_pte_offset() and
 * huge_pte_alloc(): to avoid reliance on pte_offset_map() without pte_unmap().
 */
static inline pte_t *pte_offset_huge(pmd_t *pmd, unsigned long address)
{
	return pte_offset_kernel(pmd, address);
}
static inline pte_t *pte_alloc_huge(struct mm_struct *mm, pmd_t *pmd,
				    unsigned long address)
{
	return pte_alloc(mm, pmd) ? NULL : pte_offset_huge(pmd, address);
}
#endif

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz);
/*
 * huge_pte_offset(): Walk the hugetlb pgtable until the last level PTE.
 * Returns the pte_t* if found, or NULL if the address is not mapped.
 *
 * IMPORTANT: we should normally not directly call this function, instead
 * this is only a common interface to implement arch-specific
 * walkers. Please use hugetlb_walk() instead, because that will attempt to
 * verify the locking for you.
 *
 * Since this function will walk all the pgtable pages (including not only
 * high-level pgtable pages, but also PUD entries that can be unshared
 * concurrently for VM_SHARED), the caller of this function is
 * responsible for its thread safety. One can follow this rule:
 *
 * (1) For private mappings: pmd unsharing is not possible, so holding the
 *     mmap_lock for either read or write is sufficient. Most callers
 *     already hold the mmap_lock, so normally, no special action is
 *     required.
 *
 * (2) For shared mappings: pmd unsharing is possible (so the PUD-ranged
 *     pgtable page can go away from under us! It can be done by a pmd
 *     unshare with a follow up munmap() on the other process), then we
 *     need either:
 *
 *     (2.1) hugetlb vma lock read or write held, to make sure pmd unshare
 *           won't happen upon the range (it also makes sure the pte_t we
 *           read is the right and stable one), or,
 *
 *     (2.2) hugetlb mapping i_mmap_rwsem lock held read or write, to make
 *           sure even if unshare happened the racy unmap() will wait until
 *           i_mmap_rwsem is released.
 *
 * Option (2.1) is the safest, which guarantees pte stability from the pmd
 * sharing point of view, until the vma lock is released. Option (2.2)
 * doesn't protect against a concurrent pmd unshare, but it makes sure the
 * pgtable page is safe to access.
 */
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
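
/*
 * Illustrative sketch (not part of the original header): a lookup on a
 * shared mapping following rule (2.1) above, using the lockdep-checked
 * hugetlb_walk() wrapper defined near the end of this file:
 *
 *	pte_t *ptep;
 *
 *	hugetlb_vma_lock_read(vma);
 *	ptep = hugetlb_walk(vma, addr, huge_page_size(h));
 *	if (ptep)
 *		... read the pte while pmd unsharing is excluded ...
 *	hugetlb_vma_unlock_read(vma);
 */
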
unsigned long hugetlb_mask_last_page(struct hstate *h);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end);

extern void __hugetlb_zap_begin(struct vm_area_struct *vma,
				unsigned long *begin, unsigned long *end);
extern void __hugetlb_zap_end(struct vm_area_struct *vma,
			      struct zap_details *details);

static inline void hugetlb_zap_begin(struct vm_area_struct *vma,
				     unsigned long *start, unsigned long *end)
{
	if (is_vm_hugetlb_page(vma))
		__hugetlb_zap_begin(vma, start, end);
}

static inline void hugetlb_zap_end(struct vm_area_struct *vma,
				   struct zap_details *details)
{
	if (is_vm_hugetlb_page(vma))
		__hugetlb_zap_end(vma, details);
}

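/*
 * Illustrative sketch (not part of the original header): unmap paths
 * bracket the actual zap with these helpers so that the hugetlb vma lock
 * is taken (and the range suitably adjusted for pmd sharing) around the
 * page table teardown; do_zap() is a hypothetical stand-in for the real
 * zap routine:
 *
 *	hugetlb_zap_begin(vma, &start, &end);
 *	do_zap(vma, start, end, details);
 *	hugetlb_zap_end(vma, details);
 */
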
void hugetlb_vma_lock_read(struct vm_area_struct *vma);
void hugetlb_vma_unlock_read(struct vm_area_struct *vma);
void hugetlb_vma_lock_write(struct vm_area_struct *vma);
void hugetlb_vma_unlock_write(struct vm_area_struct *vma);
int hugetlb_vma_trylock_write(struct vm_area_struct *vma);
void hugetlb_vma_assert_locked(struct vm_area_struct *vma);
void hugetlb_vma_lock_release(struct kref *kref);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot,
		unsigned long cp_flags);

bool is_hugetlb_entry_migration(pte_t pte);
bool is_hugetlb_entry_hwpoisoned(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void hugetlb_dup_vma_private(struct vm_area_struct *vma)
{
}

static inline void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

static inline struct address_space *hugetlb_page_mapping_lock_write(
			struct page *hpage)
{
	return NULL;
}

static inline int huge_pmd_unshare(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

static inline void hugetlb_zap_begin(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

static inline void hugetlb_zap_end(
				struct vm_area_struct *vma,
				struct zap_details *details)
{
}

static inline struct page *hugetlb_follow_page_mask(
	struct vm_area_struct *vma, unsigned long address, unsigned int flags,
	unsigned int *page_mask)
{
	BUILD_BUG(); /* should never be compiled in if !CONFIG_HUGETLB_PAGE */
}

static inline int copy_hugetlb_page_range(struct mm_struct *dst,
					  struct mm_struct *src,
					  struct vm_area_struct *dst_vma,
					  struct vm_area_struct *src_vma)
{
	BUG();
	return 0;
}

static inline int move_hugetlb_page_tables(struct vm_area_struct *vma,
					   struct vm_area_struct *new_vma,
					   unsigned long old_addr,
					   unsigned long new_addr,
					   unsigned long len)
{
	BUG();
	return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}

static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
	return 0;
}

static inline void hugetlb_show_meminfo_node(int nid)
{
}

static inline int prepare_hugepage_range(struct file *file,
					 unsigned long addr, unsigned long len)
{
	return -EINVAL;
}

static inline void hugetlb_vma_lock_read(struct vm_area_struct *vma)
{
}

static inline void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
{
}

static inline void hugetlb_vma_lock_write(struct vm_area_struct *vma)
{
}

static inline void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
{
}

static inline int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
{
	return 1;
}

static inline void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
{
}

static inline int pmd_huge(pmd_t pmd)
{
	return 0;
}

static inline int pud_huge(pud_t pud)
{
	return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	BUG();
}

#ifdef CONFIG_USERFAULTFD
static inline int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
					   struct vm_area_struct *dst_vma,
					   unsigned long dst_addr,
					   unsigned long src_addr,
					   uffd_flags_t flags,
					   struct folio **foliop)
{
	BUG();
	return 0;
}
#endif /* CONFIG_USERFAULTFD */

static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
					unsigned long sz)
{
	return NULL;
}

static inline bool isolate_hugetlb(struct folio *folio, struct list_head *list)
{
	return false;
}

static inline int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison)
{
	return 0;
}

static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
					bool *migratable_cleared)
{
	return 0;
}

static inline void folio_putback_active_hugetlb(struct folio *folio)
{
}

static inline void move_hugetlb_state(struct folio *old_folio,
					struct folio *new_folio, int reason)
{
}

static inline long hugetlb_change_protection(
			struct vm_area_struct *vma, unsigned long address,
			unsigned long end, pgprot_t newprot,
			unsigned long cp_flags)
{
	return 0;
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page,
			zap_flags_t zap_flags)
{
	BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	BUG();
	return 0;
}

static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at the page global directory. If an arch supports
 * hugepages at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x) 0
#endif
#ifndef p4d_huge
#define p4d_huge(x) 0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
	/*
	 * The file will be used as an shm file so shmfs accounting rules
	 * apply
	 */
	HUGETLB_SHMFS_INODE = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply
	 */
	HUGETLB_ANONHUGE_INODE = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;   /* inodes allowed */
	long	free_inodes;  /* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t	uid;
	kgid_t	gid;
	umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
	struct inode vfs_inode;
	unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				int creat_flags, int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}
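
/*
 * Illustrative sketch (not part of the original header): anonymous
 * MAP_HUGETLB mappings obtain their backing file roughly like this
 * (the flag extraction shown is schematic):
 *
 *	file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
 *				  HUGETLB_ANONHUGE_INODE,
 *				  (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
 */
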
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file) false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		int creat_flags, int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return NULL;
}
#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

unsigned long
generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
				  unsigned long len, unsigned long pgoff,
				  unsigned long flags);

/*
 * hugetlb page specific state flags. These flags are located in page.private
 * of the hugetlb head page. Functions created via the below macros should be
 * used to manipulate these flags.
 *
 * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at
 *	allocation time. Cleared when page is fully instantiated. Free
 *	routine checks flag to restore a reservation on error paths.
 *	Synchronization: Examined or modified by code that knows it has
 *	the only reference to page. i.e. After allocation but before use
 *	or when the page is being freed.
 * HPG_migratable - Set after a newly allocated page is added to the page
 *	cache and/or page tables. Indicates the page is a candidate for
 *	migration.
 *	Synchronization: Initially set after new page allocation with no
 *	locking. When examined and modified during migration processing
 *	(isolate, migrate, putback) the hugetlb_lock is held.
 * HPG_temporary - Set on a page that is temporarily allocated from the buddy
 *	allocator. Typically used for migration target pages when no pages
 *	are available in the pool. The hugetlb free page path will
 *	immediately free pages with this flag set to the buddy allocator.
 *	Synchronization: Can be set after huge page allocation from buddy when
 *	code knows it has only reference. All other examinations and
 *	modifications require hugetlb_lock.
 * HPG_freed - Set when page is on the free lists.
 *	Synchronization: hugetlb_lock held for examination and modification.
 * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed.
 * HPG_raw_hwp_unreliable - Set when the hugetlb page has a hwpoison sub-page
 *	that is not tracked by raw_hwp_page list.
 */
enum hugetlb_page_flags {
	HPG_restore_reserve = 0,
	HPG_migratable,
	HPG_temporary,
	HPG_freed,
	HPG_vmemmap_optimized,
	HPG_raw_hwp_unreliable,
	__NR_HPAGEFLAGS,
};

/*
 * Macros to create test, set and clear function definitions for
 * hugetlb specific page flags.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define TESTHPAGEFLAG(uname, flname)				\
static __always_inline						\
bool folio_test_hugetlb_##flname(struct folio *folio)		\
	{	void *private = &folio->private;		\
		return test_bit(HPG_##flname, private);		\
	}							\
static inline int HPage##uname(struct page *page)		\
	{ return test_bit(HPG_##flname, &(page->private)); }

#define SETHPAGEFLAG(uname, flname)				\
static __always_inline						\
void folio_set_hugetlb_##flname(struct folio *folio)		\
	{	void *private = &folio->private;		\
		set_bit(HPG_##flname, private);			\
	}							\
static inline void SetHPage##uname(struct page *page)		\
	{ set_bit(HPG_##flname, &(page->private)); }

#define CLEARHPAGEFLAG(uname, flname)				\
static __always_inline						\
void folio_clear_hugetlb_##flname(struct folio *folio)		\
	{	void *private = &folio->private;		\
		clear_bit(HPG_##flname, private);		\
	}							\
static inline void ClearHPage##uname(struct page *page)		\
	{ clear_bit(HPG_##flname, &(page->private)); }
#else
#define TESTHPAGEFLAG(uname, flname)				\
static inline bool						\
folio_test_hugetlb_##flname(struct folio *folio)		\
	{ return 0; }						\
static inline int HPage##uname(struct page *page)		\
	{ return 0; }

#define SETHPAGEFLAG(uname, flname)				\
static inline void						\
folio_set_hugetlb_##flname(struct folio *folio)			\
	{ }							\
static inline void SetHPage##uname(struct page *page)		\
	{ }

#define CLEARHPAGEFLAG(uname, flname)				\
static inline void						\
folio_clear_hugetlb_##flname(struct folio *folio)		\
	{ }							\
static inline void ClearHPage##uname(struct page *page)		\
	{ }
#endif

#define HPAGEFLAG(uname, flname)				\
	TESTHPAGEFLAG(uname, flname)				\
	SETHPAGEFLAG(uname, flname)				\
	CLEARHPAGEFLAG(uname, flname)				\

/*
 * Create functions associated with hugetlb page flags
 */
HPAGEFLAG(RestoreReserve, restore_reserve)
HPAGEFLAG(Migratable, migratable)
HPAGEFLAG(Temporary, temporary)
HPAGEFLAG(Freed, freed)
HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable)
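
/*
 * Illustrative note (not part of the original header): each HPAGEFLAG()
 * line above generates a test/set/clear triple. For example,
 * HPAGEFLAG(Migratable, migratable) yields:
 *
 *	folio_test_hugetlb_migratable(folio);
 *	folio_set_hugetlb_migratable(folio);
 *	folio_clear_hugetlb_migratable(folio);
 *
 * plus the legacy HPageMigratable()/SetHPageMigratable()/
 * ClearHPageMigratable() page variants, all operating on bit
 * HPG_migratable of the head page's page->private.
 */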

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	struct mutex resize_lock;
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned int demote_order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int max_huge_pages_node[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files_dfl[8];
	struct cftype cgroup_files_legacy[10];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
};

int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
				nodemask_t *nmask, gfp_t gfp_mask);
int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
			pgoff_t idx);
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address, struct folio *folio);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h, int nid);
int __init alloc_bootmem_huge_page(struct hstate *h, int nid);
bool __init hugetlb_node_alloc_supported(void);

void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
{
	return folio->_hugetlb_subpool;
}

static inline void hugetlb_set_folio_subpool(struct folio *folio,
					struct hugepage_subpool *subpool)
{
	folio->_hugetlb_subpool = subpool;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	if (page_size_log < BITS_PER_LONG)
		return size_to_hstate(1UL << page_size_log);

	return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(const struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) > MAX_PAGE_ORDER;
}

static inline unsigned int pages_per_huge_page(const struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}

static inline struct folio *filemap_lock_hugetlb_folio(struct hstate *h,
				struct address_space *mapping, pgoff_t idx)
{
	return filemap_lock_folio(mapping, idx << huge_page_order(h));
}

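/*
 * Illustrative sketch (not part of the original header): looking up the
 * page cache folio backing huge page index @idx; the error handling shown
 * is schematic:
 *
 *	struct folio *folio;
 *
 *	folio = filemap_lock_hugetlb_folio(h, mapping, idx);
 *	if (!IS_ERR(folio)) {
 *		... use the locked folio ...
 *		folio_unlock(folio);
 *		folio_put(folio);
 *	}
 */
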
#include <asm/hugetlb.h>

#ifndef is_hugepage_only_range
static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}
#define is_hugepage_only_range is_hugepage_only_range
#endif

#ifndef arch_clear_hugepage_flags
static inline void arch_clear_hugepage_flags(struct page *page) { }
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
#endif

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
				       vm_flags_t flags)
{
	return pte_mkhuge(entry);
}
#endif

static inline struct hstate *folio_hstate(struct folio *folio)
{
	VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
	return size_to_hstate(folio_size(folio));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);

#ifdef CONFIG_MEMORY_FAILURE
extern void folio_clear_hugetlb_hwpoison(struct folio *folio);
#else
static inline void folio_clear_hugetlb_hwpoison(struct folio *folio)
{
}
#endif

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	if ((huge_page_shift(h) == PMD_SHIFT) ||
		(huge_page_shift(h) == PUD_SHIFT) ||
			(huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return arch_hugetlb_migration_supported(h);
}

/*
 * The movability check is different from the migration check: it
 * determines whether a huge page should be placed in a movable zone.
 * Movability of any huge page should be required only if the huge page
 * size is supported for migration. There won't be any reason for the
 * huge page to be movable if it is not migratable to start with. Also
 * the size of the huge page should be large enough to be placed under
 * a movable zone and still feasible enough to be migratable. Just the
 * presence in a movable zone does not make the migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones are
 * migratable they should not be movable because it's not feasible to
 * migrate them from a movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
	if (!hugepage_migration_supported(h))
		return false;

	if (hstate_is_gigantic(h))
		return false;
	return true;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	if (hugepage_movable_supported(h))
		return GFP_HIGHUSER_MOVABLE;
	else
		return GFP_HIGHUSER;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	gfp_t modified_mask = htlb_alloc_mask(h);

	/* Some callers might want to enforce node */
	modified_mask |= (gfp_mask & __GFP_THISNODE);

	modified_mask |= (gfp_mask & __GFP_NOWARN);

	return modified_mask;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_init(struct mm_struct *mm)
{
	atomic_long_set(&mm->hugetlb_usage, 0);
}

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
{
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte)
{
	unsigned long psize = huge_page_size(hstate_vma(vma));

	set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize);
}
#endif

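/*
 * Illustrative sketch (not part of the original header): the start/commit
 * pair is meant to bracket a protection change on one huge pte, roughly as
 * hugetlb_change_protection() does; huge_pte_modify() is assumed to come
 * from the arch/asm-generic hugetlb helpers:
 *
 *	old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
 *	pte = huge_pte_modify(old_pte, newprot);
 *	huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, pte);
 */
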
#ifdef CONFIG_NUMA
void hugetlb_register_node(struct node *node);
void hugetlb_unregister_node(struct node *node);
#endif

/*
 * Check if a given raw @page in a hugepage is HWPOISON.
 */
bool is_raw_hwpoison_page_in_hugepage(struct page *page);

#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
{
	return NULL;
}

static inline struct folio *filemap_lock_hugetlb_folio(struct hstate *h,
				struct address_space *mapping, pgoff_t idx)
{
	return NULL;
}

static inline int isolate_or_dissolve_huge_page(struct page *page,
						struct list_head *list)
{
	return -ENOMEM;
}

static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
					   unsigned long addr,
					   int avoid_reserve)
{
	return NULL;
}

static inline struct folio *
alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
			nodemask_t *nmask, gfp_t gfp_mask)
{
	return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
	return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct hstate *folio_hstate(struct folio *folio)
{
	return NULL;
}

static inline struct hstate *size_to_hstate(unsigned long size)
{
	return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
	return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}

static inline int hstate_index(struct hstate *h)
{
	return 0;
}

static inline int dissolve_free_huge_page(struct page *page)
{
	return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
	return false;
}

static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	return 0;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	return 0;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_count_init(struct mm_struct *mm)
{
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_MMU
	return ptep_get(ptep);
#else
	return *ptep;
#endif
}

static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte, unsigned long sz)
{
}

static inline void hugetlb_register_node(struct node *node)
{
}

static inline void hugetlb_unregister_node(struct node *node)
{
}
#endif /* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}

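/*
 * Illustrative sketch (not part of the original header): typical use of
 * the split-lock helper around a hugetlb pte update:
 *
 *	spinlock_t *ptl = huge_pte_lock(h, mm, ptep);
 *
 *	... examine or update the pte under the lock ...
 *	spin_unlock(ptl);
 */
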
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
#endif

#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
static inline bool hugetlb_pmd_shared(pte_t *pte)
{
	return page_count(virt_to_page(pte)) > 1;
}
#else
static inline bool hugetlb_pmd_shared(pte_t *pte)
{
	return false;
}
#endif

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);

#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
/*
 * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
 * implement this.
 */
#define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
#endif

static inline bool __vma_shareable_lock(struct vm_area_struct *vma)
{
	return (vma->vm_flags & VM_MAYSHARE) && vma->vm_private_data;
}

bool __vma_private_lock(struct vm_area_struct *vma);

/*
 * Safe version of huge_pte_offset() to check the locks. See comments
 * above huge_pte_offset().
 */
static inline pte_t *
hugetlb_walk(struct vm_area_struct *vma, unsigned long addr, unsigned long sz)
{
#if defined(CONFIG_HUGETLB_PAGE) && \
	defined(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) && defined(CONFIG_LOCKDEP)
	struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

	/*
	 * If pmd sharing possible, locking needed to safely walk the
	 * hugetlb pgtables. More information can be found at the comment
	 * above huge_pte_offset() in the same file.
	 *
	 * NOTE: lockdep_is_held() is only defined with CONFIG_LOCKDEP.
	 */
	if (__vma_shareable_lock(vma))
		WARN_ON_ONCE(!lockdep_is_held(&vma_lock->rw_sema) &&
			     !lockdep_is_held(
				 &vma->vm_file->f_mapping->i_mmap_rwsem));
#endif
	return huge_pte_offset(vma->vm_mm, addr, sz);
}

#endif /* _LINUX_HUGETLB_H */