/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/page_ref.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
#include <linux/gfp.h>
#include <linux/userfaultfd_k.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;
struct node;

#ifndef CONFIG_ARCH_HAS_HUGEPD
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#endif

void free_huge_folio(struct folio *folio);

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/pagemap.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

/*
 * For a HugeTLB page, there is more metadata to save in the struct page than
 * the head struct page can hold, so we have to abuse other tail struct pages
 * to store that metadata.
 */
#define __NR_USED_SUBPAGE 3

struct hugepage_subpool {
        spinlock_t lock;
        long count;
        long max_hpages;        /* Maximum huge pages or -1 if no maximum. */
        long used_hpages;       /* Used count against maximum, includes */
                                /* both allocated and reserved pages. */
        struct hstate *hstate;
        long min_hpages;        /* Minimum huge pages or -1 if no minimum. */
        long rsv_hpages;        /* Pages reserved against global pool to */
                                /* satisfy minimum size. */
};

struct resv_map {
        struct kref refs;
        spinlock_t lock;
        struct list_head regions;
        long adds_in_progress;
        struct list_head region_cache;
        long region_cache_count;
        struct rw_semaphore rw_sema;
#ifdef CONFIG_CGROUP_HUGETLB
        /*
         * On private mappings, the counter to uncharge reservations is stored
         * here. If these fields are 0, then either the mapping is shared, or
         * cgroup accounting is disabled for this resv_map.
         */
        struct page_counter *reservation_counter;
        unsigned long pages_per_hpage;
        struct cgroup_subsys_state *css;
#endif
};

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock.  The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map.  The from and to elements are huge page
 * indices into the associated mapping.  from indicates the starting index
 * of the region.  to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping.  It is important to note that the to element
 * represents the first element past the end of the region.  This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
        struct list_head link;
        long from;
        long to;
#ifdef CONFIG_CGROUP_HUGETLB
        /*
         * On shared mappings, each reserved region appears as a struct
         * file_region in resv_map. These fields hold the info needed to
         * uncharge each reservation.
         */
        struct page_counter *reservation_counter;
        struct cgroup_subsys_state *css;
#endif
};

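/*
 * Illustrative sketch only (not part of the kernel API): a trivial helper
 * showing the [from, to) arithmetic described above.  The function name is
 * made up for this example; the real region bookkeeping lives in the
 * region_*() helpers in mm/hugetlb.c.
 */
static inline long file_region_pages(const struct file_region *rg)
{
        /* 'to' is exclusive, so [from, to) spans to - from huge pages. */
        return rg->to - rg->from;
}
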
struct hugetlb_vma_lock {
        struct kref refs;
        struct rw_semaphore rw_sema;
        struct vm_area_struct *vma;
};

extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
        for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)

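/*
 * Example usage of for_each_hstate() (illustrative only; 'h' is a local
 * variable in the caller).  This is the pattern used to walk every
 * registered huge page size:
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h) {
 *		... operate on one huge page size, e.g. h->free_huge_pages ...
 *	}
 */
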
struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
                                              long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

void hugetlb_dup_vma_private(struct vm_area_struct *vma);
void clear_vma_resv_huge_pages(struct vm_area_struct *vma);
int move_hugetlb_page_tables(struct vm_area_struct *vma,
                             struct vm_area_struct *new_vma,
                             unsigned long old_addr, unsigned long new_addr,
                             unsigned long len);
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
                            struct vm_area_struct *, struct vm_area_struct *);
struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
                                      unsigned long address, unsigned int flags,
                                      unsigned int *page_mask);
void unmap_hugepage_range(struct vm_area_struct *,
                          unsigned long, unsigned long, struct page *,
                          zap_flags_t);
void __unmap_hugepage_range(struct mmu_gather *tlb,
                            struct vm_area_struct *vma,
                            unsigned long start, unsigned long end,
                            struct page *ref_page, zap_flags_t zap_flags);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
void hugetlb_show_meminfo_node(int nid);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                         unsigned long address, unsigned int flags);
#ifdef CONFIG_USERFAULTFD
int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
                             struct vm_area_struct *dst_vma,
                             unsigned long dst_addr,
                             unsigned long src_addr,
                             uffd_flags_t flags,
                             struct folio **foliop);
#endif /* CONFIG_USERFAULTFD */
bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
                           struct vm_area_struct *vma,
                           vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
                             long freed);
bool isolate_hugetlb(struct folio *folio, struct list_head *list);
int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison);
int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
                               bool *migratable_cleared);
void folio_putback_active_hugetlb(struct folio *folio);
void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);

pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
                      unsigned long addr, pud_t *pud);
bool hugetlbfs_pagecache_present(struct hstate *h,
                                 struct vm_area_struct *vma,
                                 unsigned long address);

struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages[MAX_NUMNODES];

/* arch callbacks */

#ifndef CONFIG_HIGHPTE
/*
 * pte_offset_huge() and pte_alloc_huge() are helpers for those architectures
 * which may go down to the lowest PTE level in their huge_pte_offset() and
 * huge_pte_alloc(): to avoid reliance on pte_offset_map() without pte_unmap().
 */
static inline pte_t *pte_offset_huge(pmd_t *pmd, unsigned long address)
{
        return pte_offset_kernel(pmd, address);
}
static inline pte_t *pte_alloc_huge(struct mm_struct *mm, pmd_t *pmd,
                                    unsigned long address)
{
        return pte_alloc(mm, pmd) ? NULL : pte_offset_huge(pmd, address);
}
#endif

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
                      unsigned long addr, unsigned long sz);
/*
 * huge_pte_offset(): Walk the hugetlb pgtable until the last level PTE.
 * Returns the pte_t* if found, or NULL if the address is not mapped.
 *
 * IMPORTANT: this function should normally not be called directly; it is
 * only a common interface used to implement arch-specific walkers.  Please
 * use hugetlb_walk() instead, because that will attempt to verify the
 * locking for you.
 *
 * Since this function will walk all the pgtable pages (including not only
 * high-level pgtable pages, but also PUD entries that can be unshared
 * concurrently for VM_SHARED), the caller of this function is responsible
 * for its thread safety.  One can follow this rule:
 *
 * (1) For private mappings: pmd unsharing is not possible, so holding the
 *     mmap_lock for either read or write is sufficient.  Most callers
 *     already hold the mmap_lock, so normally, no special action is
 *     required.
 *
 * (2) For shared mappings: pmd unsharing is possible (so the PUD-ranged
 *     pgtable page can go away from under us!  It can be done by a pmd
 *     unshare with a follow-up munmap() on the other process), then we
 *     need either:
 *
 *     (2.1) hugetlb vma lock read or write held, to make sure pmd unshare
 *           won't happen upon the range (it also makes sure the pte_t we
 *           read is the right and stable one), or,
 *
 *     (2.2) hugetlb mapping i_mmap_rwsem lock held read or write, to make
 *           sure even if unshare happened the racy unmap() will wait until
 *           i_mmap_rwsem is released.
 *
 * Option (2.1) is the safest, which guarantees pte stability from the pmd
 * sharing pov until the vma lock is released.  Option (2.2) doesn't protect
 * against a concurrent pmd unshare, but it makes sure the pgtable page is
 * safe to access.
 */
pte_t *huge_pte_offset(struct mm_struct *mm,
                       unsigned long addr, unsigned long sz);
unsigned long hugetlb_mask_last_page(struct hstate *h);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
                     unsigned long addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
                                          unsigned long *start, unsigned long *end);

extern void __hugetlb_zap_begin(struct vm_area_struct *vma,
                                unsigned long *begin, unsigned long *end);
extern void __hugetlb_zap_end(struct vm_area_struct *vma,
                              struct zap_details *details);

static inline void hugetlb_zap_begin(struct vm_area_struct *vma,
                                     unsigned long *start, unsigned long *end)
{
        if (is_vm_hugetlb_page(vma))
                __hugetlb_zap_begin(vma, start, end);
}

static inline void hugetlb_zap_end(struct vm_area_struct *vma,
                                   struct zap_details *details)
{
        if (is_vm_hugetlb_page(vma))
                __hugetlb_zap_end(vma, details);
}

void hugetlb_vma_lock_read(struct vm_area_struct *vma);
void hugetlb_vma_unlock_read(struct vm_area_struct *vma);
void hugetlb_vma_lock_write(struct vm_area_struct *vma);
void hugetlb_vma_unlock_write(struct vm_area_struct *vma);
int hugetlb_vma_trylock_write(struct vm_area_struct *vma);
void hugetlb_vma_assert_locked(struct vm_area_struct *vma);
void hugetlb_vma_lock_release(struct kref *kref);
long hugetlb_change_protection(struct vm_area_struct *vma,
                unsigned long address, unsigned long end, pgprot_t newprot,
                unsigned long cp_flags);
bool is_hugetlb_entry_migration(pte_t pte);
bool is_hugetlb_entry_hwpoisoned(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void hugetlb_dup_vma_private(struct vm_area_struct *vma)
{
}

static inline void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
        return 0;
}

static inline struct address_space *hugetlb_folio_mapping_lock_write(
                                                        struct folio *folio)
{
        return NULL;
}

static inline int huge_pmd_unshare(struct mm_struct *mm,
                                   struct vm_area_struct *vma,
                                   unsigned long addr, pte_t *ptep)
{
        return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
                                struct vm_area_struct *vma,
                                unsigned long *start, unsigned long *end)
{
}

static inline void hugetlb_zap_begin(
                                struct vm_area_struct *vma,
                                unsigned long *start, unsigned long *end)
{
}

static inline void hugetlb_zap_end(
                                struct vm_area_struct *vma,
                                struct zap_details *details)
{
}

static inline int copy_hugetlb_page_range(struct mm_struct *dst,
                                          struct mm_struct *src,
                                          struct vm_area_struct *dst_vma,
                                          struct vm_area_struct *src_vma)
{
        BUG();
        return 0;
}

static inline int move_hugetlb_page_tables(struct vm_area_struct *vma,
                                           struct vm_area_struct *new_vma,
                                           unsigned long old_addr,
                                           unsigned long new_addr,
                                           unsigned long len)
{
        BUG();
        return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}

static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
        return 0;
}

static inline void hugetlb_show_meminfo_node(int nid)
{
}

static inline int prepare_hugepage_range(struct file *file,
                                unsigned long addr, unsigned long len)
{
        return -EINVAL;
}

static inline void hugetlb_vma_lock_read(struct vm_area_struct *vma)
{
}

static inline void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
{
}

static inline void hugetlb_vma_lock_write(struct vm_area_struct *vma)
{
}

static inline void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
{
}

static inline int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
{
        return 1;
}

static inline void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
{
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
                                         unsigned long addr, unsigned long len)
{
        return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
                                unsigned long addr, unsigned long end,
                                unsigned long floor, unsigned long ceiling)
{
        BUG();
}

#ifdef CONFIG_USERFAULTFD
static inline int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
                                           struct vm_area_struct *dst_vma,
                                           unsigned long dst_addr,
                                           unsigned long src_addr,
                                           uffd_flags_t flags,
                                           struct folio **foliop)
{
        BUG();
        return 0;
}
#endif /* CONFIG_USERFAULTFD */

static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
                                     unsigned long sz)
{
        return NULL;
}

static inline bool isolate_hugetlb(struct folio *folio, struct list_head *list)
{
        return false;
}

static inline int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison)
{
        return 0;
}

static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
                                             bool *migratable_cleared)
{
        return 0;
}

static inline void folio_putback_active_hugetlb(struct folio *folio)
{
}

static inline void move_hugetlb_state(struct folio *old_folio,
                                      struct folio *new_folio, int reason)
{
}

static inline long hugetlb_change_protection(
                        struct vm_area_struct *vma, unsigned long address,
                        unsigned long end, pgprot_t newprot,
                        unsigned long cp_flags)
{
        return 0;
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
                        struct vm_area_struct *vma, unsigned long start,
                        unsigned long end, struct page *ref_page,
                        zap_flags_t zap_flags)
{
        BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
                        struct vm_area_struct *vma, unsigned long address,
                        unsigned int flags)
{
        BUG();
        return 0;
}

static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }

#endif /* !CONFIG_HUGETLB_PAGE */

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
        BUG();
        return 0;
}
#endif

#define HUGETLB_ANON_FILE       "anon_hugepage"

enum {
        /*
         * The file will be used as a shm file, so shmfs accounting rules
         * apply.
         */
        HUGETLB_SHMFS_INODE = 1,
        /*
         * The file is being created on the internal vfs mount and shmfs
         * accounting rules do not apply.
         */
        HUGETLB_ANONHUGE_INODE = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
        long    max_inodes;   /* inodes allowed */
        long    free_inodes;  /* inodes free */
        spinlock_t      stat_lock;
        struct hstate *hstate;
        struct hugepage_subpool *spool;
        kuid_t  uid;
        kgid_t  gid;
        umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
        return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
        struct inode vfs_inode;
        unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
        return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
                                int creat_flags, int page_size_log);

static inline bool is_file_hugepages(const struct file *file)
{
        return file->f_op->fop_flags & FOP_HUGE_PAGES;
}

static inline struct hstate *hstate_inode(struct inode *i)
{
        return HUGETLBFS_SB(i->i_sb)->hstate;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)         false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
                int creat_flags, int page_size_log)
{
        return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
        return NULL;
}
#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                                        unsigned long len, unsigned long pgoff,
                                        unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

unsigned long
generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                                  unsigned long len, unsigned long pgoff,
                                  unsigned long flags);

/*
 * hugetlb page specific state flags.  These flags are located in page.private
 * of the hugetlb head page.  Functions created via the below macros should be
 * used to manipulate these flags.
 *
 * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at
 *	allocation time.  Cleared when page is fully instantiated.  Free
 *	routine checks flag to restore a reservation on error paths.
 *	Synchronization:  Examined or modified by code that knows it has
 *	the only reference to page.  i.e. After allocation but before use
 *	or when the page is being freed.
 * HPG_migratable - Set after a newly allocated page is added to the page
 *	cache and/or page tables.  Indicates the page is a candidate for
 *	migration.
 *	Synchronization:  Initially set after new page allocation with no
 *	locking.  When examined and modified during migration processing
 *	(isolate, migrate, putback) the hugetlb_lock is held.
 * HPG_temporary - Set on a page that is temporarily allocated from the buddy
 *	allocator.  Typically used for migration target pages when no pages
 *	are available in the pool.  The hugetlb free page path will
 *	immediately free pages with this flag set to the buddy allocator.
 *	Synchronization: Can be set after huge page allocation from buddy when
 *	code knows it has the only reference.  All other examinations and
 *	modifications require hugetlb_lock.
 * HPG_freed - Set when page is on the free lists.
 *	Synchronization: hugetlb_lock held for examination and modification.
 * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed.
 * HPG_raw_hwp_unreliable - Set when the hugetlb page has a hwpoison sub-page
 *	that is not tracked by raw_hwp_page list.
 */
enum hugetlb_page_flags {
        HPG_restore_reserve = 0,
        HPG_migratable,
        HPG_temporary,
        HPG_freed,
        HPG_vmemmap_optimized,
        HPG_raw_hwp_unreliable,
        __NR_HPAGEFLAGS,
};

/*
 * Macros to create test, set and clear function definitions for
 * hugetlb specific page flags.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define TESTHPAGEFLAG(uname, flname)                            \
static __always_inline                                          \
bool folio_test_hugetlb_##flname(struct folio *folio)           \
        {       void *private = &folio->private;                \
                return test_bit(HPG_##flname, private);         \
        }                                                       \
static inline int HPage##uname(struct page *page)               \
        { return test_bit(HPG_##flname, &(page->private)); }

#define SETHPAGEFLAG(uname, flname)                             \
static __always_inline                                          \
void folio_set_hugetlb_##flname(struct folio *folio)            \
        {       void *private = &folio->private;                \
                set_bit(HPG_##flname, private);                 \
        }                                                       \
static inline void SetHPage##uname(struct page *page)           \
        { set_bit(HPG_##flname, &(page->private)); }

#define CLEARHPAGEFLAG(uname, flname)                           \
static __always_inline                                          \
void folio_clear_hugetlb_##flname(struct folio *folio)          \
        {       void *private = &folio->private;                \
                clear_bit(HPG_##flname, private);               \
        }                                                       \
static inline void ClearHPage##uname(struct page *page)         \
        { clear_bit(HPG_##flname, &(page->private)); }
#else
#define TESTHPAGEFLAG(uname, flname)                            \
static inline bool                                              \
folio_test_hugetlb_##flname(struct folio *folio)                \
        { return 0; }                                           \
static inline int HPage##uname(struct page *page)               \
        { return 0; }

#define SETHPAGEFLAG(uname, flname)                             \
static inline void                                              \
folio_set_hugetlb_##flname(struct folio *folio)                 \
        { }                                                     \
static inline void SetHPage##uname(struct page *page)           \
        { }

#define CLEARHPAGEFLAG(uname, flname)                           \
static inline void                                              \
folio_clear_hugetlb_##flname(struct folio *folio)               \
        { }                                                     \
static inline void ClearHPage##uname(struct page *page)         \
        { }
#endif

#define HPAGEFLAG(uname, flname)                                \
        TESTHPAGEFLAG(uname, flname)                            \
        SETHPAGEFLAG(uname, flname)                             \
        CLEARHPAGEFLAG(uname, flname)                           \

/*
 * Create functions associated with hugetlb page flags
 */
HPAGEFLAG(RestoreReserve, restore_reserve)
HPAGEFLAG(Migratable, migratable)
HPAGEFLAG(Temporary, temporary)
HPAGEFLAG(Freed, freed)
HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable)
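
/*
 * As an example of what the HPAGEFLAG() invocations above generate,
 * HPAGEFLAG(Freed, freed) produces (with CONFIG_HUGETLB_PAGE enabled):
 *
 *	folio_test_hugetlb_freed(folio)   - test HPG_freed in folio->private
 *	folio_set_hugetlb_freed(folio)    - set the flag
 *	folio_clear_hugetlb_freed(folio)  - clear the flag
 *
 * plus the older HPageFreed()/SetHPageFreed()/ClearHPageFreed() page-based
 * variants.
 */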

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
        struct mutex resize_lock;
        int next_nid_to_alloc;
        int next_nid_to_free;
        unsigned int order;
        unsigned int demote_order;
        unsigned long mask;
        unsigned long max_huge_pages;
        unsigned long nr_huge_pages;
        unsigned long free_huge_pages;
        unsigned long resv_huge_pages;
        unsigned long surplus_huge_pages;
        unsigned long nr_overcommit_huge_pages;
        struct list_head hugepage_activelist;
        struct list_head hugepage_freelists[MAX_NUMNODES];
        unsigned int max_huge_pages_node[MAX_NUMNODES];
        unsigned int nr_huge_pages_node[MAX_NUMNODES];
        unsigned int free_huge_pages_node[MAX_NUMNODES];
        unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
        /* cgroup control files */
        struct cftype cgroup_files_dfl[8];
        struct cftype cgroup_files_legacy[10];
#endif
        char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
        struct list_head list;
        struct hstate *hstate;
};

int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
                                  unsigned long addr, int avoid_reserve);
struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
                                           nodemask_t *nmask, gfp_t gfp_mask,
                                           bool allow_alloc_fallback);
int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
                              pgoff_t idx);
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
                              unsigned long address, struct folio *folio);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h, int nid);
int __init alloc_bootmem_huge_page(struct hstate *h, int nid);
bool __init hugetlb_node_alloc_supported(void);

void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
{
        return folio->_hugetlb_subpool;
}

static inline void hugetlb_set_folio_subpool(struct folio *folio,
                                             struct hugepage_subpool *subpool)
{
        folio->_hugetlb_subpool = subpool;
}

static inline struct hstate *hstate_file(struct file *f)
{
        return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
        if (!page_size_log)
                return &default_hstate;

        if (page_size_log < BITS_PER_LONG)
                return size_to_hstate(1UL << page_size_log);

        return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
        return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(const struct hstate *h)
{
        return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
        return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
        return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
        return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
        return huge_page_order(h) > MAX_PAGE_ORDER;
}

static inline unsigned int pages_per_huge_page(const struct hstate *h)
{
        return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
        return huge_page_size(h) / 512;
}

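/*
 * Worked example for the helpers above, assuming a 4K base page
 * (PAGE_SHIFT == 12) and a 2MB huge page hstate with order 9:
 * huge_page_size() returns 4K << 9 = 2MB, huge_page_shift() returns
 * 9 + 12 = 21, pages_per_huge_page() returns 1 << 9 = 512 base pages and
 * blocks_per_huge_page() returns 2MB / 512 = 4096 sectors.
 */
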
static inline struct folio *filemap_lock_hugetlb_folio(struct hstate *h,
                                struct address_space *mapping, pgoff_t idx)
{
        return filemap_lock_folio(mapping, idx << huge_page_order(h));
}

#include <asm/hugetlb.h>

#ifndef is_hugepage_only_range
static inline int is_hugepage_only_range(struct mm_struct *mm,
                                         unsigned long addr, unsigned long len)
{
        return 0;
}
#define is_hugepage_only_range is_hugepage_only_range
#endif

#ifndef arch_clear_hugetlb_flags
static inline void arch_clear_hugetlb_flags(struct folio *folio) { }
#define arch_clear_hugetlb_flags arch_clear_hugetlb_flags
#endif

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
                                       vm_flags_t flags)
{
        return pte_mkhuge(entry);
}
#endif

static inline struct hstate *folio_hstate(struct folio *folio)
{
        VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
        return size_to_hstate(folio_size(folio));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
        return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
        return h - hstates;
}

int dissolve_free_hugetlb_folio(struct folio *folio);
int dissolve_free_hugetlb_folios(unsigned long start_pfn,
                                 unsigned long end_pfn);

#ifdef CONFIG_MEMORY_FAILURE
extern void folio_clear_hugetlb_hwpoison(struct folio *folio);
#else
static inline void folio_clear_hugetlb_hwpoison(struct folio *folio)
{
}
#endif

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
        if ((huge_page_shift(h) == PMD_SHIFT) ||
                (huge_page_shift(h) == PUD_SHIFT) ||
                        (huge_page_shift(h) == PGDIR_SHIFT))
                return true;
        else
                return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
        return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
        return arch_hugetlb_migration_supported(h);
}

/*
 * Movability check is different as compared to migration check.
 * It determines whether or not a huge page should be placed in
 * the movable zone.  Movability of any huge page should be
 * required only if the huge page size is supported for migration.
 * There won't be any reason for the huge page to be movable if
 * it is not migratable to start with.  Also the size of the huge
 * page should be large enough to be placed under a movable zone
 * and still feasible enough to be migratable.  Just the presence
 * in the movable zone does not make the migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones
 * are migratable they should not be movable because it's not
 * feasible to migrate them from the movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
        if (!hugepage_migration_supported(h))
                return false;

        if (hstate_is_gigantic(h))
                return false;
        return true;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
        if (hugepage_movable_supported(h))
                return GFP_HIGHUSER_MOVABLE;
        else
                return GFP_HIGHUSER;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
        gfp_t modified_mask = htlb_alloc_mask(h);

        /* Some callers might want to enforce node */
        modified_mask |= (gfp_mask & __GFP_THISNODE);

        modified_mask |= (gfp_mask & __GFP_NOWARN);

        return modified_mask;
}

static inline bool htlb_allow_alloc_fallback(int reason)
{
        bool allowed_fallback = false;

        /*
         * Note: memory offline, memory failure and the migration syscalls are
         * allowed to fall back to other nodes for lack of a better choice,
         * even though that might break the per-node hugetlb pool.  All other
         * cases set __GFP_THISNODE to avoid breaking the per-node pool.
         */
        switch (reason) {
        case MR_MEMORY_HOTPLUG:
        case MR_MEMORY_FAILURE:
        case MR_SYSCALL:
        case MR_MEMPOLICY_MBIND:
                allowed_fallback = true;
                break;
        default:
                break;
        }

        return allowed_fallback;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
                                           struct mm_struct *mm, pte_t *pte)
{
        if (huge_page_size(h) == PMD_SIZE)
                return pmd_lockptr(mm, (pmd_t *) pte);
        VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
        return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time.  Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_init(struct mm_struct *mm)
{
        atomic_long_set(&mm->hugetlb_usage, 0);
}

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
        atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
        atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
                                                unsigned long addr, pte_t *ptep)
{
        return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
                                                unsigned long addr, pte_t *ptep,
                                                pte_t old_pte, pte_t pte)
{
        unsigned long psize = huge_page_size(hstate_vma(vma));

        set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize);
}
#endif

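/*
 * Illustrative sketch of the intended calling sequence for the two helpers
 * above (roughly what hugetlb_change_protection() in mm/hugetlb.c does;
 * 'vma', 'addr', 'ptep' and 'newprot' are placeholders):
 *
 *	pte_t old_pte, new_pte;
 *
 *	old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
 *	new_pte = huge_pte_modify(old_pte, newprot);
 *	huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, new_pte);
 *
 * The start/commit pair lets an architecture batch or defer the TLB and
 * cache maintenance around the permission change.
 */
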
#ifdef CONFIG_NUMA
void hugetlb_register_node(struct node *node);
void hugetlb_unregister_node(struct node *node);
#endif

/*
 * Check if a given raw @page in a hugepage is HWPOISON.
 */
bool is_raw_hwpoison_page_in_hugepage(struct page *page);

#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
{
        return NULL;
}

static inline struct folio *filemap_lock_hugetlb_folio(struct hstate *h,
                                struct address_space *mapping, pgoff_t idx)
{
        return NULL;
}

static inline int isolate_or_dissolve_huge_page(struct page *page,
                                                struct list_head *list)
{
        return -ENOMEM;
}

static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
                                                unsigned long addr,
                                                int avoid_reserve)
{
        return NULL;
}

static inline struct folio *
alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
                             nodemask_t *nmask, gfp_t gfp_mask,
                             bool allow_alloc_fallback)
{
        return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
        return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
        return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
        return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
        return NULL;
}

static inline struct hstate *folio_hstate(struct folio *folio)
{
        return NULL;
}

static inline struct hstate *size_to_hstate(unsigned long size)
{
        return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
        return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
        return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
        return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
        return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
        return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
        return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
        return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
        return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
        return 0;
}

static inline int hstate_index(struct hstate *h)
{
        return 0;
}

static inline int dissolve_free_hugetlb_folio(struct folio *folio)
{
        return 0;
}

static inline int dissolve_free_hugetlb_folios(unsigned long start_pfn,
                                               unsigned long end_pfn)
{
        return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
        return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
        return false;
}

static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
        return 0;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
        return 0;
}

static inline bool htlb_allow_alloc_fallback(int reason)
{
        return false;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
                                           struct mm_struct *mm, pte_t *pte)
{
        return &mm->page_table_lock;
}

static inline void hugetlb_count_init(struct mm_struct *mm)
{
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
                                          unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_MMU
        return ptep_get(ptep);
#else
        return *ptep;
#endif
}

static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                                   pte_t *ptep, pte_t pte, unsigned long sz)
{
}

static inline void hugetlb_register_node(struct node *node)
{
}

static inline void hugetlb_unregister_node(struct node *node)
{
}

static inline bool hugetlbfs_pagecache_present(
                struct hstate *h, struct vm_area_struct *vma, unsigned long address)
{
        return false;
}
#endif /* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
                                        struct mm_struct *mm, pte_t *pte)
{
        spinlock_t *ptl;

        ptl = huge_pte_lockptr(h, mm, pte);
        spin_lock(ptl);
        return ptl;
}

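/*
 * Typical usage of huge_pte_lock() (illustrative only; real callers live in
 * mm/hugetlb.c):
 *
 *	spinlock_t *ptl;
 *
 *	ptl = huge_pte_lock(h, mm, ptep);
 *	... examine or update the huge PTE while holding the lock ...
 *	spin_unlock(ptl);
 */
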
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
#endif

#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
static inline bool hugetlb_pmd_shared(pte_t *pte)
{
        return page_count(virt_to_page(pte)) > 1;
}
#else
static inline bool hugetlb_pmd_shared(pte_t *pte)
{
        return false;
}
#endif

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);

#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
/*
 * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
 * implement this.
 */
#define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
#endif

static inline bool __vma_shareable_lock(struct vm_area_struct *vma)
{
        return (vma->vm_flags & VM_MAYSHARE) && vma->vm_private_data;
}

bool __vma_private_lock(struct vm_area_struct *vma);

/*
 * Safe version of huge_pte_offset() to check the locks.  See comments
 * above huge_pte_offset().
 */
static inline pte_t *
hugetlb_walk(struct vm_area_struct *vma, unsigned long addr, unsigned long sz)
{
#if defined(CONFIG_HUGETLB_PAGE) && \
        defined(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) && defined(CONFIG_LOCKDEP)
        struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

        /*
         * If pmd sharing is possible, locking is needed to safely walk the
         * hugetlb pgtables.  More information can be found at the comment
         * above huge_pte_offset() in the same file.
         *
         * NOTE: lockdep_is_held() is only defined with CONFIG_LOCKDEP.
         */
        if (__vma_shareable_lock(vma))
                WARN_ON_ONCE(!lockdep_is_held(&vma_lock->rw_sema) &&
                             !lockdep_is_held(
                                 &vma->vm_file->f_mapping->i_mmap_rwsem));
#endif
        return huge_pte_offset(vma->vm_mm, addr, sz);
}

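/*
 * Illustrative sketch of locking rule (2.1) from the huge_pte_offset()
 * comment above: take the hugetlb vma lock so pmd unsharing cannot race
 * with the walk ('vma', 'addr' and 'h' are placeholders):
 *
 *	pte_t *ptep;
 *
 *	hugetlb_vma_lock_read(vma);
 *	ptep = hugetlb_walk(vma, addr, huge_page_size(h));
 *	if (ptep) {
 *		... read the PTE, or take the PTE lock, as needed ...
 *	}
 *	hugetlb_vma_unlock_read(vma);
 */
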
#endif /* _LINUX_HUGETLB_H */