/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
#include <linux/gfp.h>
#include <linux/userfaultfd_k.h>
#ifndef CONFIG_ARCH_HAS_HUGEPD
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#ifdef CONFIG_HUGETLB_PAGE
#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>
 * For a HugeTLB page, there is more metadata to save in the struct page. But
 * the head struct page cannot meet our needs, so we have to abuse the tail
 * struct pages to store the metadata. In order to avoid conflicts caused by
 * subsequent use of more tail struct pages, we gather these discrete indexes
 * of the tail struct pages here.
	SUBPAGE_INDEX_SUBPOOL = 1,	/* reuse page->private */
#ifdef CONFIG_CGROUP_HUGETLB
	SUBPAGE_INDEX_CGROUP,		/* reuse page->private */
	SUBPAGE_INDEX_CGROUP_RSVD,	/* reuse page->private */
	__MAX_CGROUP_SUBPAGE_INDEX = SUBPAGE_INDEX_CGROUP_RSVD,
#ifdef CONFIG_MEMORY_FAILURE
	SUBPAGE_INDEX_HWPOISON,
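
/*
 * A minimal sketch of how one of these indexes is used (illustrative
 * only; the real accessors, e.g. hugetlb_page_subpool(), appear later
 * in this header):
 *
 *	struct page *head = ...;		// hugetlb head page
 *	struct hugepage_subpool *spool =
 *		(void *)head[SUBPAGE_INDEX_SUBPOOL].private;
 */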
struct hugepage_subpool {
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both allocated and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
#ifdef CONFIG_CGROUP_HUGETLB
	 * On private mappings, the counter to uncharge reservations is stored
	 * here. If these fields are 0, then either the mapping is shared, or
	 * cgroup accounting is disabled for this resv_map.
	struct page_counter *reservation_counter;
	unsigned long pages_per_hpage;
	struct cgroup_subsys_state *css;
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock. The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map. The from and to elements are huge page
 * indices into the associated mapping. from indicates the starting index
 * of the region. to represents the first index past the end of the region.
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping. It is important to note that the to element
 * represents the first element past the end of the region. This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
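 *
 * A minimal worked sketch (illustrative only), reading the from/to
 * fields directly rather than through any helper:
 *
 *	struct file_region rg = { .from = 2, .to = 7 };
 *	long npages = rg.to - rg.from;	// 5 huge pages: indices 2..6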
	struct list_head link;
#ifdef CONFIG_CGROUP_HUGETLB
	 * On shared mappings, each reserved region appears as a struct
	 * file_region in resv_map. These fields hold the info needed to
	 * uncharge each reservation.
	struct page_counter *reservation_counter;
	struct cgroup_subsys_state *css;
struct hugetlb_vma_lock {
	struct rw_semaphore rw_sema;
	struct vm_area_struct *vma;
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);
extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
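
/*
 * A minimal usage sketch for for_each_hstate() (illustrative only):
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("hstate %s: order %u\n", h->name, huge_page_order(h));
 */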
struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
void hugepage_put_subpool(struct hugepage_subpool *spool);
void hugetlb_dup_vma_private(struct vm_area_struct *vma);
void clear_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
int move_hugetlb_page_tables(struct vm_area_struct *vma,
			     struct vm_area_struct *new_vma,
			     unsigned long old_addr, unsigned long new_addr,
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
			    struct vm_area_struct *, struct vm_area_struct *);
struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
				unsigned long address, unsigned int flags);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *,
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page, zap_flags_t zap_flags);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
void hugetlb_show_meminfo_node(int nid);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
#ifdef CONFIG_USERFAULTFD
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
				struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				unsigned long src_addr,
				enum mcopy_atomic_mode mode,
#endif /* CONFIG_USERFAULTFD */
bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
						struct vm_area_struct *vma,
						vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
int isolate_hugetlb(struct page *page, struct list_head *list);
int get_hwpoison_huge_page(struct page *page, bool *hugetlb, bool unpoison);
int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
				bool *migratable_cleared);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
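
/*
 * Typical locking sketch for the fault mutex table (illustrative only;
 * 'mapping' and 'idx' are assumed to be in scope):
 *
 *	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
 *
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	// ... handle the fault or hole punch ...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */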
pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pud_t *pud);
struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);
extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
unsigned long hugetlb_mask_last_page(struct hstate *h);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end);
void hugetlb_vma_lock_read(struct vm_area_struct *vma);
void hugetlb_vma_unlock_read(struct vm_area_struct *vma);
void hugetlb_vma_lock_write(struct vm_area_struct *vma);
void hugetlb_vma_unlock_write(struct vm_area_struct *vma);
int hugetlb_vma_trylock_write(struct vm_area_struct *vma);
void hugetlb_vma_assert_locked(struct vm_area_struct *vma);
void hugetlb_vma_lock_release(struct kref *kref);
int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot,
		unsigned long cp_flags);
bool is_hugetlb_entry_migration(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);
#else /* !CONFIG_HUGETLB_PAGE */
static inline void hugetlb_dup_vma_private(struct vm_area_struct *vma)
static inline void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
static inline unsigned long hugetlb_total_pages(void)
static inline struct address_space *hugetlb_page_mapping_lock_write(
static inline int huge_pmd_unshare(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep)
static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
static inline struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
				unsigned long address, unsigned int flags)
	BUILD_BUG(); /* should never be compiled in if !CONFIG_HUGETLB_PAGE */
static inline long follow_hugetlb_page(struct mm_struct *mm,
			struct vm_area_struct *vma, struct page **pages,
			struct vm_area_struct **vmas, unsigned long *position,
			unsigned long *nr_pages, long i, unsigned int flags,
static inline int copy_hugetlb_page_range(struct mm_struct *dst,
					  struct mm_struct *src,
					  struct vm_area_struct *dst_vma,
					  struct vm_area_struct *src_vma)
static inline int move_hugetlb_page_tables(struct vm_area_struct *vma,
					   struct vm_area_struct *new_vma,
					   unsigned long old_addr,
					   unsigned long new_addr,
static inline void hugetlb_report_meminfo(struct seq_file *m)
static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
static inline void hugetlb_show_meminfo_node(int nid)
static inline int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
static inline void hugetlb_vma_lock_read(struct vm_area_struct *vma)
static inline void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
static inline void hugetlb_vma_lock_write(struct vm_area_struct *vma)
static inline void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
static inline int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
static inline void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
static inline int pmd_huge(pmd_t pmd)
static inline int pud_huge(pud_t pud)
static inline int is_hugepage_only_range(struct mm_struct *mm,
			unsigned long addr, unsigned long len)
static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
#ifdef CONFIG_USERFAULTFD
static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						enum mcopy_atomic_mode mode,
#endif /* CONFIG_USERFAULTFD */
static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
static inline int isolate_hugetlb(struct page *page, struct list_head *list)
static inline int get_hwpoison_huge_page(struct page *page, bool *hugetlb, bool unpoison)
static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
					bool *migratable_cleared)
static inline void putback_active_hugepage(struct page *page)
static inline void move_hugetlb_state(struct folio *old_folio,
					struct folio *new_folio, int reason)
static inline unsigned long hugetlb_change_protection(
			struct vm_area_struct *vma, unsigned long address,
			unsigned long end, pgprot_t newprot,
			unsigned long cp_flags)
static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page,
			zap_flags_t zap_flags)
static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }
#endif /* !CONFIG_HUGETLB_PAGE */
 * hugepages at the page global directory. If an arch supports
 * hugepages at the pgd level, it needs to define this.
#define pgd_huge(x)	0
#define p4d_huge(x)	0
static inline int pgd_write(pgd_t pgd)
#define HUGETLB_ANON_FILE	"anon_hugepage"
 * The file will be used as a shm file so shmfs accounting rules
	HUGETLB_SHMFS_INODE     = 1,
 * The file is being created on the internal vfs mount and shmfs
 * accounting rules do not apply
	HUGETLB_ANONHUGE_INODE  = 2,
#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;	/* inodes allowed */
	long	free_inodes;	/* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
	return sb->s_fs_info;
struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				int creat_flags, int page_size_log);
static inline bool is_file_hugepages(struct file *file)
	if (file->f_op == &hugetlbfs_file_operations)
	return is_file_shm_hugepages(file);
static inline struct hstate *hstate_inode(struct inode *i)
	return HUGETLBFS_SB(i->i_sb)->hstate;
#else /* !CONFIG_HUGETLBFS */
#define is_file_hugepages(file)		false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		int creat_flags, int page_size_log)
	return ERR_PTR(-ENOSYS);
static inline struct hstate *hstate_inode(struct inode *i)
#endif /* !CONFIG_HUGETLBFS */
#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */
generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
				  unsigned long len, unsigned long pgoff,
				  unsigned long flags);
 * hugetlb page specific state flags. These flags are located in page.private
 * of the hugetlb head page. Functions created via the below macros should be
 * used to manipulate these flags.
 * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at
 *	allocation time. Cleared when page is fully instantiated. Free
 *	routine checks flag to restore a reservation on error paths.
 *	Synchronization: Examined or modified by code that knows it has
 *	the only reference to the page, i.e. after allocation but before use
 *	or when the page is being freed.
 * HPG_migratable - Set after a newly allocated page is added to the page
 *	cache and/or page tables. Indicates the page is a candidate for migration.
 *	Synchronization: Initially set after new page allocation with no
 *	locking. When examined and modified during migration processing
 *	(isolate, migrate, putback) the hugetlb_lock is held.
 * HPG_temporary - Set on a page that is temporarily allocated from the buddy
 *	allocator. Typically used for migration target pages when no pages
 *	are available in the pool. The hugetlb free page path will
 *	immediately free pages with this flag set to the buddy allocator.
 *	Synchronization: Can be set after huge page allocation from buddy when
 *	code knows it has the only reference. All other examinations and
 *	modifications require hugetlb_lock.
 * HPG_freed - Set when page is on the free lists.
 *	Synchronization: hugetlb_lock held for examination and modification.
 * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed.
 * HPG_raw_hwp_unreliable - Set when the hugetlb page has a hwpoison sub-page
 *	that is not tracked by the raw_hwp_page list.
enum hugetlb_page_flags {
	HPG_restore_reserve = 0,
	HPG_vmemmap_optimized,
	HPG_raw_hwp_unreliable,
 * Macros to create test, set and clear function definitions for
 * hugetlb specific page flags.
#ifdef CONFIG_HUGETLB_PAGE
#define TESTHPAGEFLAG(uname, flname)				\
static __always_inline						\
bool folio_test_hugetlb_##flname(struct folio *folio)		\
	{	void *private = &folio->private;		\
		return test_bit(HPG_##flname, private);		\
static inline int HPage##uname(struct page *page)		\
	{ return test_bit(HPG_##flname, &(page->private)); }
#define SETHPAGEFLAG(uname, flname)				\
static __always_inline						\
void folio_set_hugetlb_##flname(struct folio *folio)		\
	{	void *private = &folio->private;		\
		set_bit(HPG_##flname, private);			\
static inline void SetHPage##uname(struct page *page)		\
	{ set_bit(HPG_##flname, &(page->private)); }
#define CLEARHPAGEFLAG(uname, flname)				\
static __always_inline						\
void folio_clear_hugetlb_##flname(struct folio *folio)		\
	{	void *private = &folio->private;		\
		clear_bit(HPG_##flname, private);		\
static inline void ClearHPage##uname(struct page *page)		\
	{ clear_bit(HPG_##flname, &(page->private)); }
#define TESTHPAGEFLAG(uname, flname)				\
	folio_test_hugetlb_##flname(struct folio *folio)	\
static inline int HPage##uname(struct page *page)		\
#define SETHPAGEFLAG(uname, flname)				\
	folio_set_hugetlb_##flname(struct folio *folio)		\
static inline void SetHPage##uname(struct page *page)		\
#define CLEARHPAGEFLAG(uname, flname)				\
	folio_clear_hugetlb_##flname(struct folio *folio)	\
static inline void ClearHPage##uname(struct page *page)	\
#define HPAGEFLAG(uname, flname)				\
	TESTHPAGEFLAG(uname, flname)				\
	SETHPAGEFLAG(uname, flname)				\
	CLEARHPAGEFLAG(uname, flname)				\
 * Create functions associated with hugetlb page flags
HPAGEFLAG(RestoreReserve, restore_reserve)
HPAGEFLAG(Migratable, migratable)
HPAGEFLAG(Temporary, temporary)
HPAGEFLAG(Freed, freed)
HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable)
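
/*
 * A minimal sketch of the helpers generated above (illustrative only;
 * per the synchronization notes, HPG_migratable is examined and
 * modified under hugetlb_lock during migration processing):
 *
 *	folio_set_hugetlb_migratable(folio);
 *	if (folio_test_hugetlb_migratable(folio))
 *		folio_clear_hugetlb_migratable(folio);
 */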
#ifdef CONFIG_HUGETLB_PAGE
#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
	struct mutex resize_lock;
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int demote_order;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int max_huge_pages_node[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files_dfl[8];
	struct cftype cgroup_files_legacy[10];
	char name[HSTATE_NAME_LEN];
struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
				nodemask_t *nmask, gfp_t gfp_mask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address);
int hugetlb_add_to_page_cache(struct page *page, struct address_space *mapping,
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address, struct page *page);
int __init __alloc_bootmem_huge_page(struct hstate *h, int nid);
int __init alloc_bootmem_huge_page(struct hstate *h, int nid);
bool __init hugetlb_node_alloc_supported(void);
void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);
#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;
#define default_hstate (hstates[default_hstate_idx])
static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
	return (void *)folio_get_private_1(folio);
 * The hugetlb page subpool pointer is located in hpage[1].private.
static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
	return hugetlb_folio_subpool(page_folio(hpage));
static inline void hugetlb_set_folio_subpool(struct folio *folio,
					struct hugepage_subpool *subpool)
	folio_set_private_1(folio, (unsigned long)subpool);
static inline void hugetlb_set_page_subpool(struct page *hpage,
					struct hugepage_subpool *subpool)
	hugetlb_set_folio_subpool(page_folio(hpage), subpool);
static inline struct hstate *hstate_file(struct file *f)
	return hstate_inode(file_inode(f));
static inline struct hstate *hstate_sizelog(int page_size_log)
		return &default_hstate;
	return size_to_hstate(1UL << page_size_log);
static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
	return hstate_file(vma->vm_file);
static inline unsigned long huge_page_size(const struct hstate *h)
	return (unsigned long)PAGE_SIZE << h->order;
extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);
extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);
static inline unsigned long huge_page_mask(struct hstate *h)
static inline unsigned int huge_page_order(struct hstate *h)
static inline unsigned huge_page_shift(struct hstate *h)
	return h->order + PAGE_SHIFT;
static inline bool hstate_is_gigantic(struct hstate *h)
	return huge_page_order(h) >= MAX_ORDER;
static inline unsigned int pages_per_huge_page(const struct hstate *h)
	return 1 << h->order;
static inline unsigned int blocks_per_huge_page(struct hstate *h)
	return huge_page_size(h) / 512;
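
/*
 * Worked example (illustrative, assuming 4K base pages): for 2MB huge
 * pages h->order == 9, so huge_page_size() == 4K << 9 == 2MB,
 * huge_page_shift() == 9 + 12 == 21, pages_per_huge_page() == 512 and
 * blocks_per_huge_page() == 2MB / 512 == 4096 512-byte blocks.
 */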
#include <asm/hugetlb.h>
#ifndef is_hugepage_only_range
static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
#define is_hugepage_only_range is_hugepage_only_range
#ifndef arch_clear_hugepage_flags
static inline void arch_clear_hugepage_flags(struct page *page) { }
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
	return pte_mkhuge(entry);
static inline struct hstate *folio_hstate(struct folio *folio)
	VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
	return size_to_hstate(folio_size(folio));
static inline struct hstate *page_hstate(struct page *page)
	return folio_hstate(page_folio(page));
static inline unsigned hstate_index_to_shift(unsigned index)
	return hstates[index].order + PAGE_SHIFT;
static inline int hstate_index(struct hstate *h)
extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);
#ifdef CONFIG_MEMORY_FAILURE
extern void hugetlb_clear_page_hwpoison(struct page *hpage);
static inline void hugetlb_clear_page_hwpoison(struct page *hpage)
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
	if ((huge_page_shift(h) == PMD_SHIFT) ||
	    (huge_page_shift(h) == PUD_SHIFT) ||
	    (huge_page_shift(h) == PGDIR_SHIFT))
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
static inline bool hugepage_migration_supported(struct hstate *h)
	return arch_hugetlb_migration_supported(h);
 * The movability check is different from the migration check.
 * It determines whether or not a huge page should be placed in
 * a movable zone. A movability check is needed
 * only if the huge page size is supported for migration:
 * there won't be any reason for the huge page to be movable if
 * it is not migratable to start with. Also the size of the huge
 * page should be large enough to be placed under a movable zone
 * and still feasible enough to be migratable. Just the presence
 * in a movable zone does not make the migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones
 * are migratable they should not be movable, because it is not
 * feasible to migrate them out of a movable zone.
static inline bool hugepage_movable_supported(struct hstate *h)
	if (!hugepage_migration_supported(h))
	if (hstate_is_gigantic(h))
/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
	if (hugepage_movable_supported(h))
		return GFP_HIGHUSER_MOVABLE;
static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
	gfp_t modified_mask = htlb_alloc_mask(h);
	/* Some callers might want to enforce node */
	modified_mask |= (gfp_mask & __GFP_THISNODE);
	modified_mask |= (gfp_mask & __GFP_NOWARN);
	return modified_mask;
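
/*
 * A worked sketch of the mask derivation above (illustrative only):
 * for a movable hstate, htlb_modify_alloc_mask(h, __GFP_THISNODE)
 * returns GFP_HIGHUSER_MOVABLE | __GFP_THISNODE; every other bit of
 * the caller's mask is dropped.
 */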
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
#ifndef hugepages_supported
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
#define hugepages_supported() (HPAGE_SHIFT != 0)
void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);
static inline void hugetlb_count_init(struct mm_struct *mm)
	atomic_long_set(&mm->hugetlb_usage, 0);
static inline void hugetlb_count_add(long l, struct mm_struct *mm)
	atomic_long_add(l, &mm->hugetlb_usage);
static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
	atomic_long_sub(l, &mm->hugetlb_usage);
#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte)
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
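
/*
 * Typical change-protection sketch pairing the two hooks above
 * (illustrative only; 'vma', 'addr', 'ptep' and 'newprot' are assumed
 * to be in scope):
 *
 *	pte_t old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
 *	pte_t new_pte = huge_pte_modify(old_pte, newprot);
 *
 *	huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, new_pte);
 */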
void hugetlb_register_node(struct node *node);
void hugetlb_unregister_node(struct node *node);
#else	/* CONFIG_HUGETLB_PAGE */
static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
static inline int isolate_or_dissolve_huge_page(struct page *page,
						struct list_head *list)
static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
static inline struct page *
alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
			nodemask_t *nmask, gfp_t gfp_mask)
static inline struct page *alloc_huge_page_vma(struct hstate *h,
					       struct vm_area_struct *vma,
					       unsigned long address)
static inline int __alloc_bootmem_huge_page(struct hstate *h)
static inline struct hstate *hstate_file(struct file *f)
static inline struct hstate *hstate_sizelog(int page_size_log)
static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
static inline struct hstate *folio_hstate(struct folio *folio)
static inline struct hstate *page_hstate(struct page *page)
static inline struct hstate *size_to_hstate(unsigned long size)
static inline unsigned long huge_page_size(struct hstate *h)
static inline unsigned long huge_page_mask(struct hstate *h)
static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
static inline unsigned int huge_page_order(struct hstate *h)
static inline unsigned int huge_page_shift(struct hstate *h)
static inline bool hstate_is_gigantic(struct hstate *h)
static inline unsigned int pages_per_huge_page(struct hstate *h)
static inline unsigned hstate_index_to_shift(unsigned index)
static inline int hstate_index(struct hstate *h)
static inline int dissolve_free_huge_page(struct page *page)
static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					unsigned long end_pfn)
static inline bool hugepage_migration_supported(struct hstate *h)
static inline bool hugepage_movable_supported(struct hstate *h)
static inline gfp_t htlb_alloc_mask(struct hstate *h)
static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
	return &mm->page_table_lock;
static inline void hugetlb_count_init(struct mm_struct *mm)
static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pte_t *ptep)
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
static inline void hugetlb_register_node(struct node *node)
static inline void hugetlb_unregister_node(struct node *node)
#endif	/* CONFIG_HUGETLB_PAGE */
static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
	ptl = huge_pte_lockptr(h, mm, pte);
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
static inline __init void hugetlb_cma_reserve(int order)
bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);
#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
 * ARCHes with special requirements for evicting HUGETLB backing TLB entries can implement this.
#define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
#endif /* _LINUX_HUGETLB_H */