/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
#include <linux/gfp.h>
#include <linux/userfaultfd_k.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
/*
 * Fallback hugepd definitions for architectures that do not describe
 * huge pages with a "huge page directory" entry: is_hugepd() is then
 * always false.
 */
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#endif
#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

/*
 * A HugeTLB page has more metadata to save than fits in the head struct
 * page, so we have to store some of it in (otherwise unused) fields of
 * the tail struct pages. In order to avoid conflicts caused by subsequent
 * use of more tail struct pages, we gather these discrete indexes of
 * tail struct page here.
 */
enum {
	SUBPAGE_INDEX_SUBPOOL = 1,	/* reuse page->private */
#ifdef CONFIG_CGROUP_HUGETLB
	SUBPAGE_INDEX_CGROUP,		/* reuse page->private */
	SUBPAGE_INDEX_CGROUP_RSVD,	/* reuse page->private */
	__MAX_CGROUP_SUBPAGE_INDEX = SUBPAGE_INDEX_CGROUP_RSVD,
#endif
	__NR_USED_SUBPAGE,
};
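/*
 * Illustrative sketch (not an API contract): metadata lives in
 * page->private of the tail page at the given index, so the subpool
 * pointer of head page "hpage" is reachable as
 *
 *	(struct hugepage_subpool *)page_private(hpage + SUBPAGE_INDEX_SUBPOOL);
 *
 * The hugetlb_page_subpool()/hugetlb_set_page_subpool() helpers later in
 * this header wrap exactly this access.
 */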
struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both allocated and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};
struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On private mappings, the counter to uncharge reservations is stored
	 * here. If these fields are 0, then either the mapping is shared, or
	 * cgroup accounting is disabled for this resv_map.
	 */
	struct page_counter *reservation_counter;
	unsigned long pages_per_hpage;
	struct cgroup_subsys_state *css;
#endif
};
/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 *                    across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by the resv_map's lock. The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map. The from and to elements are huge page
 * indices into the associated mapping. from indicates the starting index
 * of the region. to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping. It is important to note that the to element
 * represents the first element past the end of the region. This is used in
 * arithmetic: 4 (to) - 0 (from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On shared mappings, each reserved region appears as a struct
	 * file_region in resv_map. These fields hold the info needed to
	 * uncharge each reservation.
	 */
	struct page_counter *reservation_counter;
	struct cgroup_subsys_state *css;
#endif
};
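/*
 * Worked example of the interval convention above: a mapping with huge
 * pages reserved at indices 0-1 and 5-7 carries two file_region entries
 * on resv_map->regions, [0, 2) and [5, 8), accounting for
 * (2 - 0) + (8 - 5) = 5 reserved pages in total.
 */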
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
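/*
 * Illustrative sketch: iterating all registered huge page sizes, roughly
 * what the meminfo reporting functions declared below do:
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("%s: %lu free of %lu pages\n", h->name,
 *			h->free_huge_pages, h->nr_huge_pages);
 */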
struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
					      long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);
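/*
 * Illustrative sketch (assumed caller, not a fixed API recipe): a subpool
 * caps and/or guarantees huge pages for one hugetlbfs mount. A mount
 * limited to 16 pages of the default size, with no guaranteed minimum,
 * would look roughly like:
 *
 *	struct hugepage_subpool *spool;
 *
 *	spool = hugepage_new_subpool(&default_hstate, 16, -1);
 *	if (!spool)
 *		return -ENOMEM;
 *	...
 *	hugepage_put_subpool(spool);
 */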
void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
void clear_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int move_hugetlb_page_tables(struct vm_area_struct *vma,
			     struct vm_area_struct *new_vma,
			     unsigned long old_addr, unsigned long new_addr,
			     unsigned long len);
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
			    struct vm_area_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *,
			  zap_flags_t);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page, zap_flags_t zap_flags);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
void hugetlb_show_meminfo_node(int nid);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
#ifdef CONFIG_USERFAULTFD
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
				struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				unsigned long src_addr,
				enum mcopy_atomic_mode mode,
				struct page **pagep,
				bool wp_copy);
#endif /* CONFIG_USERFAULTFD */
bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
						struct vm_area_struct *vma,
						vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
						long freed);
int isolate_hugetlb(struct page *page, struct list_head *list);
int get_hwpoison_huge_page(struct page *page, bool *hugetlb);
int get_huge_page_for_hwpoison(unsigned long pfn, int flags);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
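/*
 * Illustrative sketch: the fault mutex table serializes hugetlb faults on
 * the same (mapping, index) pair; the typical pattern in the fault and
 * userfaultfd paths is:
 *
 *	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
 *
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */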
pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pud_t *pud);

struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;
/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
unsigned long hugetlb_mask_last_page(struct hstate *h);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
			     pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
			     pgd_t *pgd, int flags);
int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot,
		unsigned long cp_flags);

bool is_hugetlb_entry_migration(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);
#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma) { }
static inline void clear_vma_resv_huge_pages(struct vm_area_struct *vma) { }

static inline unsigned long hugetlb_total_pages(void) { return 0; }

static inline struct address_space *hugetlb_page_mapping_lock_write(
							struct page *hpage)
{
	return NULL;
}

static inline int huge_pmd_unshare(struct mm_struct *mm,
				   struct vm_area_struct *vma,
				   unsigned long addr, pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

static inline long follow_hugetlb_page(struct mm_struct *mm,
			struct vm_area_struct *vma, struct page **pages,
			struct vm_area_struct **vmas, unsigned long *position,
			unsigned long *nr_pages, long i, unsigned int flags,
			int *nonblocking)
{
	BUG();
	return 0;
}

static inline struct page *follow_huge_addr(struct mm_struct *mm,
					unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}
static inline int copy_hugetlb_page_range(struct mm_struct *dst,
			struct mm_struct *src,
			struct vm_area_struct *dst_vma,
			struct vm_area_struct *src_vma)
{
	BUG();
	return 0;
}

static inline int move_hugetlb_page_tables(struct vm_area_struct *vma,
					   struct vm_area_struct *new_vma,
					   unsigned long old_addr,
					   unsigned long new_addr,
					   unsigned long len)
{
	BUG();
	return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m) { }
static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
	return 0;
}
static inline void hugetlb_show_meminfo_node(int nid) { }

static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
		unsigned long address, hugepd_t hpd, int flags,
		int pdshift)
{
	return NULL;
}

static inline struct page *follow_huge_pmd(struct mm_struct *mm,
				unsigned long address, pmd_t *pmd, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pud(struct mm_struct *mm,
				unsigned long address, pud_t *pud, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pgd(struct mm_struct *mm,
				unsigned long address, pgd_t *pgd, int flags)
{
	return NULL;
}

static inline int prepare_hugepage_range(struct file *file,
				unsigned long addr, unsigned long len)
{
	return -EINVAL;
}

static inline int pmd_huge(pmd_t pmd) { return 0; }
static inline int pud_huge(pud_t pud) { return 0; }

static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	BUG();
}
#ifdef CONFIG_USERFAULTFD
static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
						pte_t *dst_pte,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						enum mcopy_atomic_mode mode,
						struct page **pagep,
						bool wp_copy)
{
	BUG();
	return 0;
}
#endif /* CONFIG_USERFAULTFD */

static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
					unsigned long sz)
{
	return NULL;
}

static inline int isolate_hugetlb(struct page *page, struct list_head *list)
{
	return -EBUSY;
}

static inline int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
{
	return 0;
}
static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags)
{
	return 0;
}
static inline void putback_active_hugepage(struct page *page) { }

static inline void move_hugetlb_state(struct page *oldpage,
					struct page *newpage, int reason)
{
}

static inline unsigned long hugetlb_change_protection(
			struct vm_area_struct *vma, unsigned long address,
			unsigned long end, pgprot_t newprot,
			unsigned long cp_flags)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page,
			zap_flags_t zap_flags)
{
	BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	BUG();
	return 0;
}

static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * pgd_huge() handles hugepages at the page global directory level.
 * Architectures that support hugepages at the PGD level need to
 * define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif
#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
	/*
	 * The file will be used as a shm file so shmfs accounting rules
	 * apply.
	 */
	HUGETLB_SHMFS_INODE = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply.
	 */
	HUGETLB_ANONHUGE_INODE = 2,
};
#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;	/* inodes allowed */
	long	free_inodes;	/* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t	uid;
	kgid_t	gid;
	umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
	unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}
extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				int creat_flags, int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		int creat_flags, int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return NULL;
}
#endif /* !CONFIG_HUGETLBFS */
#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

unsigned long
generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
				  unsigned long len, unsigned long pgoff,
				  unsigned long flags);
/*
 * hugetlb page specific state flags. These flags are located in page.private
 * of the hugetlb head page. Functions created via the below macros should be
 * used to manipulate these flags.
 *
 * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at
 *	allocation time. Cleared when page is fully instantiated. Free
 *	routine checks flag to restore a reservation on error paths.
 *	Synchronization: Examined or modified by code that knows it has
 *	the only reference to page, i.e. after allocation but before use
 *	or when the page is being freed.
 * HPG_migratable - Set after a newly allocated page is added to the page
 *	cache and/or page tables. Indicates the page is a candidate for
 *	migration.
 *	Synchronization: Initially set after new page allocation with no
 *	locking. When examined and modified during migration processing
 *	(isolate, migrate, putback) the hugetlb_lock is held.
 * HPG_temporary - Set on a page that is temporarily allocated from the buddy
 *	allocator. Typically used for migration target pages when no pages
 *	are available in the pool. The hugetlb free page path will
 *	immediately free pages with this flag set to the buddy allocator.
 *	Synchronization: Can be set after huge page allocation from buddy when
 *	code knows it has the only reference. All other examinations and
 *	modifications require hugetlb_lock.
 * HPG_freed - Set when page is on the free lists.
 *	Synchronization: hugetlb_lock held for examination and modification.
 * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed.
 */
enum hugetlb_page_flags {
	HPG_restore_reserve = 0,
	HPG_migratable,
	HPG_temporary,
	HPG_freed,
	HPG_vmemmap_optimized,
	__NR_HPAGEFLAGS,
};
/*
 * Macros to create test, set and clear function definitions for
 * hugetlb specific page flags.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define TESTHPAGEFLAG(uname, flname)				\
static inline int HPage##uname(struct page *page)		\
	{ return test_bit(HPG_##flname, &(page->private)); }

#define SETHPAGEFLAG(uname, flname)				\
static inline void SetHPage##uname(struct page *page)		\
	{ set_bit(HPG_##flname, &(page->private)); }

#define CLEARHPAGEFLAG(uname, flname)				\
static inline void ClearHPage##uname(struct page *page)	\
	{ clear_bit(HPG_##flname, &(page->private)); }
#else
#define TESTHPAGEFLAG(uname, flname)				\
static inline int HPage##uname(struct page *page)		\
	{ return 0; }

#define SETHPAGEFLAG(uname, flname)				\
static inline void SetHPage##uname(struct page *page)		\
	{ }

#define CLEARHPAGEFLAG(uname, flname)				\
static inline void ClearHPage##uname(struct page *page)	\
	{ }
#endif

#define HPAGEFLAG(uname, flname)				\
	TESTHPAGEFLAG(uname, flname)				\
	SETHPAGEFLAG(uname, flname)				\
	CLEARHPAGEFLAG(uname, flname)
/*
 * Create functions associated with hugetlb page flags
 */
HPAGEFLAG(RestoreReserve, restore_reserve)
HPAGEFLAG(Migratable, migratable)
HPAGEFLAG(Temporary, temporary)
HPAGEFLAG(Freed, freed)
HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
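/*
 * Illustrative sketch: with CONFIG_HUGETLB_PAGE, HPAGEFLAG(Freed, freed)
 * above expands to three helpers operating on the head page's private
 * field:
 *
 *	int HPageFreed(struct page *page);
 *	void SetHPageFreed(struct page *page);
 *	void ClearHPageFreed(struct page *page);
 *
 * so callers test, set and clear hugetlb state without touching bit
 * numbers directly.
 */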
#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	struct mutex resize_lock;
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned int demote_order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int max_huge_pages_node[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
	unsigned int optimize_vmemmap_pages;
#endif
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files_dfl[8];
	struct cftype cgroup_files_legacy[10];
#endif
	char name[HSTATE_NAME_LEN];
};
struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
};

int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
				nodemask_t *nmask, gfp_t gfp_mask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			pgoff_t idx);
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address, struct page *page);
/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h, int nid);
int __init alloc_bootmem_huge_page(struct hstate *h, int nid);
bool __init hugetlb_node_alloc_supported(void);

void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])
/*
 * hugetlb page subpool pointer located in hpage[1].private
 */
static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
	return (void *)page_private(hpage + SUBPAGE_INDEX_SUBPOOL);
}

static inline void hugetlb_set_page_subpool(struct page *hpage,
					struct hugepage_subpool *subpool)
{
	set_page_private(hpage + SUBPAGE_INDEX_SUBPOOL, (unsigned long)subpool);
}
static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	return size_to_hstate(1UL << page_size_log);
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}
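/*
 * Illustrative sketch: page_size_log comes from userspace encodings such
 * as the MAP_HUGE_SHIFT/SHM_HUGE_SHIFT bits, so for a 2 MB request
 * (log = 21), hstate_sizelog(21) returns the hstate whose page size is
 * 1UL << 21; a log of 0 selects the default huge page size.
 */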
static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}
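/*
 * Worked example (x86-64, PAGE_SHIFT == 12): for the 2 MB hstate,
 * h->order == 9, so:
 *
 *	huge_page_size(h)	== 4096 << 9	 == 2097152 (2 MB)
 *	huge_page_shift(h)	== 9 + 12	 == 21
 *	pages_per_huge_page(h)	== 1 << 9	 == 512
 *	blocks_per_huge_page(h)	== 2097152 / 512 == 4096 (512-byte sectors)
 */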
#include <asm/hugetlb.h>

#ifndef is_hugepage_only_range
static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}
#define is_hugepage_only_range is_hugepage_only_range
#endif

#ifndef arch_clear_hugepage_flags
static inline void arch_clear_hugepage_flags(struct page *page) { }
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
#endif

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
				       vm_flags_t flags)
{
	return pte_mkhuge(entry);
}
#endif
static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(page_size(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	if ((huge_page_shift(h) == PMD_SHIFT) ||
		(huge_page_shift(h) == PUD_SHIFT) ||
			(huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return arch_hugetlb_migration_supported(h);
}
/*
 * Movability check is different from the migration check. It determines
 * whether or not a huge page should be placed in the movable zone.
 * Movability of any huge page only matters if the huge page size is
 * supported for migration in the first place: there is no reason for a
 * huge page to be movable if it is not migratable to start with. The
 * huge page size should also be large enough to be placed under a
 * movable zone and still feasible to migrate. Mere presence in the
 * movable zone does not make migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones are
 * migratable, they should not be movable because it's not feasible to
 * migrate them from the movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
	if (!hugepage_migration_supported(h))
		return false;

	if (hstate_is_gigantic(h))
		return false;

	return true;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	if (hugepage_movable_supported(h))
		return GFP_HIGHUSER_MOVABLE;
	else
		return GFP_HIGHUSER;
}
static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	gfp_t modified_mask = htlb_alloc_mask(h);

	/* Some callers might want to enforce node */
	modified_mask |= (gfp_mask & __GFP_THISNODE);

	modified_mask |= (gfp_mask & __GFP_NOWARN);

	return modified_mask;
}
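/*
 * Illustrative sketch: a caller that must stay on one node and tolerates
 * allocation failure would combine the masks like:
 *
 *	gfp_t gfp = htlb_modify_alloc_mask(h, __GFP_THISNODE | __GFP_NOWARN);
 *
 * which yields GFP_HIGHUSER or GFP_HIGHUSER_MOVABLE plus only those two
 * modifier bits; all other bits of the caller's mask are dropped.
 */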
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}
#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_init(struct mm_struct *mm)
{
	atomic_long_set(&mm->hugetlb_usage, 0);
}

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}
#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
{
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte)
{
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif
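/*
 * Illustrative sketch: protection changes use the start/commit pair so an
 * architecture can batch or abort the update; the generic pattern is:
 *
 *	pte_t old_pte, new_pte;
 *
 *	old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
 *	new_pte = huge_pte_modify(old_pte, newprot);
 *	huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, new_pte);
 *
 * roughly what hugetlb_change_protection() does per mapped huge page.
 */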
#else	/* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
	return NULL;
}

static inline int isolate_or_dissolve_huge_page(struct page *page,
						struct list_head *list)
{
	return -ENOMEM;
}

static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
					   unsigned long addr,
					   int avoid_reserve)
{
	return NULL;
}

static inline struct page *
alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
			nodemask_t *nmask, gfp_t gfp_mask)
{
	return NULL;
}

static inline struct page *alloc_huge_page_vma(struct hstate *h,
					       struct vm_area_struct *vma,
					       unsigned long address)
{
	return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
	return 0;
}

static inline struct hstate *hstate_file(struct file *f) { return NULL; }
static inline struct hstate *hstate_sizelog(int page_size_log) { return NULL; }
static inline struct hstate *hstate_vma(struct vm_area_struct *vma) { return NULL; }
static inline struct hstate *page_hstate(struct page *page) { return NULL; }
static inline struct hstate *size_to_hstate(unsigned long size) { return NULL; }

static inline unsigned long huge_page_size(struct hstate *h) { return PAGE_SIZE; }
static inline unsigned long huge_page_mask(struct hstate *h) { return PAGE_MASK; }
static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma) { return PAGE_SIZE; }
static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma) { return PAGE_SIZE; }
static inline unsigned int huge_page_order(struct hstate *h) { return 0; }
static inline unsigned int huge_page_shift(struct hstate *h) { return PAGE_SHIFT; }
static inline bool hstate_is_gigantic(struct hstate *h) { return false; }
static inline unsigned int pages_per_huge_page(struct hstate *h) { return 1; }
static inline unsigned hstate_index_to_shift(unsigned index) { return 0; }
static inline int hstate_index(struct hstate *h) { return 0; }

static inline int dissolve_free_huge_page(struct page *page) { return 0; }
static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h) { return false; }
static inline bool hugepage_movable_supported(struct hstate *h) { return false; }
static inline gfp_t htlb_alloc_mask(struct hstate *h) { return 0; }
static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask) { return 0; }

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_count_init(struct mm_struct *mm) { }
static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m) { }
static inline void hugetlb_count_sub(long l, struct mm_struct *mm) { }

static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pte_t *ptep)
{
	return *ptep;
}

static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte) { }
#endif	/* CONFIG_HUGETLB_PAGE */
static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}

#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
extern void __init hugetlb_cma_check(void);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
static inline __init void hugetlb_cma_check(void)
{
}
#endif

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);
#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
/*
 * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
 * implement this. Otherwise fall back to the generic flush_tlb_range().
 */
#define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
#endif

#endif /* _LINUX_HUGETLB_H */