Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
1da177e4 LT |
2 | #ifndef _LINUX_HUGETLB_H |
3 | #define _LINUX_HUGETLB_H | |
4 | ||
9c67a207 | 5 | #include <linux/mm.h> |
be93d8cf | 6 | #include <linux/mm_types.h> |
309381fe | 7 | #include <linux/mmdebug.h> |
4e950f6f | 8 | #include <linux/fs.h> |
8edf344c | 9 | #include <linux/hugetlb_inline.h> |
abb8206c | 10 | #include <linux/cgroup.h> |
3489dbb6 | 11 | #include <linux/page_ref.h> |
9119a41e JK |
12 | #include <linux/list.h> |
13 | #include <linux/kref.h> | |
ca5999fd | 14 | #include <linux/pgtable.h> |
d92bbc27 | 15 | #include <linux/gfp.h> |
f6191471 | 16 | #include <linux/userfaultfd_k.h> |
8d88b076 | 17 | #include <linux/nodemask.h> |
4e950f6f | 18 | |
e9ea0e2d AM |
19 | struct ctl_table; |
20 | struct user_struct; | |
24669e58 | 21 | struct mmu_gather; |
a4a00b45 | 22 | struct node; |
e9ea0e2d | 23 | |
454a00c4 | 24 | void free_huge_folio(struct folio *folio); |
dd6fa0b6 | 25 | |
1da177e4 LT |
26 | #ifdef CONFIG_HUGETLB_PAGE |
27 | ||
10969b55 | 28 | #include <linux/pagemap.h> |
516dffdc | 29 | #include <linux/shm.h> |
63551ae0 | 30 | #include <asm/tlbflush.h> |
1da177e4 | 31 | |
cd39d4e9 MS |
32 | /* |
33 | * For a HugeTLB page, there is more metadata to save in the struct page, but | |
34 | * the head struct page cannot meet our needs, so we have to abuse other tail | |
dad6a5eb | 35 | * struct pages to store the metadata. |
cd39d4e9 | 36 | */ |
dad6a5eb | 37 | #define __NR_USED_SUBPAGE 3 |
cd39d4e9 | 38 | |
90481622 DG |
39 | struct hugepage_subpool { |
40 | spinlock_t lock; | |
41 | long count; | |
c6a91820 MK |
42 | long max_hpages; /* Maximum huge pages or -1 if no maximum. */ |
43 | long used_hpages; /* Used count against maximum, includes */ | |
06c88398 | 44 | /* both allocated and reserved pages. */ |
c6a91820 MK |
45 | struct hstate *hstate; |
46 | long min_hpages; /* Minimum huge pages or -1 if no minimum. */ | |
47 | long rsv_hpages; /* Pages reserved against global pool to */ | |
6c26d310 | 48 | /* satisfy minimum size. */ |
90481622 DG |
49 | }; |
50 | ||
9119a41e JK |
51 | struct resv_map { |
52 | struct kref refs; | |
7b24d861 | 53 | spinlock_t lock; |
9119a41e | 54 | struct list_head regions; |
5e911373 MK |
55 | long adds_in_progress; |
56 | struct list_head region_cache; | |
57 | long region_cache_count; | |
bf491692 | 58 | struct rw_semaphore rw_sema; |
e9fe92ae MA |
59 | #ifdef CONFIG_CGROUP_HUGETLB |
60 | /* | |
61 | * On private mappings, the counter to uncharge reservations is stored | |
62 | * here. If these fields are 0, then either the mapping is shared, or | |
63 | * cgroup accounting is disabled for this resv_map. | |
64 | */ | |
65 | struct page_counter *reservation_counter; | |
66 | unsigned long pages_per_hpage; | |
67 | struct cgroup_subsys_state *css; | |
68 | #endif | |
9119a41e | 69 | }; |
075a61d0 MA |
70 | |
71 | /* | |
72 | * Region tracking -- allows tracking of reservations and instantiated pages | |
73 | * across the pages in a mapping. | |
74 | * | |
75 | * The region data structures are embedded into a resv_map and protected | |
76 | * by a resv_map's lock. The set of regions within the resv_map represent | |
77 | * reservations for huge pages, or huge pages that have already been | |
78 | * instantiated within the map. The from and to elements are huge page | |
06c88398 | 79 | * indices into the associated mapping. from indicates the starting index |
075a61d0 MA |
80 | * of the region. to represents the first index past the end of the region. |
81 | * | |
82 | * For example, a file region structure with from == 0 and to == 4 represents | |
83 | * four huge pages in a mapping. It is important to note that the to element | |
84 | * represents the first element past the end of the region. This is used in | |
85 | * arithmetic as 4(to) - 0(from) = 4 huge pages in the region. | |
86 | * | |
87 | * Interval notation of the form [from, to) will be used to indicate that | |
88 | * the endpoint from is inclusive and to is exclusive. | |
89 | */ | |
90 | struct file_region { | |
91 | struct list_head link; | |
92 | long from; | |
93 | long to; | |
94 | #ifdef CONFIG_CGROUP_HUGETLB | |
95 | /* | |
96 | * On shared mappings, each reserved region appears as a struct | |
97 | * file_region in resv_map. These fields hold the info needed to | |
98 | * uncharge each reservation. | |
99 | */ | |
100 | struct page_counter *reservation_counter; | |
101 | struct cgroup_subsys_state *css; | |
102 | #endif | |
103 | }; | |
104 | ||
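As a worked illustration of the [from, to) convention documented above, here is a minimal sketch (not part of the kernel source; the helper name is made up) that sums the huge pages covered by a resv_map's regions, assuming the caller holds resv_map->lock as the comment requires:

```c
/*
 * Illustrative sketch only: count the huge pages covered by a resv_map's
 * regions using the [from, to) convention described above.
 * Assumes resv_map->lock is held by the caller.
 */
static long count_region_pages(struct resv_map *resv)
{
	struct file_region *rg;
	long pages = 0;

	list_for_each_entry(rg, &resv->regions, link)
		pages += rg->to - rg->from;	/* e.g. [0, 4) covers 4 pages */

	return pages;
}
```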
8d9bfb26 MK |
105 | struct hugetlb_vma_lock { |
106 | struct kref refs; | |
107 | struct rw_semaphore rw_sema; | |
108 | struct vm_area_struct *vma; | |
109 | }; | |
110 | ||
9119a41e JK |
111 | extern struct resv_map *resv_map_alloc(void); |
112 | void resv_map_release(struct kref *ref); | |
113 | ||
c3f38a38 AK |
114 | extern spinlock_t hugetlb_lock; |
115 | extern int hugetlb_max_hstate __read_mostly; | |
116 | #define for_each_hstate(h) \ | |
117 | for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++) | |
118 | ||
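A minimal sketch of how the for_each_hstate() iterator above is typically used; the helper name is hypothetical and the pr_info() reporting is illustrative only:

```c
/* Illustrative only: walk every registered hstate and report its size. */
static void report_hstates(void)
{
	struct hstate *h;

	for_each_hstate(h)
		pr_info("hstate %s: %lu kB per huge page\n",
			h->name, huge_page_size(h) >> 10);
}
```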
7ca02d0a MK |
119 | struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages, |
120 | long min_hpages); | |
90481622 DG |
121 | void hugepage_put_subpool(struct hugepage_subpool *spool); |
122 | ||
8d9bfb26 | 123 | void hugetlb_dup_vma_private(struct vm_area_struct *vma); |
550a7d60 | 124 | void clear_vma_resv_huge_pages(struct vm_area_struct *vma); |
550a7d60 MA |
125 | int move_hugetlb_page_tables(struct vm_area_struct *vma, |
126 | struct vm_area_struct *new_vma, | |
127 | unsigned long old_addr, unsigned long new_addr, | |
128 | unsigned long len); | |
bc70fbf2 PX |
129 | int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, |
130 | struct vm_area_struct *, struct vm_area_struct *); | |
04f2cbe3 | 131 | void unmap_hugepage_range(struct vm_area_struct *, |
81edb1ba FN |
132 | unsigned long start, unsigned long end, |
133 | struct folio *, zap_flags_t); | |
2820b0f0 | 134 | void __unmap_hugepage_range(struct mmu_gather *tlb, |
d833352a MG |
135 | struct vm_area_struct *vma, |
136 | unsigned long start, unsigned long end, | |
7f4b6065 | 137 | struct folio *, zap_flags_t zap_flags); |
e1759c21 | 138 | void hugetlb_report_meminfo(struct seq_file *); |
7981593b | 139 | int hugetlb_report_node_meminfo(char *buf, int len, int nid); |
dcadcf1c | 140 | void hugetlb_show_meminfo_node(int nid); |
1da177e4 | 141 | unsigned long hugetlb_total_pages(void); |
2b740303 | 142 | vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, |
788c7df4 | 143 | unsigned long address, unsigned int flags); |
714c1891 | 144 | #ifdef CONFIG_USERFAULTFD |
61c50040 | 145 | int hugetlb_mfill_atomic_pte(pte_t *dst_pte, |
a734991c AR |
146 | struct vm_area_struct *dst_vma, |
147 | unsigned long dst_addr, | |
148 | unsigned long src_addr, | |
d9712937 | 149 | uffd_flags_t flags, |
0169fd51 | 150 | struct folio **foliop); |
714c1891 | 151 | #endif /* CONFIG_USERFAULTFD */ |
33b8f84a | 152 | bool hugetlb_reserve_pages(struct inode *inode, long from, long to, |
5a6fe125 | 153 | struct vm_area_struct *vma, |
ca16d140 | 154 | vm_flags_t vm_flags); |
b5cec28d MK |
155 | long hugetlb_unreserve_pages(struct inode *inode, long start, long end, |
156 | long freed); | |
4c640f12 | 157 | bool folio_isolate_hugetlb(struct folio *folio, struct list_head *list); |
04bac040 | 158 | int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison); |
e591ef7d NH |
159 | int get_huge_page_for_hwpoison(unsigned long pfn, int flags, |
160 | bool *migratable_cleared); | |
b235448e | 161 | void folio_putback_hugetlb(struct folio *folio); |
345c62d1 | 162 | void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason); |
72e2936c | 163 | void hugetlb_fix_reserve_counts(struct inode *inode); |
c672c7f2 | 164 | extern struct mutex *hugetlb_fault_mutex_table; |
188b04a7 | 165 | u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx); |
1da177e4 | 166 | |
aec44e0f PX |
167 | pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, |
168 | unsigned long addr, pud_t *pud); | |
24334e78 PX |
169 | bool hugetlbfs_pagecache_present(struct hstate *h, |
170 | struct vm_area_struct *vma, | |
171 | unsigned long address); | |
3212b535 | 172 | |
6e8cda4c | 173 | struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio); |
c0d0381a | 174 | |
1da177e4 | 175 | extern int sysctl_hugetlb_shm_group; |
b78b27d0 | 176 | extern struct list_head huge_boot_pages[MAX_NUMNODES]; |
1da177e4 | 177 | |
5b47c029 | 178 | void hugetlb_bootmem_alloc(void); |
d58b2498 | 179 | bool hugetlb_bootmem_allocated(void); |
8d88b076 FL |
180 | extern nodemask_t hugetlb_bootmem_nodes; |
181 | void hugetlb_bootmem_set_nodes(void); | |
5b47c029 | 182 | |
63551ae0 DG |
183 | /* arch callbacks */ |
184 | ||
f7243924 HD |
185 | #ifndef CONFIG_HIGHPTE |
186 | /* | |
187 | * pte_offset_huge() and pte_alloc_huge() are helpers for those architectures | |
188 | * which may go down to the lowest PTE level in their huge_pte_offset() and | |
189 | * huge_pte_alloc(): to avoid reliance on pte_offset_map() without pte_unmap(). | |
190 | */ | |
191 | static inline pte_t *pte_offset_huge(pmd_t *pmd, unsigned long address) | |
192 | { | |
193 | return pte_offset_kernel(pmd, address); | |
194 | } | |
195 | static inline pte_t *pte_alloc_huge(struct mm_struct *mm, pmd_t *pmd, | |
196 | unsigned long address) | |
197 | { | |
198 | return pte_alloc(mm, pmd) ? NULL : pte_offset_huge(pmd, address); | |
199 | } | |
200 | #endif | |
201 | ||
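As a hedged sketch of how these helpers fit into the arch callback declared just below, here is a simplified huge_pte_alloc() for a hypothetical architecture that supports PUD-, PMD- and PTE-level (contiguous-PTE style) huge mappings; it is not taken from any real architecture and omits most error handling:

```c
/* Illustrative sketch only; not any real architecture's implementation. */
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d = p4d_alloc(mm, pgd, addr);
	pud_t *pud;
	pmd_t *pmd;

	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, addr);
	if (!pud)
		return NULL;
	if (sz == PUD_SIZE)			/* PUD-sized huge page */
		return (pte_t *)pud;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;
	if (sz == PMD_SIZE)			/* PMD-sized huge page */
		return (pte_t *)pmd;
	/* Sub-PMD (contiguous PTE style) sizes drop to the PTE level. */
	return pte_alloc_huge(mm, pmd, addr);
}
```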
aec44e0f | 202 | pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, |
a5516438 | 203 | unsigned long addr, unsigned long sz); |
fe7d4c6d PX |
204 | /* |
205 | * huge_pte_offset(): Walk the hugetlb pgtable until the last level PTE. | |
206 | * Returns the pte_t* if found, or NULL if the address is not mapped. | |
207 | * | |
9c67a207 PX |
208 | * IMPORTANT: we should normally not call this function directly; it is only |
209 | * a common interface used to implement arch-specific | |
210 | * walkers. Please use hugetlb_walk() instead, because that will attempt to | |
211 | * verify the locking for you. | |
212 | * | |
fe7d4c6d PX |
213 | * Since this function will walk all the pgtable pages (including not only |
214 | * high-level pgtable pages, but also PUD entries that can be unshared | |
215 | * concurrently for VM_SHARED), the caller of this function is | |
216 | * responsible for its thread safety. One can follow these rules: | |
217 | * | |
218 | * (1) For private mappings: pmd unsharing is not possible, so holding the | |
219 | * mmap_lock for either read or write is sufficient. Most callers | |
220 | * already hold the mmap_lock, so normally, no special action is | |
221 | * required. | |
222 | * | |
223 | * (2) For shared mappings: pmd unsharing is possible (so the PUD-ranged | |
224 | * pgtable page can go away from under us! It can be done by a pmd | |
225 | * unshare with a follow-up munmap() in the other process), then we | |
226 | * need either: | |
227 | * | |
228 | * (2.1) hugetlb vma lock read or write held, to make sure pmd unshare | |
229 | * won't happen upon the range (it also makes sure the pte_t we | |
230 | * read is the right and stable one), or, | |
231 | * | |
232 | * (2.2) hugetlb mapping i_mmap_rwsem lock held read or write, to make | |
233 | * sure even if unshare happened the racy unmap() will wait until | |
234 | * i_mmap_rwsem is released. | |
235 | * | |
236 | * Option (2.1) is the safest: it guarantees pte stability from the pmd | |
237 | * sharing point of view until the vma lock is released. Option (2.2) does | |
238 | * not protect against a concurrent pmd unshare, but it makes sure the | |
239 | * pgtable page is safe to access. | |
240 | */ | |
7868a208 PA |
241 | pte_t *huge_pte_offset(struct mm_struct *mm, |
242 | unsigned long addr, unsigned long sz); | |
e95a9851 | 243 | unsigned long hugetlb_mask_last_page(struct hstate *h); |
34ae204f | 244 | int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, |
4ddb4d91 | 245 | unsigned long addr, pte_t *ptep); |
017b1660 MK |
246 | void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, |
247 | unsigned long *start, unsigned long *end); | |
faaa5b62 | 248 | |
2820b0f0 RR |
249 | extern void __hugetlb_zap_begin(struct vm_area_struct *vma, |
250 | unsigned long *begin, unsigned long *end); | |
251 | extern void __hugetlb_zap_end(struct vm_area_struct *vma, | |
252 | struct zap_details *details); | |
253 | ||
254 | static inline void hugetlb_zap_begin(struct vm_area_struct *vma, | |
255 | unsigned long *start, unsigned long *end) | |
256 | { | |
257 | if (is_vm_hugetlb_page(vma)) | |
258 | __hugetlb_zap_begin(vma, start, end); | |
259 | } | |
260 | ||
261 | static inline void hugetlb_zap_end(struct vm_area_struct *vma, | |
262 | struct zap_details *details) | |
263 | { | |
264 | if (is_vm_hugetlb_page(vma)) | |
265 | __hugetlb_zap_end(vma, details); | |
266 | } | |
267 | ||
8d9bfb26 MK |
268 | void hugetlb_vma_lock_read(struct vm_area_struct *vma); |
269 | void hugetlb_vma_unlock_read(struct vm_area_struct *vma); | |
270 | void hugetlb_vma_lock_write(struct vm_area_struct *vma); | |
271 | void hugetlb_vma_unlock_write(struct vm_area_struct *vma); | |
272 | int hugetlb_vma_trylock_write(struct vm_area_struct *vma); | |
273 | void hugetlb_vma_assert_locked(struct vm_area_struct *vma); | |
274 | void hugetlb_vma_lock_release(struct kref *kref); | |
a79390f5 | 275 | long hugetlb_change_protection(struct vm_area_struct *vma, |
5a90d5a1 PX |
276 | unsigned long address, unsigned long end, pgprot_t newprot, |
277 | unsigned long cp_flags); | |
d5ed7444 | 278 | bool is_hugetlb_entry_migration(pte_t pte); |
52526ca7 | 279 | bool is_hugetlb_entry_hwpoisoned(pte_t pte); |
6dfeaff9 | 280 | void hugetlb_unshare_all_pmds(struct vm_area_struct *vma); |
ee40c992 | 281 | void fixup_hugetlb_reservations(struct vm_area_struct *vma); |
081056dc | 282 | void hugetlb_split(struct vm_area_struct *vma, unsigned long addr); |
ab5ac90a | 283 | |
1da177e4 LT |
284 | #else /* !CONFIG_HUGETLB_PAGE */ |
285 | ||
8d9bfb26 | 286 | static inline void hugetlb_dup_vma_private(struct vm_area_struct *vma) |
a1e78772 MG |
287 | { |
288 | } | |
289 | ||
550a7d60 MA |
290 | static inline void clear_vma_resv_huge_pages(struct vm_area_struct *vma) |
291 | { | |
292 | } | |
293 | ||
1da177e4 LT |
294 | static inline unsigned long hugetlb_total_pages(void) |
295 | { | |
296 | return 0; | |
297 | } | |
298 | ||
6e8cda4c MWO |
299 | static inline struct address_space *hugetlb_folio_mapping_lock_write( |
300 | struct folio *folio) | |
c0d0381a MK |
301 | { |
302 | return NULL; | |
303 | } | |
304 | ||
34ae204f MK |
305 | static inline int huge_pmd_unshare(struct mm_struct *mm, |
306 | struct vm_area_struct *vma, | |
4ddb4d91 | 307 | unsigned long addr, pte_t *ptep) |
017b1660 MK |
308 | { |
309 | return 0; | |
310 | } | |
311 | ||
312 | static inline void adjust_range_if_pmd_sharing_possible( | |
313 | struct vm_area_struct *vma, | |
314 | unsigned long *start, unsigned long *end) | |
315 | { | |
316 | } | |
317 | ||
2820b0f0 RR |
318 | static inline void hugetlb_zap_begin( |
319 | struct vm_area_struct *vma, | |
320 | unsigned long *start, unsigned long *end) | |
321 | { | |
322 | } | |
323 | ||
324 | static inline void hugetlb_zap_end( | |
325 | struct vm_area_struct *vma, | |
326 | struct zap_details *details) | |
327 | { | |
328 | } | |
329 | ||
1f9dccb2 | 330 | static inline int copy_hugetlb_page_range(struct mm_struct *dst, |
bc70fbf2 PX |
331 | struct mm_struct *src, |
332 | struct vm_area_struct *dst_vma, | |
333 | struct vm_area_struct *src_vma) | |
1f9dccb2 MK |
334 | { |
335 | BUG(); | |
336 | return 0; | |
337 | } | |
338 | ||
550a7d60 MA |
339 | static inline int move_hugetlb_page_tables(struct vm_area_struct *vma, |
340 | struct vm_area_struct *new_vma, | |
341 | unsigned long old_addr, | |
342 | unsigned long new_addr, | |
343 | unsigned long len) | |
344 | { | |
345 | BUG(); | |
346 | return 0; | |
347 | } | |
348 | ||
e1759c21 AD |
349 | static inline void hugetlb_report_meminfo(struct seq_file *m) |
350 | { | |
351 | } | |
1f9dccb2 | 352 | |
7981593b | 353 | static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid) |
1f9dccb2 MK |
354 | { |
355 | return 0; | |
356 | } | |
357 | ||
dcadcf1c | 358 | static inline void hugetlb_show_meminfo_node(int nid) |
949f7ec5 DR |
359 | { |
360 | } | |
1f9dccb2 | 361 | |
1f9dccb2 MK |
362 | static inline int prepare_hugepage_range(struct file *file, |
363 | unsigned long addr, unsigned long len) | |
364 | { | |
365 | return -EINVAL; | |
366 | } | |
367 | ||
8d9bfb26 MK |
368 | static inline void hugetlb_vma_lock_read(struct vm_area_struct *vma) |
369 | { | |
370 | } | |
371 | ||
372 | static inline void hugetlb_vma_unlock_read(struct vm_area_struct *vma) | |
373 | { | |
374 | } | |
375 | ||
376 | static inline void hugetlb_vma_lock_write(struct vm_area_struct *vma) | |
377 | { | |
378 | } | |
379 | ||
380 | static inline void hugetlb_vma_unlock_write(struct vm_area_struct *vma) | |
381 | { | |
382 | } | |
383 | ||
384 | static inline int hugetlb_vma_trylock_write(struct vm_area_struct *vma) | |
385 | { | |
386 | return 1; | |
387 | } | |
388 | ||
389 | static inline void hugetlb_vma_assert_locked(struct vm_area_struct *vma) | |
390 | { | |
391 | } | |
392 | ||
1f9dccb2 MK |
393 | static inline int is_hugepage_only_range(struct mm_struct *mm, |
394 | unsigned long addr, unsigned long len) | |
395 | { | |
396 | return 0; | |
397 | } | |
398 | ||
399 | static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb, | |
400 | unsigned long addr, unsigned long end, | |
401 | unsigned long floor, unsigned long ceiling) | |
402 | { | |
403 | BUG(); | |
404 | } | |
405 | ||
714c1891 | 406 | #ifdef CONFIG_USERFAULTFD |
61c50040 | 407 | static inline int hugetlb_mfill_atomic_pte(pte_t *dst_pte, |
a734991c AR |
408 | struct vm_area_struct *dst_vma, |
409 | unsigned long dst_addr, | |
410 | unsigned long src_addr, | |
d9712937 | 411 | uffd_flags_t flags, |
0169fd51 | 412 | struct folio **foliop) |
1f9dccb2 MK |
413 | { |
414 | BUG(); | |
415 | return 0; | |
416 | } | |
714c1891 | 417 | #endif /* CONFIG_USERFAULTFD */ |
1f9dccb2 MK |
418 | |
419 | static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, | |
420 | unsigned long sz) | |
421 | { | |
422 | return NULL; | |
423 | } | |
24669e58 | 424 | |
4c640f12 | 425 | static inline bool folio_isolate_hugetlb(struct folio *folio, struct list_head *list) |
f40386a4 | 426 | { |
9747b9e9 | 427 | return false; |
f40386a4 | 428 | } |
1da177e4 | 429 | |
04bac040 | 430 | static inline int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison) |
25182f05 NH |
431 | { |
432 | return 0; | |
433 | } | |
434 | ||
e591ef7d NH |
435 | static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags, |
436 | bool *migratable_cleared) | |
405ce051 NH |
437 | { |
438 | return 0; | |
439 | } | |
440 | ||
b235448e | 441 | static inline void folio_putback_hugetlb(struct folio *folio) |
1f9dccb2 MK |
442 | { |
443 | } | |
444 | ||
345c62d1 SK |
445 | static inline void move_hugetlb_state(struct folio *old_folio, |
446 | struct folio *new_folio, int reason) | |
1f9dccb2 MK |
447 | { |
448 | } | |
449 | ||
a79390f5 | 450 | static inline long hugetlb_change_protection( |
1f9dccb2 | 451 | struct vm_area_struct *vma, unsigned long address, |
5a90d5a1 PX |
452 | unsigned long end, pgprot_t newprot, |
453 | unsigned long cp_flags) | |
7da4d641 PZ |
454 | { |
455 | return 0; | |
456 | } | |
8f860591 | 457 | |
2820b0f0 | 458 | static inline void __unmap_hugepage_range(struct mmu_gather *tlb, |
d833352a | 459 | struct vm_area_struct *vma, unsigned long start, |
7f4b6065 | 460 | unsigned long end, struct folio *folio, |
05e90bd0 | 461 | zap_flags_t zap_flags) |
d833352a MG |
462 | { |
463 | BUG(); | |
464 | } | |
465 | ||
a953e772 | 466 | static inline vm_fault_t hugetlb_fault(struct mm_struct *mm, |
1f9dccb2 MK |
467 | struct vm_area_struct *vma, unsigned long address, |
468 | unsigned int flags) | |
a953e772 SJ |
469 | { |
470 | BUG(); | |
471 | return 0; | |
472 | } | |
24669e58 | 473 | |
6dfeaff9 PX |
474 | static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { } |
475 | ||
ee40c992 RCN |
476 | static inline void fixup_hugetlb_reservations(struct vm_area_struct *vma) |
477 | { | |
478 | } | |
479 | ||
081056dc JH |
480 | static inline void hugetlb_split(struct vm_area_struct *vma, unsigned long addr) {} |
481 | ||
1da177e4 | 482 | #endif /* !CONFIG_HUGETLB_PAGE */ |
f30c59e9 AK |
483 | |
484 | #ifndef pgd_write | |
485 | static inline int pgd_write(pgd_t pgd) | |
486 | { | |
487 | BUG(); | |
488 | return 0; | |
489 | } | |
490 | #endif | |
491 | ||
4e52780d EM |
492 | #define HUGETLB_ANON_FILE "anon_hugepage" |
493 | ||
6bfde05b EM |
494 | enum { |
495 | /* | |
496 | * The file will be used as a shm file, so shmfs accounting rules | |
497 | * apply. | |
498 | */ | |
499 | HUGETLB_SHMFS_INODE = 1, | |
4e52780d EM |
500 | /* |
501 | * The file is being created on the internal vfs mount and shmfs | |
502 | * accounting rules do not apply | |
503 | */ | |
504 | HUGETLB_ANONHUGE_INODE = 2, | |
6bfde05b EM |
505 | }; |
506 | ||
1da177e4 | 507 | #ifdef CONFIG_HUGETLBFS |
1da177e4 | 508 | struct hugetlbfs_sb_info { |
1da177e4 LT |
509 | long max_inodes; /* inodes allowed */ |
510 | long free_inodes; /* inodes free */ | |
511 | spinlock_t stat_lock; | |
a137e1cc | 512 | struct hstate *hstate; |
90481622 | 513 | struct hugepage_subpool *spool; |
4a25220d DH |
514 | kuid_t uid; |
515 | kgid_t gid; | |
516 | umode_t mode; | |
1da177e4 LT |
517 | }; |
518 | ||
1da177e4 LT |
519 | static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb) |
520 | { | |
521 | return sb->s_fs_info; | |
522 | } | |
523 | ||
da14c1e5 | 524 | struct hugetlbfs_inode_info { |
da14c1e5 | 525 | struct inode vfs_inode; |
ff62a342 | 526 | unsigned int seals; |
da14c1e5 MAL |
527 | }; |
528 | ||
529 | static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode) | |
530 | { | |
531 | return container_of(inode, struct hugetlbfs_inode_info, vfs_inode); | |
532 | } | |
533 | ||
f0f37e2f | 534 | extern const struct vm_operations_struct hugetlb_vm_ops; |
af73e4d9 | 535 | struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct, |
83c1fd76 | 536 | int creat_flags, int page_size_log); |
1da177e4 | 537 | |
886b94d2 | 538 | static inline bool is_file_hugepages(const struct file *file) |
1da177e4 | 539 | { |
886b94d2 | 540 | return file->f_op->fop_flags & FOP_HUGE_PAGES; |
1da177e4 LT |
541 | } |
542 | ||
bb297bb2 CL |
543 | static inline struct hstate *hstate_inode(struct inode *i) |
544 | { | |
545 | return HUGETLBFS_SB(i->i_sb)->hstate; | |
546 | } | |
1da177e4 LT |
547 | #else /* !CONFIG_HUGETLBFS */ |
548 | ||
719ff321 | 549 | #define is_file_hugepages(file) false |
40716e29 | 550 | static inline struct file * |
af73e4d9 | 551 | hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag, |
83c1fd76 | 552 | int creat_flags, int page_size_log) |
e9ea0e2d AM |
553 | { |
554 | return ERR_PTR(-ENOSYS); | |
555 | } | |
1da177e4 | 556 | |
bb297bb2 CL |
557 | static inline struct hstate *hstate_inode(struct inode *i) |
558 | { | |
559 | return NULL; | |
560 | } | |
1da177e4 LT |
561 | #endif /* !CONFIG_HUGETLBFS */ |
562 | ||
7bd3f1e1 | 563 | unsigned long |
cc92882e | 564 | hugetlb_get_unmapped_area(struct file *file, unsigned long addr, |
7bd3f1e1 OS |
565 | unsigned long len, unsigned long pgoff, |
566 | unsigned long flags); | |
d2ba27e8 | 567 | |
d6995da3 MK |
568 | /* |
569 | * hugetlb page specific state flags. These flags are located in page.private | |
570 | * of the hugetlb head page. Functions created via the macros below should be | |
571 | * used to manipulate these flags. | |
572 | * | |
573 | * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at | |
574 | * allocation time. Cleared when page is fully instantiated. Free | |
575 | * routine checks flag to restore a reservation on error paths. | |
d95c0337 MK |
576 | * Synchronization: Examined or modified by code that knows it has |
577 | * the only reference to the page, i.e. after allocation but before use | |
578 | * or when the page is being freed. | |
8f251a3d MK |
579 | * HPG_migratable - Set after a newly allocated page is added to the page |
580 | * cache and/or page tables. Indicates the page is a candidate for | |
581 | * migration. | |
d95c0337 MK |
582 | * Synchronization: Initially set after new page allocation with no |
583 | * locking. When examined and modified during migration processing | |
584 | * (isolate, migrate, putback) the hugetlb_lock is held. | |
161df60e | 585 | * HPG_temporary - Set on a page that is temporarily allocated from the buddy |
9157c311 MK |
586 | * allocator. Typically used for migration target pages when no pages |
587 | * are available in the pool. The hugetlb free page path will | |
588 | * immediately free pages with this flag set to the buddy allocator. | |
d95c0337 MK |
589 | * Synchronization: Can be set after huge page allocation from buddy when |
590 | * code knows it has only reference. All other examinations and | |
591 | * modifications require hugetlb_lock. | |
6c037149 | 592 | * HPG_freed - Set when page is on the free lists. |
d95c0337 | 593 | * Synchronization: hugetlb_lock held for examination and modification. |
ad2fa371 | 594 | * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed. |
161df60e NH |
595 | * HPG_raw_hwp_unreliable - Set when the hugetlb page has a hwpoison sub-page |
596 | * that is not tracked by raw_hwp_page list. | |
d6995da3 MK |
597 | */ |
598 | enum hugetlb_page_flags { | |
599 | HPG_restore_reserve = 0, | |
8f251a3d | 600 | HPG_migratable, |
9157c311 | 601 | HPG_temporary, |
6c037149 | 602 | HPG_freed, |
ad2fa371 | 603 | HPG_vmemmap_optimized, |
161df60e | 604 | HPG_raw_hwp_unreliable, |
d2d78671 | 605 | HPG_cma, |
d6995da3 MK |
606 | __NR_HPAGEFLAGS, |
607 | }; | |
608 | ||
609 | /* | |
610 | * Macros to create test, set and clear function definitions for | |
611 | * hugetlb specific page flags. | |
612 | */ | |
613 | #ifdef CONFIG_HUGETLB_PAGE | |
614 | #define TESTHPAGEFLAG(uname, flname) \ | |
d03c376d SK |
615 | static __always_inline \ |
616 | bool folio_test_hugetlb_##flname(struct folio *folio) \ | |
617 | { void *private = &folio->private; \ | |
618 | return test_bit(HPG_##flname, private); \ | |
16540dae | 619 | } |
d6995da3 MK |
620 | |
621 | #define SETHPAGEFLAG(uname, flname) \ | |
d03c376d SK |
622 | static __always_inline \ |
623 | void folio_set_hugetlb_##flname(struct folio *folio) \ | |
624 | { void *private = &folio->private; \ | |
625 | set_bit(HPG_##flname, private); \ | |
63818aaf | 626 | } |
d6995da3 MK |
627 | |
628 | #define CLEARHPAGEFLAG(uname, flname) \ | |
d03c376d SK |
629 | static __always_inline \ |
630 | void folio_clear_hugetlb_##flname(struct folio *folio) \ | |
631 | { void *private = &folio->private; \ | |
632 | clear_bit(HPG_##flname, private); \ | |
63818aaf | 633 | } |
d6995da3 MK |
634 | #else |
635 | #define TESTHPAGEFLAG(uname, flname) \ | |
d03c376d SK |
636 | static inline bool \ |
637 | folio_test_hugetlb_##flname(struct folio *folio) \ | |
d6995da3 MK |
638 | { return 0; } |
639 | ||
640 | #define SETHPAGEFLAG(uname, flname) \ | |
d03c376d SK |
641 | static inline void \ |
642 | folio_set_hugetlb_##flname(struct folio *folio) \ | |
d6995da3 MK |
643 | { } |
644 | ||
645 | #define CLEARHPAGEFLAG(uname, flname) \ | |
d03c376d SK |
646 | static inline void \ |
647 | folio_clear_hugetlb_##flname(struct folio *folio) \ | |
d6995da3 MK |
648 | { } |
649 | #endif | |
650 | ||
651 | #define HPAGEFLAG(uname, flname) \ | |
652 | TESTHPAGEFLAG(uname, flname) \ | |
653 | SETHPAGEFLAG(uname, flname) \ | |
654 | CLEARHPAGEFLAG(uname, flname) \ | |
655 | ||
656 | /* | |
657 | * Create functions associated with hugetlb page flags | |
658 | */ | |
659 | HPAGEFLAG(RestoreReserve, restore_reserve) | |
8f251a3d | 660 | HPAGEFLAG(Migratable, migratable) |
9157c311 | 661 | HPAGEFLAG(Temporary, temporary) |
6c037149 | 662 | HPAGEFLAG(Freed, freed) |
ad2fa371 | 663 | HPAGEFLAG(VmemmapOptimized, vmemmap_optimized) |
161df60e | 664 | HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable) |
d2d78671 | 665 | HPAGEFLAG(Cma, cma) |
d6995da3 | 666 | |
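Each HPAGEFLAG() line above expands into a test/set/clear trio, e.g. HPAGEFLAG(Migratable, migratable) yields folio_test_hugetlb_migratable(), folio_set_hugetlb_migratable() and folio_clear_hugetlb_migratable(). A hedged sketch of the typical pattern, following the HPG_migratable synchronization notes above (the helper name is made up):

```c
/*
 * Illustrative only: mark a hugetlb folio as a migration candidate under
 * hugetlb_lock, per the HPG_migratable rules documented above.
 */
static void example_mark_migratable(struct folio *folio)
{
	spin_lock_irq(&hugetlb_lock);
	if (!folio_test_hugetlb_migratable(folio))
		folio_set_hugetlb_migratable(folio);
	spin_unlock_irq(&hugetlb_lock);
}
```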
a5516438 AK |
667 | #ifdef CONFIG_HUGETLB_PAGE |
668 | ||
a3437870 | 669 | #define HSTATE_NAME_LEN 32 |
a5516438 AK |
670 | /* Defines one hugetlb page size */ |
671 | struct hstate { | |
29383967 | 672 | struct mutex resize_lock; |
667574e8 | 673 | struct lock_class_key resize_key; |
e8c5c824 LS |
674 | int next_nid_to_alloc; |
675 | int next_nid_to_free; | |
a5516438 | 676 | unsigned int order; |
79dfc695 | 677 | unsigned int demote_order; |
a5516438 AK |
678 | unsigned long mask; |
679 | unsigned long max_huge_pages; | |
680 | unsigned long nr_huge_pages; | |
681 | unsigned long free_huge_pages; | |
682 | unsigned long resv_huge_pages; | |
683 | unsigned long surplus_huge_pages; | |
684 | unsigned long nr_overcommit_huge_pages; | |
0edaecfa | 685 | struct list_head hugepage_activelist; |
a5516438 | 686 | struct list_head hugepage_freelists[MAX_NUMNODES]; |
b5389086 | 687 | unsigned int max_huge_pages_node[MAX_NUMNODES]; |
a5516438 AK |
688 | unsigned int nr_huge_pages_node[MAX_NUMNODES]; |
689 | unsigned int free_huge_pages_node[MAX_NUMNODES]; | |
690 | unsigned int surplus_huge_pages_node[MAX_NUMNODES]; | |
a3437870 | 691 | char name[HSTATE_NAME_LEN]; |
a5516438 AK |
692 | }; |
693 | ||
d2d78671 FL |
694 | struct cma; |
695 | ||
53ba51d2 JT |
696 | struct huge_bootmem_page { |
697 | struct list_head list; | |
698 | struct hstate *hstate; | |
752fe17a | 699 | unsigned long flags; |
d2d78671 | 700 | struct cma *cma; |
53ba51d2 JT |
701 | }; |
702 | ||
752fe17a FL |
703 | #define HUGE_BOOTMEM_HVO 0x0001 |
704 | #define HUGE_BOOTMEM_ZONES_VALID 0x0002 | |
d2d78671 | 705 | #define HUGE_BOOTMEM_CMA 0x0004 |
752fe17a | 706 | |
b1222550 FL |
707 | bool hugetlb_bootmem_page_zones_valid(int nid, struct huge_bootmem_page *m); |
708 | ||
b4c829fa | 709 | int isolate_or_dissolve_huge_folio(struct folio *folio, struct list_head *list); |
04f13d24 | 710 | int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn); |
67bab133 | 711 | void wait_for_freed_hugetlb_folios(void); |
d0ce0e47 | 712 | struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, |
30cef82b | 713 | unsigned long addr, bool cow_from_owner); |
e37d3e83 | 714 | struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid, |
42d0c3fb BW |
715 | nodemask_t *nmask, gfp_t gfp_mask, |
716 | bool allow_alloc_fallback); | |
26a8ea80 SS |
717 | struct folio *alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid, |
718 | nodemask_t *nmask, gfp_t gfp_mask); | |
719 | ||
9b91c0e2 | 720 | int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping, |
ab76ad54 | 721 | pgoff_t idx); |
846be085 | 722 | void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma, |
d2d7bb44 | 723 | unsigned long address, struct folio *folio); |
bf50bab2 | 724 | |
53ba51d2 | 725 | /* arch callback */ |
b5389086 ZY |
726 | int __init __alloc_bootmem_huge_page(struct hstate *h, int nid); |
727 | int __init alloc_bootmem_huge_page(struct hstate *h, int nid); | |
728 | bool __init hugetlb_node_alloc_supported(void); | |
53ba51d2 | 729 | |
e5ff2159 | 730 | void __init hugetlb_add_hstate(unsigned order); |
ae94da89 | 731 | bool __init arch_hugetlb_valid_size(unsigned long size); |
e5ff2159 AK |
732 | struct hstate *size_to_hstate(unsigned long size); |
733 | ||
734 | #ifndef HUGE_MAX_HSTATE | |
735 | #define HUGE_MAX_HSTATE 1 | |
736 | #endif | |
737 | ||
738 | extern struct hstate hstates[HUGE_MAX_HSTATE]; | |
739 | extern unsigned int default_hstate_idx; | |
740 | ||
741 | #define default_hstate (hstates[default_hstate_idx]) | |
a5516438 | 742 | |
149562f7 SK |
743 | static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio) |
744 | { | |
dad6a5eb | 745 | return folio->_hugetlb_subpool; |
149562f7 SK |
746 | } |
747 | ||
149562f7 SK |
748 | static inline void hugetlb_set_folio_subpool(struct folio *folio, |
749 | struct hugepage_subpool *subpool) | |
750 | { | |
dad6a5eb | 751 | folio->_hugetlb_subpool = subpool; |
d6995da3 MK |
752 | } |
753 | ||
a5516438 AK |
754 | static inline struct hstate *hstate_file(struct file *f) |
755 | { | |
496ad9aa | 756 | return hstate_inode(file_inode(f)); |
a5516438 AK |
757 | } |
758 | ||
af73e4d9 NH |
759 | static inline struct hstate *hstate_sizelog(int page_size_log) |
760 | { | |
761 | if (!page_size_log) | |
762 | return &default_hstate; | |
97ad2be1 | 763 | |
ec4288fe MK |
764 | if (page_size_log < BITS_PER_LONG) |
765 | return size_to_hstate(1UL << page_size_log); | |
766 | ||
767 | return NULL; | |
af73e4d9 NH |
768 | } |
769 | ||
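For context, page_size_log is the log2 of the requested huge page size (0 meaning the default hstate), as encoded in the MAP_HUGE_* mmap flags. A hedged sketch of the lookup (the helper name is made up; the flag extraction mirrors what the mmap path does):

```c
/*
 * Illustrative only: map a MAP_HUGE_* style size encoding to an hstate.
 * A log of 0 selects the default hstate; 21 selects the 2 MiB hstate if
 * that size is registered; unsupported sizes yield NULL.
 */
static struct hstate *example_hstate_from_mmap_flags(unsigned long flags)
{
	int page_size_log = (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK;

	return hstate_sizelog(page_size_log);
}
```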
a137e1cc | 770 | static inline struct hstate *hstate_vma(struct vm_area_struct *vma) |
a5516438 | 771 | { |
a137e1cc | 772 | return hstate_file(vma->vm_file); |
a5516438 AK |
773 | } |
774 | ||
6213834c | 775 | static inline unsigned long huge_page_size(const struct hstate *h) |
a5516438 AK |
776 | { |
777 | return (unsigned long)PAGE_SIZE << h->order; | |
778 | } | |
779 | ||
08fba699 MG |
780 | extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma); |
781 | ||
3340289d MG |
782 | extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma); |
783 | ||
a5516438 AK |
784 | static inline unsigned long huge_page_mask(struct hstate *h) |
785 | { | |
786 | return h->mask; | |
787 | } | |
788 | ||
789 | static inline unsigned int huge_page_order(struct hstate *h) | |
790 | { | |
791 | return h->order; | |
792 | } | |
793 | ||
794 | static inline unsigned huge_page_shift(struct hstate *h) | |
795 | { | |
796 | return h->order + PAGE_SHIFT; | |
797 | } | |
798 | ||
bae7f4ae LC |
799 | static inline bool hstate_is_gigantic(struct hstate *h) |
800 | { | |
5e0a760b | 801 | return huge_page_order(h) > MAX_PAGE_ORDER; |
bae7f4ae LC |
802 | } |
803 | ||
6213834c | 804 | static inline unsigned int pages_per_huge_page(const struct hstate *h) |
a5516438 AK |
805 | { |
806 | return 1 << h->order; | |
807 | } | |
808 | ||
809 | static inline unsigned int blocks_per_huge_page(struct hstate *h) | |
810 | { | |
811 | return huge_page_size(h) / 512; | |
812 | } | |
813 | ||
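A worked illustration of how the accessors above relate for a hypothetical 2 MiB hstate on a 4 KiB base-page system (order 9); the helper and the WARN_ONs are purely illustrative:

```c
/* Illustrative only: accessor relationships for a 2 MiB hstate (order 9). */
static void example_hstate_geometry(struct hstate *h)
{
	/* Assuming h->order == 9 and PAGE_SIZE == 4096: */
	WARN_ON(huge_page_size(h)       != (PAGE_SIZE << 9));	/* 2 MiB */
	WARN_ON(huge_page_shift(h)      != PAGE_SHIFT + 9);	/* 21 */
	WARN_ON(pages_per_huge_page(h)  != 512);		/* base pages */
	WARN_ON(blocks_per_huge_page(h) != 4096);		/* 512-byte blocks */
	WARN_ON(huge_page_mask(h)       != ~((1UL << 21) - 1));
}
```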
a08c7193 SK |
814 | static inline struct folio *filemap_lock_hugetlb_folio(struct hstate *h, |
815 | struct address_space *mapping, pgoff_t idx) | |
816 | { | |
817 | return filemap_lock_folio(mapping, idx << huge_page_order(h)); | |
818 | } | |
819 | ||
a5516438 AK |
820 | #include <asm/hugetlb.h> |
821 | ||
b0eae98c AK |
822 | #ifndef is_hugepage_only_range |
823 | static inline int is_hugepage_only_range(struct mm_struct *mm, | |
824 | unsigned long addr, unsigned long len) | |
825 | { | |
826 | return 0; | |
827 | } | |
828 | #define is_hugepage_only_range is_hugepage_only_range | |
829 | #endif | |
830 | ||
51718e25 MWO |
831 | #ifndef arch_clear_hugetlb_flags |
832 | static inline void arch_clear_hugetlb_flags(struct folio *folio) { } | |
833 | #define arch_clear_hugetlb_flags arch_clear_hugetlb_flags | |
5be99343 AK |
834 | #endif |
835 | ||
d9ed9faa | 836 | #ifndef arch_make_huge_pte |
79c1c594 CL |
837 | static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, |
838 | vm_flags_t flags) | |
d9ed9faa | 839 | { |
16785bd7 | 840 | return pte_mkhuge(entry); |
d9ed9faa CM |
841 | } |
842 | #endif | |
843 | ||
d2d78671 FL |
844 | #ifndef arch_has_huge_bootmem_alloc |
845 | /* | |
846 | * Some architectures do their own bootmem allocation, so they can't use | |
847 | * early CMA allocation. | |
848 | */ | |
849 | static inline bool arch_has_huge_bootmem_alloc(void) | |
850 | { | |
851 | return false; | |
852 | } | |
853 | #endif | |
854 | ||
e51da3a9 SK |
855 | static inline struct hstate *folio_hstate(struct folio *folio) |
856 | { | |
857 | VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio); | |
858 | return size_to_hstate(folio_size(folio)); | |
859 | } | |
860 | ||
aa50d3a7 AK |
861 | static inline unsigned hstate_index_to_shift(unsigned index) |
862 | { | |
863 | return hstates[index].order + PAGE_SHIFT; | |
864 | } | |
865 | ||
972dc4de AK |
866 | static inline int hstate_index(struct hstate *h) |
867 | { | |
868 | return h - hstates; | |
869 | } | |
870 | ||
54fa49b2 | 871 | int dissolve_free_hugetlb_folio(struct folio *folio); |
d199483c | 872 | int dissolve_free_hugetlb_folios(unsigned long start_pfn, |
082d5b6b | 873 | unsigned long end_pfn); |
e693de18 | 874 | |
161df60e | 875 | #ifdef CONFIG_MEMORY_FAILURE |
2ff6cece | 876 | extern void folio_clear_hugetlb_hwpoison(struct folio *folio); |
161df60e | 877 | #else |
2ff6cece | 878 | static inline void folio_clear_hugetlb_hwpoison(struct folio *folio) |
161df60e NH |
879 | { |
880 | } | |
881 | #endif | |
882 | ||
c177c81e | 883 | #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION |
e693de18 AK |
884 | #ifndef arch_hugetlb_migration_supported |
885 | static inline bool arch_hugetlb_migration_supported(struct hstate *h) | |
886 | { | |
94310cbc | 887 | if ((huge_page_shift(h) == PMD_SHIFT) || |
9b553bf5 AK |
888 | (huge_page_shift(h) == PUD_SHIFT) || |
889 | (huge_page_shift(h) == PGDIR_SHIFT)) | |
94310cbc AK |
890 | return true; |
891 | else | |
892 | return false; | |
e693de18 AK |
893 | } |
894 | #endif | |
c177c81e | 895 | #else |
e693de18 AK |
896 | static inline bool arch_hugetlb_migration_supported(struct hstate *h) |
897 | { | |
d70c17d4 | 898 | return false; |
e693de18 | 899 | } |
c177c81e | 900 | #endif |
e693de18 AK |
901 | |
902 | static inline bool hugepage_migration_supported(struct hstate *h) | |
903 | { | |
904 | return arch_hugetlb_migration_supported(h); | |
83467efb | 905 | } |
c8721bbb | 906 | |
7ed2c31d AK |
907 | /* |
908 | * The movability check is different from the migration check. | |
909 | * It determines whether or not a huge page should be placed in a | |
910 | * movable zone. Movability of any huge page should be | |
911 | * required only if the huge page size is supported for migration. | |
06c88398 | 912 | * There won't be any reason for the huge page to be movable if |
7ed2c31d AK |
913 | * it is not migratable to start with. Also the size of the huge |
914 | * page should be large enough to be placed under a movable zone | |
915 | * and still feasible enough to be migratable. Just the presence | |
916 | * in movable zone does not make the migration feasible. | |
917 | * | |
918 | * So even though large huge page sizes like the gigantic ones | |
919 | * are migratable, they should not be movable because it is not | |
920 | * feasible to migrate them from a movable zone. | |
921 | */ | |
922 | static inline bool hugepage_movable_supported(struct hstate *h) | |
923 | { | |
924 | if (!hugepage_migration_supported(h)) | |
925 | return false; | |
926 | ||
927 | if (hstate_is_gigantic(h)) | |
928 | return false; | |
929 | return true; | |
930 | } | |
931 | ||
d92bbc27 JK |
932 | /* Movability of hugepages depends on migration support. */ |
933 | static inline gfp_t htlb_alloc_mask(struct hstate *h) | |
934 | { | |
cf54f310 YZ |
935 | gfp_t gfp = __GFP_COMP | __GFP_NOWARN; |
936 | ||
937 | gfp |= hugepage_movable_supported(h) ? GFP_HIGHUSER_MOVABLE : GFP_HIGHUSER; | |
938 | ||
939 | return gfp; | |
d92bbc27 JK |
940 | } |
941 | ||
19fc7bed JK |
942 | static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask) |
943 | { | |
944 | gfp_t modified_mask = htlb_alloc_mask(h); | |
945 | ||
946 | /* Some callers might want to enforce node */ | |
947 | modified_mask |= (gfp_mask & __GFP_THISNODE); | |
948 | ||
41b4dc14 JK |
949 | modified_mask |= (gfp_mask & __GFP_NOWARN); |
950 | ||
19fc7bed JK |
951 | return modified_mask; |
952 | } | |
953 | ||
42d0c3fb BW |
954 | static inline bool htlb_allow_alloc_fallback(int reason) |
955 | { | |
956 | bool allowed_fallback = false; | |
957 | ||
958 | /* | |
959 | * Note: the memory offline, memory failure and migration syscalls will | |
960 | * be allowed to fall back to other nodes due to the lack of a better | |
961 | * choice, which might break the per-node hugetlb pool, while other cases | |
962 | * will set __GFP_THISNODE to avoid breaking the per-node hugetlb pool. | |
963 | */ | |
964 | switch (reason) { | |
965 | case MR_MEMORY_HOTPLUG: | |
966 | case MR_MEMORY_FAILURE: | |
967 | case MR_SYSCALL: | |
968 | case MR_MEMPOLICY_MBIND: | |
969 | allowed_fallback = true; | |
970 | break; | |
971 | default: | |
972 | break; | |
973 | } | |
974 | ||
975 | return allowed_fallback; | |
976 | } | |
977 | ||
cb900f41 KS |
978 | static inline spinlock_t *huge_pte_lockptr(struct hstate *h, |
979 | struct mm_struct *mm, pte_t *pte) | |
980 | { | |
5f75cfbd DH |
981 | const unsigned long size = huge_page_size(h); |
982 | ||
983 | VM_WARN_ON(size == PAGE_SIZE); | |
984 | ||
985 | /* | |
986 | * hugetlb must use the exact same PT locks as core-mm page table | |
987 | * walkers would. When modifying a PTE table, hugetlb must take the | |
988 | * PTE PT lock, when modifying a PMD table, hugetlb must take the PMD | |
989 | * PT lock etc. | |
990 | * | |
991 | * The expectation is that any hugetlb folio smaller than a PMD is | |
992 | * always mapped into a single PTE table and that any hugetlb folio | |
993 | * smaller than a PUD (but at least as big as a PMD) is always mapped | |
994 | * into a single PMD table. | |
995 | * | |
996 | * If that does not hold for an architecture, then that architecture | |
997 | * must disable split PT locks such that all *_lockptr() functions | |
998 | * will give us the same result: the per-MM PT lock. | |
999 | * | |
1000 | * Note that with e.g., CONFIG_PGTABLE_LEVELS=2 where | |
1001 | * PGDIR_SIZE==P4D_SIZE==PUD_SIZE==PMD_SIZE, we'd use pud_lockptr() | |
1002 | * and core-mm would use pmd_lockptr(). However, in such configurations | |
1003 | * split PMD locks are disabled -- they don't make sense on a single | |
1004 | * PGDIR page table -- and the end result is the same. | |
1005 | */ | |
1006 | if (size >= PUD_SIZE) | |
1007 | return pud_lockptr(mm, (pud_t *) pte); | |
1008 | else if (size >= PMD_SIZE || IS_ENABLED(CONFIG_HIGHPTE)) | |
cb900f41 | 1009 | return pmd_lockptr(mm, (pmd_t *) pte); |
5f75cfbd DH |
1010 | /* pte_alloc_huge() only applies with !CONFIG_HIGHPTE */ |
1011 | return ptep_lockptr(mm, pte); | |
cb900f41 KS |
1012 | } |
1013 | ||
2531c8cf DD |
1014 | #ifndef hugepages_supported |
1015 | /* | |
1016 | * Some platforms decide whether they support huge pages at boot | |
1017 | * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0 | |
1018 | * when there is no such support. | |
1019 | */ | |
1020 | #define hugepages_supported() (HPAGE_SHIFT != 0) | |
1021 | #endif | |
457c1b27 | 1022 | |
5d317b2b NH |
1023 | void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm); |
1024 | ||
13db8c50 LZ |
1025 | static inline void hugetlb_count_init(struct mm_struct *mm) |
1026 | { | |
1027 | atomic_long_set(&mm->hugetlb_usage, 0); | |
1028 | } | |
1029 | ||
5d317b2b NH |
1030 | static inline void hugetlb_count_add(long l, struct mm_struct *mm) |
1031 | { | |
1032 | atomic_long_add(l, &mm->hugetlb_usage); | |
1033 | } | |
1034 | ||
1035 | static inline void hugetlb_count_sub(long l, struct mm_struct *mm) | |
1036 | { | |
1037 | atomic_long_sub(l, &mm->hugetlb_usage); | |
1038 | } | |
e5251fd4 | 1039 | |
023bdd00 AK |
1040 | #ifndef huge_ptep_modify_prot_start |
1041 | #define huge_ptep_modify_prot_start huge_ptep_modify_prot_start | |
1042 | static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, | |
1043 | unsigned long addr, pte_t *ptep) | |
1044 | { | |
02410ac7 RR |
1045 | unsigned long psize = huge_page_size(hstate_vma(vma)); |
1046 | ||
1047 | return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, psize); | |
023bdd00 AK |
1048 | } |
1049 | #endif | |
1050 | ||
1051 | #ifndef huge_ptep_modify_prot_commit | |
1052 | #define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit | |
1053 | static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, | |
1054 | unsigned long addr, pte_t *ptep, | |
1055 | pte_t old_pte, pte_t pte) | |
1056 | { | |
935d4f0c RR |
1057 | unsigned long psize = huge_page_size(hstate_vma(vma)); |
1058 | ||
1059 | set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize); | |
023bdd00 AK |
1060 | } |
1061 | #endif | |
1062 | ||
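The two hooks above form a start/modify/commit transaction; a hedged sketch of that pattern for changing one huge PTE's protection (simplified from what the protection-change path does; the caller is assumed to hold the page table lock and the helper name is made up):

```c
/* Illustrative only: the modify_prot start/commit transaction. */
static void example_change_huge_pte_prot(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep,
					 pgprot_t newprot)
{
	pte_t old_pte, new_pte;

	old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
	new_pte = huge_pte_modify(old_pte, newprot);
	huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, new_pte);
}
```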
a4a00b45 MS |
1063 | #ifdef CONFIG_NUMA |
1064 | void hugetlb_register_node(struct node *node); | |
1065 | void hugetlb_unregister_node(struct node *node); | |
1066 | #endif | |
1067 | ||
b79f8eb4 JY |
1068 | /* |
1069 | * Check if a given raw @page in a hugepage is HWPOISON. | |
1070 | */ | |
1071 | bool is_raw_hwpoison_page_in_hugepage(struct page *page); | |
1072 | ||
7f24cbc9 OS |
1073 | static inline unsigned long huge_page_mask_align(struct file *file) |
1074 | { | |
1075 | return PAGE_MASK & ~huge_page_mask(hstate_file(file)); | |
1076 | } | |
1077 | ||
af73e4d9 | 1078 | #else /* CONFIG_HUGETLB_PAGE */ |
a5516438 | 1079 | struct hstate {}; |
442a5a9a | 1080 | |
7f24cbc9 OS |
1081 | static inline unsigned long huge_page_mask_align(struct file *file) |
1082 | { | |
1083 | return 0; | |
1084 | } | |
1085 | ||
345c62d1 SK |
1086 | static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio) |
1087 | { | |
1088 | return NULL; | |
1089 | } | |
1090 | ||
a08c7193 SK |
1091 | static inline struct folio *filemap_lock_hugetlb_folio(struct hstate *h, |
1092 | struct address_space *mapping, pgoff_t idx) | |
1093 | { | |
1094 | return NULL; | |
1095 | } | |
1096 | ||
b4c829fa | 1097 | static inline int isolate_or_dissolve_huge_folio(struct folio *folio, |
ae37c7ff | 1098 | struct list_head *list) |
369fa227 OS |
1099 | { |
1100 | return -ENOMEM; | |
1101 | } | |
1102 | ||
04f13d24 | 1103 | static inline int replace_free_hugepage_folios(unsigned long start_pfn, |
1104 | unsigned long end_pfn) | |
1105 | { | |
1106 | return 0; | |
1107 | } | |
1108 | ||
67bab133 GY |
1109 | static inline void wait_for_freed_hugetlb_folios(void) |
1110 | { | |
1111 | } | |
1112 | ||
d0ce0e47 | 1113 | static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, |
442a5a9a | 1114 | unsigned long addr, |
30cef82b | 1115 | bool cow_from_owner) |
442a5a9a JG |
1116 | { |
1117 | return NULL; | |
1118 | } | |
1119 | ||
26a8ea80 SS |
1120 | static inline struct folio * |
1121 | alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid, | |
1122 | nodemask_t *nmask, gfp_t gfp_mask) | |
1123 | { | |
1124 | return NULL; | |
1125 | } | |
1126 | ||
e37d3e83 SK |
1127 | static inline struct folio * |
1128 | alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid, | |
42d0c3fb BW |
1129 | nodemask_t *nmask, gfp_t gfp_mask, |
1130 | bool allow_alloc_fallback) | |
442a5a9a JG |
1131 | { |
1132 | return NULL; | |
1133 | } | |
1134 | ||
442a5a9a JG |
1135 | static inline int __alloc_bootmem_huge_page(struct hstate *h) |
1136 | { | |
1137 | return 0; | |
1138 | } | |
1139 | ||
1140 | static inline struct hstate *hstate_file(struct file *f) | |
1141 | { | |
1142 | return NULL; | |
1143 | } | |
1144 | ||
1145 | static inline struct hstate *hstate_sizelog(int page_size_log) | |
1146 | { | |
1147 | return NULL; | |
1148 | } | |
1149 | ||
1150 | static inline struct hstate *hstate_vma(struct vm_area_struct *vma) | |
1151 | { | |
1152 | return NULL; | |
442a5a9a JG |
1153 | } |
1154 | ||
e51da3a9 SK |
1155 | static inline struct hstate *folio_hstate(struct folio *folio) |
1156 | { | |
1157 | return NULL; | |
1158 | } | |
1159 | ||
2aff7a47 MWO |
1160 | static inline struct hstate *size_to_hstate(unsigned long size) |
1161 | { | |
1162 | return NULL; | |
1163 | } | |
1164 | ||
442a5a9a JG |
1165 | static inline unsigned long huge_page_size(struct hstate *h) |
1166 | { | |
1167 | return PAGE_SIZE; | |
1168 | } | |
1169 | ||
1170 | static inline unsigned long huge_page_mask(struct hstate *h) | |
1171 | { | |
1172 | return PAGE_MASK; | |
1173 | } | |
1174 | ||
1175 | static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma) | |
1176 | { | |
1177 | return PAGE_SIZE; | |
1178 | } | |
1179 | ||
1180 | static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma) | |
1181 | { | |
1182 | return PAGE_SIZE; | |
1183 | } | |
1184 | ||
1185 | static inline unsigned int huge_page_order(struct hstate *h) | |
1186 | { | |
1187 | return 0; | |
1188 | } | |
1189 | ||
1190 | static inline unsigned int huge_page_shift(struct hstate *h) | |
1191 | { | |
1192 | return PAGE_SHIFT; | |
1193 | } | |
1194 | ||
94310cbc AK |
1195 | static inline bool hstate_is_gigantic(struct hstate *h) |
1196 | { | |
1197 | return false; | |
1198 | } | |
1199 | ||
510a35d4 AR |
1200 | static inline unsigned int pages_per_huge_page(struct hstate *h) |
1201 | { | |
1202 | return 1; | |
1203 | } | |
c3114a84 AK |
1204 | |
1205 | static inline unsigned hstate_index_to_shift(unsigned index) | |
1206 | { | |
1207 | return 0; | |
1208 | } | |
1209 | ||
1210 | static inline int hstate_index(struct hstate *h) | |
1211 | { | |
1212 | return 0; | |
1213 | } | |
13d60f4b | 1214 | |
54fa49b2 | 1215 | static inline int dissolve_free_hugetlb_folio(struct folio *folio) |
c3114a84 AK |
1216 | { |
1217 | return 0; | |
1218 | } | |
1219 | ||
d199483c | 1220 | static inline int dissolve_free_hugetlb_folios(unsigned long start_pfn, |
c3114a84 AK |
1221 | unsigned long end_pfn) |
1222 | { | |
1223 | return 0; | |
1224 | } | |
1225 | ||
1226 | static inline bool hugepage_migration_supported(struct hstate *h) | |
1227 | { | |
1228 | return false; | |
1229 | } | |
cb900f41 | 1230 | |
7ed2c31d AK |
1231 | static inline bool hugepage_movable_supported(struct hstate *h) |
1232 | { | |
1233 | return false; | |
1234 | } | |
1235 | ||
d92bbc27 JK |
1236 | static inline gfp_t htlb_alloc_mask(struct hstate *h) |
1237 | { | |
1238 | return 0; | |
1239 | } | |
1240 | ||
19fc7bed JK |
1241 | static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask) |
1242 | { | |
1243 | return 0; | |
1244 | } | |
1245 | ||
42d0c3fb BW |
1246 | static inline bool htlb_allow_alloc_fallback(int reason) |
1247 | { | |
1248 | return false; | |
1249 | } | |
1250 | ||
cb900f41 KS |
1251 | static inline spinlock_t *huge_pte_lockptr(struct hstate *h, |
1252 | struct mm_struct *mm, pte_t *pte) | |
1253 | { | |
1254 | return &mm->page_table_lock; | |
1255 | } | |
5d317b2b | 1256 | |
13db8c50 LZ |
1257 | static inline void hugetlb_count_init(struct mm_struct *mm) |
1258 | { | |
1259 | } | |
1260 | ||
5d317b2b NH |
1261 | static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m) |
1262 | { | |
1263 | } | |
1264 | ||
1265 | static inline void hugetlb_count_sub(long l, struct mm_struct *mm) | |
1266 | { | |
1267 | } | |
e5251fd4 | 1268 | |
5d4af619 BW |
1269 | static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, |
1270 | unsigned long addr, pte_t *ptep) | |
1271 | { | |
c33c7948 RR |
1272 | #ifdef CONFIG_MMU |
1273 | return ptep_get(ptep); | |
1274 | #else | |
5d4af619 | 1275 | return *ptep; |
c33c7948 | 1276 | #endif |
5d4af619 BW |
1277 | } |
1278 | ||
1279 | static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, | |
935d4f0c | 1280 | pte_t *ptep, pte_t pte, unsigned long sz) |
5d4af619 BW |
1281 | { |
1282 | } | |
a4a00b45 MS |
1283 | |
1284 | static inline void hugetlb_register_node(struct node *node) | |
1285 | { | |
1286 | } | |
1287 | ||
1288 | static inline void hugetlb_unregister_node(struct node *node) | |
1289 | { | |
1290 | } | |
24334e78 PX |
1291 | |
1292 | static inline bool hugetlbfs_pagecache_present( | |
1293 | struct hstate *h, struct vm_area_struct *vma, unsigned long address) | |
1294 | { | |
1295 | return false; | |
1296 | } | |
5b47c029 FL |
1297 | |
1298 | static inline void hugetlb_bootmem_alloc(void) | |
1299 | { | |
1300 | } | |
d58b2498 FL |
1301 | |
1302 | static inline bool hugetlb_bootmem_allocated(void) | |
1303 | { | |
1304 | return false; | |
1305 | } | |
af73e4d9 | 1306 | #endif /* CONFIG_HUGETLB_PAGE */ |
a5516438 | 1307 | |
cb900f41 KS |
1308 | static inline spinlock_t *huge_pte_lock(struct hstate *h, |
1309 | struct mm_struct *mm, pte_t *pte) | |
1310 | { | |
1311 | spinlock_t *ptl; | |
1312 | ||
1313 | ptl = huge_pte_lockptr(h, mm, pte); | |
1314 | spin_lock(ptl); | |
1315 | return ptl; | |
1316 | } | |
1317 | ||
cf11e85f RG |
1318 | #if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA) |
1319 | extern void __init hugetlb_cma_reserve(int order); | |
cf11e85f RG |
1320 | #else |
1321 | static inline __init void hugetlb_cma_reserve(int order) | |
1322 | { | |
1323 | } | |
cf11e85f RG |
1324 | #endif |
1325 | ||
188cac58 | 1326 | #ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING |
3489dbb6 MK |
1327 | static inline bool hugetlb_pmd_shared(pte_t *pte) |
1328 | { | |
1329 | return page_count(virt_to_page(pte)) > 1; | |
1330 | } | |
1331 | #else | |
1332 | static inline bool hugetlb_pmd_shared(pte_t *pte) | |
1333 | { | |
1334 | return false; | |
1335 | } | |
1336 | #endif | |
1337 | ||
c1991e07 PX |
1338 | bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr); |
1339 | ||
537cf30b PX |
1340 | #ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE |
1341 | /* | |
1342 | * ARCHes with special requirements for evicting HUGETLB backing TLB entries can | |
1343 | * implement this. | |
1344 | */ | |
1345 | #define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) | |
1346 | #endif | |
1347 | ||
9c67a207 PX |
1348 | static inline bool __vma_shareable_lock(struct vm_area_struct *vma) |
1349 | { | |
1350 | return (vma->vm_flags & VM_MAYSHARE) && vma->vm_private_data; | |
1351 | } | |
1352 | ||
187da0f8 | 1353 | bool __vma_private_lock(struct vm_area_struct *vma); |
bf491692 | 1354 | |
9c67a207 PX |
1355 | /* |
1356 | * Safe version of huge_pte_offset() to check the locks. See comments | |
1357 | * above huge_pte_offset(). | |
1358 | */ | |
1359 | static inline pte_t * | |
1360 | hugetlb_walk(struct vm_area_struct *vma, unsigned long addr, unsigned long sz) | |
1361 | { | |
188cac58 | 1362 | #if defined(CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING) && defined(CONFIG_LOCKDEP) |
9c67a207 PX |
1363 | struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; |
1364 | ||
1365 | /* | |
1366 | * If pmd sharing possible, locking needed to safely walk the | |
1367 | * hugetlb pgtables. More information can be found at the comment | |
1368 | * above huge_pte_offset() in the same file. | |
1369 | * | |
1370 | * NOTE: lockdep_is_held() is only defined with CONFIG_LOCKDEP. | |
1371 | */ | |
1372 | if (__vma_shareable_lock(vma)) | |
1373 | WARN_ON_ONCE(!lockdep_is_held(&vma_lock->rw_sema) && | |
1374 | !lockdep_is_held( | |
1375 | &vma->vm_file->f_mapping->i_mmap_rwsem)); | |
1376 | #endif | |
1377 | return huge_pte_offset(vma->vm_mm, addr, sz); | |
1378 | } | |
1379 | ||
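Putting the pieces together, a hedged sketch of the safe-walk pattern the locking comments above describe for a hugetlbfs-backed VMA: take the hugetlb vma lock (option (2.1) above) so a concurrent pmd unshare cannot free the page table, then take the split PT lock before reading the entry. The helper name is made up, the huge_ptep_get() call assumes the current three-argument form, and error handling is simplified:

```c
/*
 * Illustrative only: safely read one huge PTE in @vma at @addr.
 * Takes the hugetlb vma lock (so hugetlb_walk() is allowed) and then
 * the split PT lock for the returned pte before dereferencing it.
 */
static pte_t example_read_huge_pte(struct vm_area_struct *vma,
				   unsigned long addr)
{
	struct hstate *h = hstate_vma(vma);
	struct mm_struct *mm = vma->vm_mm;
	pte_t entry = __pte(0);
	spinlock_t *ptl;
	pte_t *ptep;

	hugetlb_vma_lock_read(vma);
	ptep = hugetlb_walk(vma, addr & huge_page_mask(h), huge_page_size(h));
	if (ptep) {
		ptl = huge_pte_lock(h, mm, ptep);
		entry = huge_ptep_get(mm, addr, ptep);
		spin_unlock(ptl);
	}
	hugetlb_vma_unlock_read(vma);

	return entry;
}
```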
1da177e4 | 1380 | #endif /* _LINUX_HUGETLB_H */ |