Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
71e3aac0 AA | 2 | #ifndef _LINUX_HUGE_MM_H |
| 3 | #define _LINUX_HUGE_MM_H |
| 4 | |
16981d76 | 5 | #include <linux/sched/coredump.h> |
226ab561 | 6 | #include <linux/mm_types.h> |
16981d76 | 7 | |
baabda26 DW | 8 | #include <linux/fs.h> /* only for vma_is_dax() */ |
| 9 | |
ebfe1b8f RC | 10 | vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf); |
| 11 | int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, |
| 12 | pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, |
8f34f1ea | 13 | struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma); |
5db4f15c | 14 | void huge_pmd_set_accessed(struct vm_fault *vmf); |
ebfe1b8f RC | 15 | int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm, |
| 16 | pud_t *dst_pud, pud_t *src_pud, unsigned long addr, |
| 17 | struct vm_area_struct *vma); |
a00cc7d9 MW | 18 | |
| 19 | #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD |
ebfe1b8f | 20 | void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud); |
a00cc7d9 MW | 21 | #else |
| 22 | static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud) |
| 23 | { |
| 24 | } |
| 25 | #endif |
| 26 | |
5db4f15c | 27 | vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf); |
ebfe1b8f RC | 28 | struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, |
| 29 | unsigned long addr, pmd_t *pmd, |
| 30 | unsigned int flags); |
| 31 | bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, |
| 32 | pmd_t *pmd, unsigned long addr, unsigned long next); |
| 33 | int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, |
| 34 | unsigned long addr); |
| 35 | int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud, |
| 36 | unsigned long addr); |
| 37 | bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, |
| 38 | unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd); |
4a18419f NA | 39 | int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, |
| 40 | pmd_t *pmd, unsigned long addr, pgprot_t newprot, |
| 41 | unsigned long cp_flags); |
9a9731b1 THV | 42 | vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn, |
| 43 | pgprot_t pgprot, bool write); |
| 44 | |
| 45 | /** |
| 46 | * vmf_insert_pfn_pmd - insert a pmd size pfn |
| 47 | * @vmf: Structure describing the fault |
| 48 | * @pfn: pfn to insert |
| 49 | * @pgprot: page protection to use |
| 50 | * @write: whether it's a write fault |
| 51 | * |
| 52 | * Insert a pmd size pfn. See vmf_insert_pfn() for additional info. |
| 53 | * |
| 54 | * Return: vm_fault_t value. |
| 55 | */ |
| 56 | static inline vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, |
| 57 | bool write) |
| 58 | { |
| 59 | return vmf_insert_pfn_pmd_prot(vmf, pfn, vmf->vma->vm_page_prot, write); |
| 60 | } |
| 61 | vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn, |
| 62 | pgprot_t pgprot, bool write); |
| 63 | |
| 64 | /** |
| 65 | * vmf_insert_pfn_pud - insert a pud size pfn |
| 66 | * @vmf: Structure describing the fault |
| 67 | * @pfn: pfn to insert |
| 68 | * @pgprot: page protection to use |
| 69 | * @write: whether it's a write fault |
| 70 | * |
| 71 | * Insert a pud size pfn. See vmf_insert_pfn() for additional info. |
| 72 | * |
| 73 | * Return: vm_fault_t value. |
| 74 | */ |
| 75 | static inline vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, |
| 76 | bool write) |
| 77 | { |
| 78 | return vmf_insert_pfn_pud_prot(vmf, pfn, vmf->vma->vm_page_prot, write); |
| 79 | } |
| 80 | |
71e3aac0 | 81 | enum transparent_hugepage_flag { |
bae84953 | 82 | TRANSPARENT_HUGEPAGE_NEVER_DAX, |
71e3aac0 AA | 83 | TRANSPARENT_HUGEPAGE_FLAG, |
| 84 | TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, |
444eb2a4 MG | 85 | TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, |
| 86 | TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, |
21440d7e | 87 | TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, |
71e3aac0 | 88 | TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, |
ba76149f | 89 | TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG, |
79da5407 | 90 | TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG, |
71e3aac0 AA | 91 | }; |
| 92 | |
b46e756f KS | 93 | struct kobject; |
| 94 | struct kobj_attribute; |
| 95 | |
ebfe1b8f RC | 96 | ssize_t single_hugepage_flag_store(struct kobject *kobj, |
| 97 | struct kobj_attribute *attr, |
| 98 | const char *buf, size_t count, |
| 99 | enum transparent_hugepage_flag flag); |
| 100 | ssize_t single_hugepage_flag_show(struct kobject *kobj, |
| 101 | struct kobj_attribute *attr, char *buf, |
| 102 | enum transparent_hugepage_flag flag); |
5a6e75f8 KS | 103 | extern struct kobj_attribute shmem_enabled_attr; |
| 104 | |
d8c37c48 NH | 105 | #define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT) |
| 106 | #define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER) |
| 107 | |
71e3aac0 | 108 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
fde52796 AK | 109 | #define HPAGE_PMD_SHIFT PMD_SHIFT |
| 110 | #define HPAGE_PMD_SIZE ((1UL) << HPAGE_PMD_SHIFT) |
| 111 | #define HPAGE_PMD_MASK (~(HPAGE_PMD_SIZE - 1)) |
71e3aac0 | 112 | |
a00cc7d9 MW | 113 | #define HPAGE_PUD_SHIFT PUD_SHIFT |
| 114 | #define HPAGE_PUD_SIZE ((1UL) << HPAGE_PUD_SHIFT) |
| 115 | #define HPAGE_PUD_MASK (~(HPAGE_PUD_SIZE - 1)) |
| 116 | |
16981d76 DW | 117 | extern unsigned long transparent_hugepage_flags; |
| 118 | |
e6be37b2 ML | 119 | static inline bool transhuge_vma_suitable(struct vm_area_struct *vma, |
| 120 | unsigned long haddr) |
| 121 | { |
| 122 | /* Don't have to check pgoff for anonymous vma */ |
| 123 | if (!vma_is_anonymous(vma)) { |
| 124 | if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff, |
| 125 | HPAGE_PMD_NR)) |
| 126 | return false; |
| 127 | } |
| 128 | |
| 129 | if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end) |
| 130 | return false; |
| 131 | return true; |
| 132 | } |
| 133 | |
| 134 | static inline bool transhuge_vma_enabled(struct vm_area_struct *vma, |
| 135 | unsigned long vm_flags) |
| 136 | { |
| 137 | /* Explicitly disabled through madvise. */ |
| 138 | if ((vm_flags & VM_NOHUGEPAGE) || |
| 139 | test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)) |
| 140 | return false; |
| 141 | return true; |
| 142 | } |
| 143 | |
7635d9cb MH | 144 | /* |
| 145 | * to be used on vmas which are known to support THP. |
e6be37b2 | 146 | * Use transparent_hugepage_active otherwise |
7635d9cb MH | 147 | */ |
| 148 | static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma) |
16981d76 | 149 | { |
bae84953 AK | 150 | |
| 151 | /* |
| 152 | * If the hardware/firmware marked hugepage support disabled. |
| 153 | */ |
| 154 | if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_NEVER_DAX)) |
| 155 | return false; |
| 156 | |
e6be37b2 | 157 | if (!transhuge_vma_enabled(vma, vma->vm_flags)) |
16981d76 DW | 158 | return false; |
| 159 | |
222100ee | 160 | if (vma_is_temporary_stack(vma)) |
16981d76 DW | 161 | return false; |
| 162 | |
16981d76 DW | 163 | if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG)) |
| 164 | return true; |
bae84953 | 165 | |
baabda26 DW | 166 | if (vma_is_dax(vma)) |
| 167 | return true; |
| 168 | |
16981d76 DW | 169 | if (transparent_hugepage_flags & |
| 170 | (1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)) |
| 171 | return !!(vma->vm_flags & VM_HUGEPAGE); |
| 172 | |
| 173 | return false; |
| 174 | } |
| 175 | |
78d12c19 YS | 176 | static inline bool file_thp_enabled(struct vm_area_struct *vma) |
| 177 | { |
| 178 | struct inode *inode; |
| 179 | |
| 180 | if (!vma->vm_file) |
| 181 | return false; |
| 182 | |
| 183 | inode = vma->vm_file->f_inode; |
| 184 | |
| 185 | return (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS)) && |
| 186 | (vma->vm_flags & VM_EXEC) && |
| 187 | !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode); |
| 188 | } |
| 189 | |
e6be37b2 | 190 | bool transparent_hugepage_active(struct vm_area_struct *vma); |
43675e6f | 191 | |
79da5407 KS | 192 | #define transparent_hugepage_use_zero_page() \ |
| 193 | (transparent_hugepage_flags & \ |
| 194 | (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG)) |
71e3aac0 | 195 | |
ebfe1b8f RC | 196 | unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr, |
| 197 | unsigned long len, unsigned long pgoff, unsigned long flags); |
74d2fad1 | 198 | |
ebfe1b8f RC | 199 | void prep_transhuge_page(struct page *page); |
| 200 | void free_transhuge_page(struct page *page); |
9a982250 | 201 | |
d4b4084a | 202 | bool can_split_folio(struct folio *folio, int *pextra_pins); |
e9b61f19 KS | 203 | int split_huge_page_to_list(struct page *page, struct list_head *list); |
| 204 | static inline int split_huge_page(struct page *page) |
| 205 | { |
| 206 | return split_huge_page_to_list(page, NULL); |
| 207 | } |
9a982250 | 208 | void deferred_split_huge_page(struct page *page); |
eef1b3ba KS | 209 | |
| 210 | void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, |
af28a988 | 211 | unsigned long address, bool freeze, struct folio *folio); |
eef1b3ba KS | 212 | |
| 213 | #define split_huge_pmd(__vma, __pmd, __address) \ |
| 214 | do { \ |
| 215 | pmd_t *____pmd = (__pmd); \ |
84c3fc4e | 216 | if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd) \ |
5c7fb56e | 217 | || pmd_devmap(*____pmd)) \ |
fec89c10 | 218 | __split_huge_pmd(__vma, __pmd, __address, \ |
33f4751e | 219 | false, NULL); \ |
eef1b3ba | 220 | } while (0) |
ad0bed24 | 221 | |
2a52bcbc | 222 | |
fec89c10 | 223 | void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, |
af28a988 | 224 | bool freeze, struct folio *folio); |
2a52bcbc | 225 | |
a00cc7d9 MW | 226 | void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud, |
| 227 | unsigned long address); |
| 228 | |
| 229 | #define split_huge_pud(__vma, __pud, __address) \ |
| 230 | do { \ |
| 231 | pud_t *____pud = (__pud); \ |
| 232 | if (pud_trans_huge(*____pud) \ |
| 233 | || pud_devmap(*____pud)) \ |
| 234 | __split_huge_pud(__vma, __pud, __address); \ |
| 235 | } while (0) |
| 236 | |
ebfe1b8f RC | 237 | int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags, |
| 238 | int advice); |
| 239 | void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start, |
| 240 | unsigned long end, long adjust_next); |
| 241 | spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma); |
| 242 | spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma); |
84c3fc4e ZY | 243 | |
| 244 | static inline int is_swap_pmd(pmd_t pmd) |
| 245 | { |
| 246 | return !pmd_none(pmd) && !pmd_present(pmd); |
| 247 | } |
| 248 | |
c1e8d7c6 | 249 | /* mmap_lock must be held on entry */ |
b6ec57f4 KS | 250 | static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd, |
| 251 | struct vm_area_struct *vma) |
025c5b24 | 252 | { |
84c3fc4e | 253 | if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) |
b6ec57f4 | 254 | return __pmd_trans_huge_lock(pmd, vma); |
025c5b24 | 255 | else |
969e8d7e | 256 | return NULL; |
025c5b24 | 257 | } |
a00cc7d9 MW | 258 | static inline spinlock_t *pud_trans_huge_lock(pud_t *pud, |
| 259 | struct vm_area_struct *vma) |
| 260 | { |
a00cc7d9 MW | 261 | if (pud_trans_huge(*pud) || pud_devmap(*pud)) |
| 262 | return __pud_trans_huge_lock(pud, vma); |
| 263 | else |
| 264 | return NULL; |
| 265 | } |
6ffbb458 | 266 | |
5bf34d7c MWO | 267 | /** |
| 268 | * folio_test_pmd_mappable - Can we map this folio with a PMD? |
| 269 | * @folio: The folio to test |
| 270 | */ |
| 271 | static inline bool folio_test_pmd_mappable(struct folio *folio) |
| 272 | { |
| 273 | return folio_order(folio) >= HPAGE_PMD_ORDER; |
| 274 | } |
| 275 | |
a00cc7d9 | 276 | struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, |
df06b37f | 277 | pmd_t *pmd, int flags, struct dev_pagemap **pgmap); |
a00cc7d9 | 278 | struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr, |
df06b37f | 279 | pud_t *pud, int flags, struct dev_pagemap **pgmap); |
a00cc7d9 | 280 | |
5db4f15c | 281 | vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf); |
d10e63f2 | 282 | |
56873f43 | 283 | extern struct page *huge_zero_page; |
3b77e8c8 | 284 | extern unsigned long huge_zero_pfn; |
56873f43 WY | 285 | |
| 286 | static inline bool is_huge_zero_page(struct page *page) |
| 287 | { |
6aa7de05 | 288 | return READ_ONCE(huge_zero_page) == page; |
56873f43 WY | 289 | } |
| 290 | |
fc437044 MW | 291 | static inline bool is_huge_zero_pmd(pmd_t pmd) |
| 292 | { |
3b77e8c8 | 293 | return READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd) && pmd_present(pmd); |
fc437044 MW | 294 | } |
| 295 | |
a00cc7d9 MW | 296 | static inline bool is_huge_zero_pud(pud_t pud) |
| 297 | { |
| 298 | return false; |
| 299 | } |
| 300 | |
6fcb52a5 AL | 301 | struct page *mm_get_huge_zero_page(struct mm_struct *mm); |
| 302 | void mm_put_huge_zero_page(struct mm_struct *mm); |
fc437044 | 303 | |
10102459 KS | 304 | #define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot)) |
| 305 | |
9c670ea3 NH | 306 | static inline bool thp_migration_supported(void) |
| 307 | { |
| 308 | return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION); |
| 309 | } |
| 310 | |
87eaceb3 YS | 311 | static inline struct list_head *page_deferred_list(struct page *page) |
| 312 | { |
| 313 | /* |
| 314 | * Global or memcg deferred list in the second tail pages is |
| 315 | * occupied by compound_head. |
| 316 | */ |
| 317 | return &page[2].deferred_list; |
| 318 | } |
| 319 | |
71e3aac0 | 320 | #else /* CONFIG_TRANSPARENT_HUGEPAGE */ |
d8c37c48 NH | 321 | #define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; }) |
| 322 | #define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; }) |
| 323 | #define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; }) |
71e3aac0 | 324 | |
a00cc7d9 MW | 325 | #define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; }) |
| 326 | #define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; }) |
| 327 | #define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; }) |
| 328 | |
5bf34d7c MWO | 329 | static inline bool folio_test_pmd_mappable(struct folio *folio) |
| 330 | { |
| 331 | return false; |
| 332 | } |
| 333 | |
7635d9cb MH | 334 | static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma) |
| 335 | { |
| 336 | return false; |
| 337 | } |
| 338 | |
e6be37b2 | 339 | static inline bool transparent_hugepage_active(struct vm_area_struct *vma) |
16981d76 DW | 340 | { |
| 341 | return false; |
| 342 | } |
71e3aac0 | 343 | |
43675e6f YS | 344 | static inline bool transhuge_vma_suitable(struct vm_area_struct *vma, |
| 345 | unsigned long haddr) |
| 346 | { |
| 347 | return false; |
| 348 | } |
| 349 | |
e6be37b2 ML | 350 | static inline bool transhuge_vma_enabled(struct vm_area_struct *vma, |
| 351 | unsigned long vm_flags) |
| 352 | { |
| 353 | return false; |
| 354 | } |
| 355 | |
800d8c63 KS | 356 | static inline void prep_transhuge_page(struct page *page) {} |
| 357 | |
71e3aac0 | 358 | #define transparent_hugepage_flags 0UL |
74d2fad1 TK | 359 | |
| 360 | #define thp_get_unmapped_area NULL |
| 361 | |
b8f593cd | 362 | static inline bool |
d4b4084a | 363 | can_split_folio(struct folio *folio, int *pextra_pins) |
b8f593cd | 364 | { |
b8f593cd HY | 365 | return false; |
| 366 | } |
5bc7b8ac SL | 367 | static inline int |
| 368 | split_huge_page_to_list(struct page *page, struct list_head *list) |
| 369 | { |
| 370 | return 0; |
| 371 | } |
71e3aac0 AA | 372 | static inline int split_huge_page(struct page *page) |
| 373 | { |
| 374 | return 0; |
| 375 | } |
9a982250 | 376 | static inline void deferred_split_huge_page(struct page *page) {} |
78ddc534 | 377 | #define split_huge_pmd(__vma, __pmd, __address) \ |
e180377f | 378 | do { } while (0) |
2a52bcbc | 379 | |
fd60775a | 380 | static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, |
af28a988 | 381 | unsigned long address, bool freeze, struct folio *folio) {} |
2a52bcbc | 382 | static inline void split_huge_pmd_address(struct vm_area_struct *vma, |
af28a988 | 383 | unsigned long address, bool freeze, struct folio *folio) {} |
2a52bcbc | 384 | |
a00cc7d9 MW | 385 | #define split_huge_pud(__vma, __pmd, __address) \ |
| 386 | do { } while (0) |
| 387 | |
60ab3244 AA | 388 | static inline int hugepage_madvise(struct vm_area_struct *vma, |
| 389 | unsigned long *vm_flags, int advice) |
0af4e98b AA | 390 | { |
| 391 | BUG(); |
| 392 | return 0; |
| 393 | } |
94fcc585 AA | 394 | static inline void vma_adjust_trans_huge(struct vm_area_struct *vma, |
| 395 | unsigned long start, |
| 396 | unsigned long end, |
| 397 | long adjust_next) |
| 398 | { |
| 399 | } |
84c3fc4e ZY | 400 | static inline int is_swap_pmd(pmd_t pmd) |
| 401 | { |
| 402 | return 0; |
| 403 | } |
b6ec57f4 KS | 404 | static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd, |
| 405 | struct vm_area_struct *vma) |
025c5b24 | 406 | { |
b6ec57f4 | 407 | return NULL; |
025c5b24 | 408 | } |
a00cc7d9 MW | 409 | static inline spinlock_t *pud_trans_huge_lock(pud_t *pud, |
| 410 | struct vm_area_struct *vma) |
| 411 | { |
| 412 | return NULL; |
| 413 | } |
d10e63f2 | 414 | |
5db4f15c | 415 | static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf) |
d10e63f2 | 416 | { |
4daae3b4 | 417 | return 0; |
d10e63f2 MG | 418 | } |
| 419 | |
56873f43 WY | 420 | static inline bool is_huge_zero_page(struct page *page) |
| 421 | { |
| 422 | return false; |
| 423 | } |
| 424 | |
3b77e8c8 HD | 425 | static inline bool is_huge_zero_pmd(pmd_t pmd) |
| 426 | { |
| 427 | return false; |
| 428 | } |
| 429 | |
a00cc7d9 MW | 430 | static inline bool is_huge_zero_pud(pud_t pud) |
| 431 | { |
| 432 | return false; |
| 433 | } |
| 434 | |
6fcb52a5 | 435 | static inline void mm_put_huge_zero_page(struct mm_struct *mm) |
aa88b68c | 436 | { |
6fcb52a5 | 437 | return; |
aa88b68c | 438 | } |
3565fce3 DW | 439 | |
| 440 | static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma, |
df06b37f | 441 | unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap) |
3565fce3 DW | 442 | { |
| 443 | return NULL; |
| 444 | } |
a00cc7d9 MW | 445 | |
| 446 | static inline struct page *follow_devmap_pud(struct vm_area_struct *vma, |
df06b37f | 447 | unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap) |
a00cc7d9 MW | 448 | { |
| 449 | return NULL; |
| 450 | } |
9c670ea3 NH | 451 | |
| 452 | static inline bool thp_migration_supported(void) |
| 453 | { |
| 454 | return false; |
| 455 | } |
71e3aac0 AA | 456 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ |
| 457 | |
346cf613 MWO | 458 | static inline int split_folio_to_list(struct folio *folio, |
| 459 | struct list_head *list) |
| 460 | { |
| 461 | return split_huge_page_to_list(&folio->page, list); |
| 462 | } |
| 463 | |
71e3aac0 | 464 | #endif /* _LINUX_HUGE_MM_H */ |
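
The kernel-doc for vmf_insert_pfn_pmd() above only describes what the helper does, not how it is reached. The sketch below is a hypothetical example, not code from huge_mm.h: it shows roughly how a driver's `->huge_fault` handler could combine transhuge_vma_suitable() with vmf_insert_pfn_pmd(). `my_drv_huge_fault()` and `my_drv_base_phys()` are made-up names, and the `enum page_entry_size` handler signature is assumed from the kernel generation this header belongs to.

```c
/*
 * Hypothetical sketch only, not part of huge_mm.h.  A driver mapping
 * device memory could service a PMD-sized fault roughly like this;
 * my_drv_base_phys() stands in for the driver's own address lookup.
 */
#include <linux/huge_mm.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>

static vm_fault_t my_drv_huge_fault(struct vm_fault *vmf,
				    enum page_entry_size pe_size)
{
	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	phys_addr_t phys;

	if (pe_size != PE_SIZE_PMD)
		return VM_FAULT_FALLBACK;

	/* The PMD must fit the VMA and be naturally aligned in the file. */
	if (!transhuge_vma_suitable(vmf->vma, haddr))
		return VM_FAULT_FALLBACK;

	phys = my_drv_base_phys(vmf->vma, vmf->pgoff);	/* driver-specific */

	/*
	 * vmf_insert_pfn_pmd() picks up vmf->vma->vm_page_prot, exactly as
	 * the inline wrapper above does; PFN_DEV | PFN_MAP marks the pfn_t
	 * as device memory for the _prot variant's sanity checks.
	 */
	return vmf_insert_pfn_pmd(vmf, phys_to_pfn_t(phys, PFN_DEV | PFN_MAP),
				  write);
}
```

On failure or misalignment the handler returns VM_FAULT_FALLBACK, so the core fault path retries the mapping at PTE granularity.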
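Similarly, the comment "mmap_lock must be held on entry" states the locking contract of pmd_trans_huge_lock() but not the usual call pattern. Below is a minimal, hypothetical page-walk step built around it; `my_handle_huge_pmd()` and `my_handle_pte_range()` are placeholders for whatever per-PMD and per-PTE work a real caller would do.

```c
/*
 * Hypothetical sketch only, not part of huge_mm.h.  Typical shape of a
 * page-table walker step using pmd_trans_huge_lock(); the caller must
 * already hold mmap_lock, as required by the helper's comment.
 */
#include <linux/huge_mm.h>
#include <linux/mm.h>
#include <linux/spinlock.h>

/* Placeholders for the caller's own per-PMD and per-PTE work. */
static void my_handle_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			       unsigned long addr);
static void my_handle_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
				unsigned long addr, unsigned long end);

static void my_walk_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, unsigned long end)
{
	spinlock_t *ptl = pmd_trans_huge_lock(pmd, vma);

	if (ptl) {
		/*
		 * *pmd is a huge, devmap or swap PMD and cannot change
		 * until the returned page-table lock is dropped.
		 */
		my_handle_huge_pmd(vma, pmd, addr);
		spin_unlock(ptl);
		return;
	}

	/* Not (or no longer) a huge PMD: fall back to PTE granularity. */
	my_handle_pte_range(vma, pmd, addr, end);
}
```

The NULL return covers both the !CONFIG_TRANSPARENT_HUGEPAGE stub and the case where the entry was split concurrently, so the PTE-level fallback is always the safe branch.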