/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/sched/coredump.h>
#include <linux/mm_types.h>

#include <linux/fs.h> /* only for vma_is_dax() */
#include <linux/kobject.h>

vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
void huge_pmd_set_accessed(struct vm_fault *vmf);
int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
		  struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf);
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
			   pmd_t *pmd, unsigned long addr, unsigned long next);
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
		 unsigned long addr);
int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
		 unsigned long addr);
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		   unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd);
int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
		    pmd_t *pmd, unsigned long addr, pgprot_t newprot,
		    unsigned long cp_flags);

vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);

enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_UNSUPPORTED,
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
};

struct kobject;
struct kobj_attribute;

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count,
				   enum transparent_hugepage_flag flag);
ssize_t single_hugepage_flag_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf,
				  enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;
extern struct kobj_attribute thpsize_shmem_enabled_attr;

/*
 * Mask of all large folio orders supported for anonymous THP; all orders up to
 * and including PMD_ORDER, except order-0 (which is not "huge") and order-1
 * (which is a limitation of the THP implementation).
 */
#define THP_ORDERS_ALL_ANON	((BIT(PMD_ORDER + 1) - 1) & ~(BIT(0) | BIT(1)))
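
/*
 * For example, with 4K base pages and 2M PMD mappings (PMD_ORDER == 9),
 * BIT(PMD_ORDER + 1) - 1 is 0x3ff (orders 0-9); clearing bits 0 and 1
 * leaves 0x3fc, i.e. orders 2 through 9 inclusive.
 */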

/*
 * Mask of all large folio orders supported for file THP. Folios in a DAX
 * file are never split, and the MAX_PAGECACHE_ORDER limit does not apply
 * to them. The same holds for PFNMAPs, where there is neither a struct
 * page nor a pagecache.
 */
#define THP_ORDERS_ALL_SPECIAL \
	(BIT(PMD_ORDER) | BIT(PUD_ORDER))
#define THP_ORDERS_ALL_FILE_DEFAULT \
	((BIT(MAX_PAGECACHE_ORDER + 1) - 1) & ~BIT(0))

/*
 * Mask of all large folio orders supported for THP.
 */
#define THP_ORDERS_ALL \
	(THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_SPECIAL | THP_ORDERS_ALL_FILE_DEFAULT)

#define TVA_SMAPS		(1 << 0)	/* Will be used for procfs */
#define TVA_IN_PF		(1 << 1)	/* Page fault handler */
#define TVA_ENFORCE_SYSFS	(1 << 2)	/* Obey sysfs configuration */

#define thp_vma_allowable_order(vma, vm_flags, tva_flags, order) \
	(!!thp_vma_allowable_orders(vma, vm_flags, tva_flags, BIT(order)))
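
/*
 * Illustrative sketch (not a definitive recipe): a page fault path might
 * ask whether a PMD-sized THP is permitted for a VMA, honouring the sysfs
 * settings, roughly as:
 *
 *	if (thp_vma_allowable_order(vma, vma->vm_flags,
 *				    TVA_IN_PF | TVA_ENFORCE_SYSFS, PMD_ORDER))
 *		...attempt a PMD-mapped fault...
 */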

#define split_folio(f) split_folio_to_list(f, NULL)

#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PUD_SHIFT PUD_SHIFT
#else
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#endif

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)

#define HPAGE_PUD_ORDER (HPAGE_PUD_SHIFT-PAGE_SHIFT)
#define HPAGE_PUD_NR (1<<HPAGE_PUD_ORDER)
#define HPAGE_PUD_MASK	(~(HPAGE_PUD_SIZE - 1))
#define HPAGE_PUD_SIZE	((1UL) << HPAGE_PUD_SHIFT)

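/*
 * For instance, on x86-64 with 4K base pages: HPAGE_PMD_SHIFT is 21, so
 * HPAGE_PMD_SIZE is 2M, HPAGE_PMD_ORDER is 9 and HPAGE_PMD_NR is 512;
 * HPAGE_PUD_SHIFT is 30, giving 1G PUD-sized huge pages.
 */
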
enum mthp_stat_item {
	MTHP_STAT_ANON_FAULT_ALLOC,
	MTHP_STAT_ANON_FAULT_FALLBACK,
	MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
	MTHP_STAT_SWPOUT,
	MTHP_STAT_SWPOUT_FALLBACK,
	MTHP_STAT_SHMEM_ALLOC,
	MTHP_STAT_SHMEM_FALLBACK,
	MTHP_STAT_SHMEM_FALLBACK_CHARGE,
	MTHP_STAT_SPLIT,
	MTHP_STAT_SPLIT_FAILED,
	MTHP_STAT_SPLIT_DEFERRED,
	MTHP_STAT_NR_ANON,
	MTHP_STAT_NR_ANON_PARTIALLY_MAPPED,
	__MTHP_STAT_COUNT
};

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
struct mthp_stat {
	unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT];
};

DECLARE_PER_CPU(struct mthp_stat, mthp_stats);

static inline void mod_mthp_stat(int order, enum mthp_stat_item item, int delta)
{
	if (order <= 0 || order > PMD_ORDER)
		return;

	this_cpu_add(mthp_stats.stats[order][item], delta);
}

static inline void count_mthp_stat(int order, enum mthp_stat_item item)
{
	mod_mthp_stat(order, item, 1);
}

#else
static inline void mod_mthp_stat(int order, enum mthp_stat_item item, int delta)
{
}

static inline void count_mthp_stat(int order, enum mthp_stat_item item)
{
}
#endif

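/*
 * Illustrative sketch: callers bump a per-order counter right where the
 * event happens, e.g. after allocating an anonymous folio:
 *
 *	count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_FAULT_ALLOC);
 *
 * The counters are per-CPU and indexed by folio order; order-0 events are
 * deliberately ignored by mod_mthp_stat().
 */
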
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

extern unsigned long transparent_hugepage_flags;
extern unsigned long huge_anon_orders_always;
extern unsigned long huge_anon_orders_madvise;
extern unsigned long huge_anon_orders_inherit;

static inline bool hugepage_global_enabled(void)
{
	return transparent_hugepage_flags &
			((1<<TRANSPARENT_HUGEPAGE_FLAG) |
			 (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG));
}

static inline bool hugepage_global_always(void)
{
	return transparent_hugepage_flags &
			(1<<TRANSPARENT_HUGEPAGE_FLAG);
}

static inline int highest_order(unsigned long orders)
{
	return fls_long(orders) - 1;
}

static inline int next_order(unsigned long *orders, int prev)
{
	*orders &= ~BIT(prev);
	return highest_order(*orders);
}

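/*
 * highest_order() and next_order() are intended for walking an orders
 * bitfield from the highest set bit down, as thp_vma_suitable_orders()
 * below does:
 *
 *	order = highest_order(orders);
 *	while (orders) {
 *		...try 'order'...
 *		order = next_order(&orders, order);
 *	}
 */
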
/*
 * Two checks are performed:
 * - For a file VMA, check that the linear page offset of the VMA is
 *   order-aligned within the file. The hugepage itself is guaranteed to
 *   be order-aligned within the file, but we must also check that the
 *   order-aligned addresses in the VMA map to order-aligned offsets
 *   within the file; otherwise the hugepage is not mappable.
 * - For all VMAs, check that haddr falls within an aligned hugepage
 *   area inside the VMA.
 */
static inline bool thp_vma_suitable_order(struct vm_area_struct *vma,
		unsigned long addr, int order)
{
	unsigned long hpage_size = PAGE_SIZE << order;
	unsigned long haddr;

	/* Don't have to check pgoff for anonymous vma */
	if (!vma_is_anonymous(vma)) {
		if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
				hpage_size >> PAGE_SHIFT))
			return false;
	}

	haddr = ALIGN_DOWN(addr, hpage_size);

	if (haddr < vma->vm_start || haddr + hpage_size > vma->vm_end)
		return false;
	return true;
}
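
/*
 * Worked example (illustrative, assuming 4K base pages): for a file VMA
 * with vm_start == 0x600000 and vm_pgoff == 3, an order-9 (2M) mapping
 * requires (0x600000 >> 12) - 3 == 0x5fd to be a multiple of 512; it is
 * not, so the VMA cannot map a 2M hugepage even if the address range is
 * large enough.
 */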

/*
 * Filter the bitfield of input orders to the ones suitable for use in the VMA.
 * See thp_vma_suitable_order().
 * All orders that pass the checks are returned as a bitfield.
 */
static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
		unsigned long addr, unsigned long orders)
{
	int order;

	/*
	 * Iterate over orders, highest to lowest, removing orders that don't
	 * meet alignment requirements from the set. Exit loop at first order
	 * that meets requirements, since all lower orders must also meet
	 * requirements.
	 */
	order = highest_order(orders);

	while (orders) {
		if (thp_vma_suitable_order(vma, addr, order))
			break;
		order = next_order(&orders, order);
	}

	return orders;
}

static inline bool file_thp_enabled(struct vm_area_struct *vma)
{
	struct inode *inode;

	if (!vma->vm_file)
		return false;

	inode = vma->vm_file->f_inode;

	return (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS)) &&
	       !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode);
}

unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
					 unsigned long vm_flags,
					 unsigned long tva_flags,
					 unsigned long orders);

/**
 * thp_vma_allowable_orders - determine hugepage orders that are allowed for vma
 * @vma: the vm area to check
 * @vm_flags: use these vm_flags instead of vma->vm_flags
 * @tva_flags: which TVA flags to honour
 * @orders: bitfield of all orders to consider
 *
 * Calculates the intersection of the requested hugepage orders and the allowed
 * hugepage orders for the provided vma. Permitted orders are encoded as a set
 * bit at the corresponding bit position (bit-2 corresponds to order-2, bit-3
 * corresponds to order-3, etc). Order-0 is never considered a hugepage order.
 *
 * Return: bitfield of orders allowed for hugepage in the vma. 0 if no hugepage
 * orders are allowed.
 */
static inline
unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
				       unsigned long vm_flags,
				       unsigned long tva_flags,
				       unsigned long orders)
{
	/* Optimization to check if required orders are enabled early. */
	if ((tva_flags & TVA_ENFORCE_SYSFS) && vma_is_anonymous(vma)) {
		unsigned long mask = READ_ONCE(huge_anon_orders_always);

		if (vm_flags & VM_HUGEPAGE)
			mask |= READ_ONCE(huge_anon_orders_madvise);
		if (hugepage_global_always() ||
		    ((vm_flags & VM_HUGEPAGE) && hugepage_global_enabled()))
			mask |= READ_ONCE(huge_anon_orders_inherit);

		orders &= mask;
		if (!orders)
			return 0;
	}

	return __thp_vma_allowable_orders(vma, vm_flags, tva_flags, orders);
}

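/*
 * Illustrative sketch of how the helpers compose, modelled on the
 * anonymous fault path (vmf is the fault descriptor there): first
 * intersect the candidate orders with what policy allows, then with what
 * actually fits the VMA:
 *
 *	orders = thp_vma_allowable_orders(vma, vma->vm_flags,
 *					  TVA_IN_PF | TVA_ENFORCE_SYSFS,
 *					  THP_ORDERS_ALL_ANON);
 *	orders = thp_vma_suitable_orders(vma, vmf->address, orders);
 *	if (!orders)
 *		...fall back to a small page...
 */
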
struct thpsize {
	struct kobject kobj;
	struct list_head node;
	int order;
};

#define to_thpsize(kobj) container_of(kobj, struct thpsize, kobj)

#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags);
unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags,
		vm_flags_t vm_flags);

bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins);
int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
		unsigned int new_order);
int min_order_for_split(struct folio *folio);
int split_folio_to_list(struct folio *folio, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	struct folio *folio = page_folio(page);
	int ret = min_order_for_split(folio);

	if (ret < 0)
		return ret;

	/*
	 * split_huge_page() locks the page before splitting and
	 * expects the same page that has been split to be locked when
	 * returned. split_folio(page_folio(page)) cannot be used here
	 * because it converts the page to folio and passes the head
	 * page to be split.
	 */
	return split_huge_page_to_list_to_order(page, NULL, ret);
}
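
/*
 * Illustrative sketch (assumes the caller already holds a reference on
 * the folio): a split must be attempted under the folio lock, and the
 * folio remains locked afterwards whether or not the split succeeded:
 *
 *	folio_lock(folio);
 *	ret = split_folio(folio);	(0 on success, negative errno on failure)
 *	folio_unlock(folio);
 */
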
void deferred_split_folio(struct folio *folio, bool partially_mapped);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct folio *folio);

#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)	\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false, NULL);		\
	} while (0)

void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct folio *folio);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
int change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
		    pud_t *pudp, unsigned long addr, pgprot_t newprot,
		    unsigned long cp_flags);
#else
static inline int
change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
		pud_t *pudp, unsigned long addr, pgprot_t newprot,
		unsigned long cp_flags) { return 0; }
#endif

#define split_huge_pud(__vma, __pud, __address)				\
	do {								\
		pud_t *____pud = (__pud);				\
		if (pud_trans_huge(*____pud)				\
					|| pud_devmap(*____pud))	\
			__split_huge_pud(__vma, __pud, __address);	\
	} while (0)

int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
		     int advice);
int madvise_collapse(struct vm_area_struct *vma,
		     struct vm_area_struct **prev,
		     unsigned long start, unsigned long end);
void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end, long adjust_next);
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);

static inline int is_swap_pmd(pmd_t pmd)
{
	return !pmd_none(pmd) && !pmd_present(pmd);
}

/* mmap_lock must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	if (pud_trans_huge(*pud) || pud_devmap(*pud))
		return __pud_trans_huge_lock(pud, vma);
	else
		return NULL;
}

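/*
 * Illustrative usage sketch: the helper returns the held page-table lock
 * only when the entry really is a huge (or swap/devmap) PMD, so callers
 * typically do:
 *
 *	ptl = pmd_trans_huge_lock(pmd, vma);
 *	if (ptl) {
 *		...operate on the huge PMD...
 *		spin_unlock(ptl);
 *	}
 */
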
/**
 * folio_test_pmd_mappable - Can we map this folio with a PMD?
 * @folio: The folio to test
 */
static inline bool folio_test_pmd_mappable(struct folio *folio)
{
	return folio_order(folio) >= HPAGE_PMD_ORDER;
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap);

vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);

extern struct folio *huge_zero_folio;
extern unsigned long huge_zero_pfn;

static inline bool is_huge_zero_folio(const struct folio *folio)
{
	return READ_ONCE(huge_zero_folio) == folio;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return pmd_present(pmd) && READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd);
}

struct folio *mm_get_huge_zero_folio(struct mm_struct *mm);
void mm_put_huge_zero_folio(struct mm_struct *mm);

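/*
 * Illustrative sketch (hypothetical caller): users of the shared huge
 * zero folio take a per-mm reference and drop it again when done, e.g.:
 *
 *	folio = mm_get_huge_zero_folio(mm);	(NULL if allocation failed)
 *	...map the zero folio read-only...
 *	mm_put_huge_zero_folio(mm);		(e.g. at mm teardown)
 */
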
#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

static inline bool thp_migration_supported(void)
{
	return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}

void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address,
			   pmd_t *pmd, bool freeze, struct folio *folio);
bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr,
			   pmd_t *pmdp, struct folio *folio);

#else /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline bool folio_test_pmd_mappable(struct folio *folio)
{
	return false;
}

static inline bool thp_vma_suitable_order(struct vm_area_struct *vma,
		unsigned long addr, int order)
{
	return false;
}

static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
		unsigned long addr, unsigned long orders)
{
	return 0;
}

static inline unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
					unsigned long vm_flags,
					unsigned long tva_flags,
					unsigned long orders)
{
	return 0;
}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area	NULL

static inline unsigned long
thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
			      unsigned long len, unsigned long pgoff,
			      unsigned long flags, vm_flags_t vm_flags)
{
	return 0;
}

static inline bool
can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins)
{
	return false;
}
static inline int
split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
		unsigned int new_order)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}

static inline int split_folio_to_list(struct folio *folio, struct list_head *list)
{
	return 0;
}

static inline void deferred_split_folio(struct folio *folio, bool partially_mapped) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct folio *folio) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct folio *folio) {}
static inline void split_huge_pmd_locked(struct vm_area_struct *vma,
					 unsigned long address, pmd_t *pmd,
					 bool freeze, struct folio *folio) {}

static inline bool unmap_huge_pmd_locked(struct vm_area_struct *vma,
					 unsigned long addr, pmd_t *pmdp,
					 struct folio *folio)
{
	return false;
}

#define split_huge_pud(__vma, __pmd, __address)	\
	do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	return -EINVAL;
}

static inline int madvise_collapse(struct vm_area_struct *vma,
				   struct vm_area_struct **prev,
				   unsigned long start, unsigned long end)
{
	return -EINVAL;
}

static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
	return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
{
	return 0;
}

static inline bool is_huge_zero_folio(const struct folio *folio)
{
	return false;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return false;
}

static inline void mm_put_huge_zero_folio(struct mm_struct *mm)
{
	return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
	unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline bool thp_migration_supported(void)
{
	return false;
}

static inline int highest_order(unsigned long orders)
{
	return 0;
}

static inline int next_order(unsigned long *orders, int prev)
{
	return 0;
}

static inline void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address)
{
}

static inline int change_huge_pud(struct mmu_gather *tlb,
				  struct vm_area_struct *vma, pud_t *pudp,
				  unsigned long addr, pgprot_t newprot,
				  unsigned long cp_flags)
{
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline int split_folio_to_list_to_order(struct folio *folio,
		struct list_head *list, int new_order)
{
	return split_huge_page_to_list_to_order(&folio->page, list, new_order);
}

static inline int split_folio_to_order(struct folio *folio, int new_order)
{
	return split_folio_to_list_to_order(folio, NULL, new_order);
}

#endif /* _LINUX_HUGE_MM_H */