mm/hugetlb: rename dissolve_free_huge_pages() to dissolve_free_hugetlb_folios()
[linux-2.6-block.git] / mm / hugetlb.c
CommitLineData
457c8996 1// SPDX-License-Identifier: GPL-2.0-only
1da177e4
LT
2/*
3 * Generic hugetlb support.
6d49e352 4 * (C) Nadia Yvette Chambers, April 2004
1da177e4 5 */
1da177e4
LT
6#include <linux/list.h>
7#include <linux/init.h>
1da177e4 8#include <linux/mm.h>
e1759c21 9#include <linux/seq_file.h>
1da177e4
LT
10#include <linux/sysctl.h>
11#include <linux/highmem.h>
cddb8a5c 12#include <linux/mmu_notifier.h>
1da177e4 13#include <linux/nodemask.h>
63551ae0 14#include <linux/pagemap.h>
5da7ca86 15#include <linux/mempolicy.h>
3b32123d 16#include <linux/compiler.h>
aea47ff3 17#include <linux/cpuset.h>
3935baa9 18#include <linux/mutex.h>
97ad1087 19#include <linux/memblock.h>
a3437870 20#include <linux/sysfs.h>
5a0e3ad6 21#include <linux/slab.h>
bbe88753 22#include <linux/sched/mm.h>
63489f8e 23#include <linux/mmdebug.h>
174cd4b1 24#include <linux/sched/signal.h>
0fe6e20b 25#include <linux/rmap.h>
c6247f72 26#include <linux/string_helpers.h>
fd6a03ed
NH
27#include <linux/swap.h>
28#include <linux/swapops.h>
8382d914 29#include <linux/jhash.h>
98fa15f3 30#include <linux/numa.h>
c77c0a8a 31#include <linux/llist.h>
cf11e85f 32#include <linux/cma.h>
8cc5fcbb 33#include <linux/migrate.h>
f9317f77 34#include <linux/nospec.h>
662ce1dc 35#include <linux/delayacct.h>
b958d4d0 36#include <linux/memory.h>
af19487f 37#include <linux/mm_inline.h>
c6c21c31 38#include <linux/padata.h>
d6606683 39
63551ae0 40#include <asm/page.h>
ca15ca40 41#include <asm/pgalloc.h>
24669e58 42#include <asm/tlb.h>
63551ae0 43
24669e58 44#include <linux/io.h>
63551ae0 45#include <linux/hugetlb.h>
9dd540e2 46#include <linux/hugetlb_cgroup.h>
9a305230 47#include <linux/node.h>
ab5ac90a 48#include <linux/page_owner.h>
7835e98b 49#include "internal.h"
f41f2ed4 50#include "hugetlb_vmemmap.h"
1da177e4 51
c3f38a38 52int hugetlb_max_hstate __read_mostly;
e5ff2159
AK
53unsigned int default_hstate_idx;
54struct hstate hstates[HUGE_MAX_HSTATE];
cf11e85f 55
dbda8fea 56#ifdef CONFIG_CMA
cf11e85f 57static struct cma *hugetlb_cma[MAX_NUMNODES];
38e719ab 58static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata;
2f6c57d6 59static bool hugetlb_cma_folio(struct folio *folio, unsigned int order)
a01f4390 60{
2f6c57d6 61 return cma_pages_valid(hugetlb_cma[folio_nid(folio)], &folio->page,
a01f4390
MK
62 1 << order);
63}
64#else
2f6c57d6 65static bool hugetlb_cma_folio(struct folio *folio, unsigned int order)
a01f4390
MK
66{
67 return false;
68}
dbda8fea
BS
69#endif
70static unsigned long hugetlb_cma_size __initdata;
cf11e85f 71
b78b27d0 72__initdata struct list_head huge_boot_pages[MAX_NUMNODES];
53ba51d2 73
e5ff2159
AK
74/* for command line parsing */
75static struct hstate * __initdata parsed_hstate;
76static unsigned long __initdata default_hstate_max_huge_pages;
9fee021d 77static bool __initdata parsed_valid_hugepagesz = true;
282f4214 78static bool __initdata parsed_default_hugepagesz;
b5389086 79static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata;
e5ff2159 80
3935baa9 81/*
31caf665
NH
82 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
83 * free_huge_pages, and surplus_huge_pages.
3935baa9 84 */
c3f38a38 85DEFINE_SPINLOCK(hugetlb_lock);
0bd0f9fb 86
8382d914
DB
87/*
88 * Serializes faults on the same logical page. This is used to
89 * prevent spurious OOMs when the hugepage pool is fully utilized.
90 */
91static int num_fault_mutexes;
c672c7f2 92struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
8382d914 93
7ca02d0a
MK
94/* Forward declaration */
95static int hugetlb_acct_memory(struct hstate *h, long delta);
8d9bfb26
MK
96static void hugetlb_vma_lock_free(struct vm_area_struct *vma);
97static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
ecfbd733 98static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
b30c14cd
JH
99static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
100 unsigned long start, unsigned long end);
bf491692 101static struct resv_map *vma_resv_map(struct vm_area_struct *vma);
7ca02d0a 102
1d88433b 103static inline bool subpool_is_free(struct hugepage_subpool *spool)
90481622 104{
1d88433b
ML
105 if (spool->count)
106 return false;
107 if (spool->max_hpages != -1)
108 return spool->used_hpages == 0;
109 if (spool->min_hpages != -1)
110 return spool->rsv_hpages == spool->min_hpages;
111
112 return true;
113}
90481622 114
db71ef79
MK
115static inline void unlock_or_release_subpool(struct hugepage_subpool *spool,
116 unsigned long irq_flags)
1d88433b 117{
db71ef79 118 spin_unlock_irqrestore(&spool->lock, irq_flags);
90481622
DG
119
120 /* If no pages are used, and no other handles to the subpool
7c8de358 121 * remain, give up any reservations based on minimum size and
7ca02d0a 122 * free the subpool */
1d88433b 123 if (subpool_is_free(spool)) {
7ca02d0a
MK
124 if (spool->min_hpages != -1)
125 hugetlb_acct_memory(spool->hstate,
126 -spool->min_hpages);
90481622 127 kfree(spool);
7ca02d0a 128 }
90481622
DG
129}
130
7ca02d0a
MK
131struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
132 long min_hpages)
90481622
DG
133{
134 struct hugepage_subpool *spool;
135
c6a91820 136 spool = kzalloc(sizeof(*spool), GFP_KERNEL);
90481622
DG
137 if (!spool)
138 return NULL;
139
140 spin_lock_init(&spool->lock);
141 spool->count = 1;
7ca02d0a
MK
142 spool->max_hpages = max_hpages;
143 spool->hstate = h;
144 spool->min_hpages = min_hpages;
145
146 if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
147 kfree(spool);
148 return NULL;
149 }
150 spool->rsv_hpages = min_hpages;
90481622
DG
151
152 return spool;
153}
154
155void hugepage_put_subpool(struct hugepage_subpool *spool)
156{
db71ef79
MK
157 unsigned long flags;
158
159 spin_lock_irqsave(&spool->lock, flags);
90481622
DG
160 BUG_ON(!spool->count);
161 spool->count--;
db71ef79 162 unlock_or_release_subpool(spool, flags);
90481622
DG
163}
164
1c5ecae3
MK
165/*
166 * Subpool accounting for allocating and reserving pages.
167 * Return -ENOMEM if there are not enough resources to satisfy the
9e7ee400 168 * request. Otherwise, return the number of pages by which the
1c5ecae3
MK
169 * global pools must be adjusted (upward). The returned value may
170 * only be different than the passed value (delta) in the case where
7c8de358 171 * a subpool minimum size must be maintained.
1c5ecae3
MK
172 */
173static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
90481622
DG
174 long delta)
175{
1c5ecae3 176 long ret = delta;
90481622
DG
177
178 if (!spool)
1c5ecae3 179 return ret;
90481622 180
db71ef79 181 spin_lock_irq(&spool->lock);
1c5ecae3
MK
182
183 if (spool->max_hpages != -1) { /* maximum size accounting */
184 if ((spool->used_hpages + delta) <= spool->max_hpages)
185 spool->used_hpages += delta;
186 else {
187 ret = -ENOMEM;
188 goto unlock_ret;
189 }
90481622 190 }
90481622 191
09a95e29
MK
192 /* minimum size accounting */
193 if (spool->min_hpages != -1 && spool->rsv_hpages) {
1c5ecae3
MK
194 if (delta > spool->rsv_hpages) {
195 /*
196 * Asking for more reserves than those already taken on
197 * behalf of subpool. Return difference.
198 */
199 ret = delta - spool->rsv_hpages;
200 spool->rsv_hpages = 0;
201 } else {
202 ret = 0; /* reserves already accounted for */
203 spool->rsv_hpages -= delta;
204 }
205 }
206
207unlock_ret:
db71ef79 208 spin_unlock_irq(&spool->lock);
90481622
DG
209 return ret;
210}
211
1c5ecae3
MK
212/*
213 * Subpool accounting for freeing and unreserving pages.
214 * Return the number of global page reservations that must be dropped.
215 * The return value may only be different than the passed value (delta)
216 * in the case where a subpool minimum size must be maintained.
217 */
218static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
90481622
DG
219 long delta)
220{
1c5ecae3 221 long ret = delta;
db71ef79 222 unsigned long flags;
1c5ecae3 223
90481622 224 if (!spool)
1c5ecae3 225 return delta;
90481622 226
db71ef79 227 spin_lock_irqsave(&spool->lock, flags);
1c5ecae3
MK
228
229 if (spool->max_hpages != -1) /* maximum size accounting */
230 spool->used_hpages -= delta;
231
09a95e29
MK
232 /* minimum size accounting */
233 if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
1c5ecae3
MK
234 if (spool->rsv_hpages + delta <= spool->min_hpages)
235 ret = 0;
236 else
237 ret = spool->rsv_hpages + delta - spool->min_hpages;
238
239 spool->rsv_hpages += delta;
240 if (spool->rsv_hpages > spool->min_hpages)
241 spool->rsv_hpages = spool->min_hpages;
242 }
243
244 /*
245 * If hugetlbfs_put_super couldn't free spool due to an outstanding
246 * quota reference, free it now.
247 */
db71ef79 248 unlock_or_release_subpool(spool, flags);
1c5ecae3
MK
249
250 return ret;
90481622
DG
251}
252
253static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
254{
255 return HUGETLBFS_SB(inode->i_sb)->spool;
256}
257
258static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
259{
496ad9aa 260 return subpool_inode(file_inode(vma->vm_file));
90481622
DG
261}
262
e700898f
MK
263/*
264 * hugetlb vma_lock helper routines
265 */
e700898f
MK
266void hugetlb_vma_lock_read(struct vm_area_struct *vma)
267{
268 if (__vma_shareable_lock(vma)) {
269 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
270
271 down_read(&vma_lock->rw_sema);
bf491692
RR
272 } else if (__vma_private_lock(vma)) {
273 struct resv_map *resv_map = vma_resv_map(vma);
274
275 down_read(&resv_map->rw_sema);
e700898f
MK
276 }
277}
278
279void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
280{
281 if (__vma_shareable_lock(vma)) {
282 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
283
284 up_read(&vma_lock->rw_sema);
bf491692
RR
285 } else if (__vma_private_lock(vma)) {
286 struct resv_map *resv_map = vma_resv_map(vma);
287
288 up_read(&resv_map->rw_sema);
e700898f
MK
289 }
290}
291
292void hugetlb_vma_lock_write(struct vm_area_struct *vma)
293{
294 if (__vma_shareable_lock(vma)) {
295 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
296
297 down_write(&vma_lock->rw_sema);
bf491692
RR
298 } else if (__vma_private_lock(vma)) {
299 struct resv_map *resv_map = vma_resv_map(vma);
300
301 down_write(&resv_map->rw_sema);
e700898f
MK
302 }
303}
304
305void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
306{
307 if (__vma_shareable_lock(vma)) {
308 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
309
310 up_write(&vma_lock->rw_sema);
bf491692
RR
311 } else if (__vma_private_lock(vma)) {
312 struct resv_map *resv_map = vma_resv_map(vma);
313
314 up_write(&resv_map->rw_sema);
e700898f
MK
315 }
316}
317
318int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
319{
e700898f 320
bf491692
RR
321 if (__vma_shareable_lock(vma)) {
322 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
e700898f 323
bf491692
RR
324 return down_write_trylock(&vma_lock->rw_sema);
325 } else if (__vma_private_lock(vma)) {
326 struct resv_map *resv_map = vma_resv_map(vma);
327
328 return down_write_trylock(&resv_map->rw_sema);
329 }
330
331 return 1;
e700898f
MK
332}
333
334void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
335{
336 if (__vma_shareable_lock(vma)) {
337 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
338
339 lockdep_assert_held(&vma_lock->rw_sema);
bf491692
RR
340 } else if (__vma_private_lock(vma)) {
341 struct resv_map *resv_map = vma_resv_map(vma);
342
343 lockdep_assert_held(&resv_map->rw_sema);
e700898f
MK
344 }
345}
346
347void hugetlb_vma_lock_release(struct kref *kref)
348{
349 struct hugetlb_vma_lock *vma_lock = container_of(kref,
350 struct hugetlb_vma_lock, refs);
351
352 kfree(vma_lock);
353}
354
355static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock)
356{
357 struct vm_area_struct *vma = vma_lock->vma;
358
359 /*
360 * vma_lock structure may or not be released as a result of put,
361 * it certainly will no longer be attached to vma so clear pointer.
362 * Semaphore synchronizes access to vma_lock->vma field.
363 */
364 vma_lock->vma = NULL;
365 vma->vm_private_data = NULL;
366 up_write(&vma_lock->rw_sema);
367 kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
368}
369
370static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
371{
372 if (__vma_shareable_lock(vma)) {
373 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
374
375 __hugetlb_vma_unlock_write_put(vma_lock);
bf491692
RR
376 } else if (__vma_private_lock(vma)) {
377 struct resv_map *resv_map = vma_resv_map(vma);
378
379 /* no free for anon vmas, but still need to unlock */
380 up_write(&resv_map->rw_sema);
e700898f
MK
381 }
382}
383
384static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
385{
386 /*
387 * Only present in sharable vmas.
388 */
389 if (!vma || !__vma_shareable_lock(vma))
390 return;
391
392 if (vma->vm_private_data) {
393 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
394
395 down_write(&vma_lock->rw_sema);
396 __hugetlb_vma_unlock_write_put(vma_lock);
397 }
398}
399
400static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
401{
402 struct hugetlb_vma_lock *vma_lock;
403
404 /* Only establish in (flags) sharable vmas */
405 if (!vma || !(vma->vm_flags & VM_MAYSHARE))
406 return;
407
408 /* Should never get here with non-NULL vm_private_data */
409 if (vma->vm_private_data)
410 return;
411
412 vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL);
413 if (!vma_lock) {
414 /*
415 * If we can not allocate structure, then vma can not
416 * participate in pmd sharing. This is only a possible
417 * performance enhancement and memory saving issue.
418 * However, the lock is also used to synchronize page
419 * faults with truncation. If the lock is not present,
420 * unlikely races could leave pages in a file past i_size
421 * until the file is removed. Warn in the unlikely case of
422 * allocation failure.
423 */
424 pr_warn_once("HugeTLB: unable to allocate vma specific lock\n");
425 return;
426 }
427
428 kref_init(&vma_lock->refs);
429 init_rwsem(&vma_lock->rw_sema);
430 vma_lock->vma = vma;
431 vma->vm_private_data = vma_lock;
432}
433
0db9d74e
MA
434/* Helper that removes a struct file_region from the resv_map cache and returns
435 * it for use.
436 */
437static struct file_region *
438get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
439{
3259914f 440 struct file_region *nrg;
0db9d74e
MA
441
442 VM_BUG_ON(resv->region_cache_count <= 0);
443
444 resv->region_cache_count--;
445 nrg = list_first_entry(&resv->region_cache, struct file_region, link);
0db9d74e
MA
446 list_del(&nrg->link);
447
448 nrg->from = from;
449 nrg->to = to;
450
451 return nrg;
452}
453
075a61d0
MA
454static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg,
455 struct file_region *rg)
456{
457#ifdef CONFIG_CGROUP_HUGETLB
458 nrg->reservation_counter = rg->reservation_counter;
459 nrg->css = rg->css;
460 if (rg->css)
461 css_get(rg->css);
462#endif
463}
464
465/* Helper that records hugetlb_cgroup uncharge info. */
466static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
467 struct hstate *h,
468 struct resv_map *resv,
469 struct file_region *nrg)
470{
471#ifdef CONFIG_CGROUP_HUGETLB
472 if (h_cg) {
473 nrg->reservation_counter =
474 &h_cg->rsvd_hugepage[hstate_index(h)];
475 nrg->css = &h_cg->css;
d85aecf2
ML
476 /*
477 * The caller will hold exactly one h_cg->css reference for the
478 * whole contiguous reservation region. But this area might be
479 * scattered when there are already some file_regions reside in
480 * it. As a result, many file_regions may share only one css
481 * reference. In order to ensure that one file_region must hold
482 * exactly one h_cg->css reference, we should do css_get for
483 * each file_region and leave the reference held by caller
484 * untouched.
485 */
486 css_get(&h_cg->css);
075a61d0
MA
487 if (!resv->pages_per_hpage)
488 resv->pages_per_hpage = pages_per_huge_page(h);
489 /* pages_per_hpage should be the same for all entries in
490 * a resv_map.
491 */
492 VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));
493 } else {
494 nrg->reservation_counter = NULL;
495 nrg->css = NULL;
496 }
497#endif
498}
499
d85aecf2
ML
500static void put_uncharge_info(struct file_region *rg)
501{
502#ifdef CONFIG_CGROUP_HUGETLB
503 if (rg->css)
504 css_put(rg->css);
505#endif
506}
507
a9b3f867
MA
508static bool has_same_uncharge_info(struct file_region *rg,
509 struct file_region *org)
510{
511#ifdef CONFIG_CGROUP_HUGETLB
0739eb43 512 return rg->reservation_counter == org->reservation_counter &&
a9b3f867
MA
513 rg->css == org->css;
514
515#else
516 return true;
517#endif
518}
519
520static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
521{
3259914f 522 struct file_region *nrg, *prg;
a9b3f867
MA
523
524 prg = list_prev_entry(rg, link);
525 if (&prg->link != &resv->regions && prg->to == rg->from &&
526 has_same_uncharge_info(prg, rg)) {
527 prg->to = rg->to;
528
529 list_del(&rg->link);
d85aecf2 530 put_uncharge_info(rg);
a9b3f867
MA
531 kfree(rg);
532
7db5e7b6 533 rg = prg;
a9b3f867
MA
534 }
535
536 nrg = list_next_entry(rg, link);
537 if (&nrg->link != &resv->regions && nrg->from == rg->to &&
538 has_same_uncharge_info(nrg, rg)) {
539 nrg->from = rg->from;
540
541 list_del(&rg->link);
d85aecf2 542 put_uncharge_info(rg);
a9b3f867 543 kfree(rg);
a9b3f867
MA
544 }
545}
546
2103cf9c 547static inline long
84448c8e 548hugetlb_resv_map_add(struct resv_map *map, struct list_head *rg, long from,
2103cf9c
PX
549 long to, struct hstate *h, struct hugetlb_cgroup *cg,
550 long *regions_needed)
551{
552 struct file_region *nrg;
553
554 if (!regions_needed) {
555 nrg = get_file_region_entry_from_cache(map, from, to);
556 record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg);
84448c8e 557 list_add(&nrg->link, rg);
2103cf9c
PX
558 coalesce_file_region(map, nrg);
559 } else
560 *regions_needed += 1;
561
562 return to - from;
563}
564
972a3da3
WY
565/*
566 * Must be called with resv->lock held.
567 *
568 * Calling this with regions_needed != NULL will count the number of pages
569 * to be added but will not modify the linked list. And regions_needed will
570 * indicate the number of file_regions needed in the cache to carry out to add
571 * the regions for this range.
d75c6af9
MA
572 */
573static long add_reservation_in_range(struct resv_map *resv, long f, long t,
075a61d0 574 struct hugetlb_cgroup *h_cg,
972a3da3 575 struct hstate *h, long *regions_needed)
d75c6af9 576{
0db9d74e 577 long add = 0;
d75c6af9 578 struct list_head *head = &resv->regions;
0db9d74e 579 long last_accounted_offset = f;
84448c8e
JK
580 struct file_region *iter, *trg = NULL;
581 struct list_head *rg = NULL;
d75c6af9 582
0db9d74e
MA
583 if (regions_needed)
584 *regions_needed = 0;
d75c6af9 585
0db9d74e 586 /* In this loop, we essentially handle an entry for the range
84448c8e 587 * [last_accounted_offset, iter->from), at every iteration, with some
0db9d74e
MA
588 * bounds checking.
589 */
84448c8e 590 list_for_each_entry_safe(iter, trg, head, link) {
0db9d74e 591 /* Skip irrelevant regions that start before our range. */
84448c8e 592 if (iter->from < f) {
0db9d74e
MA
593 /* If this region ends after the last accounted offset,
594 * then we need to update last_accounted_offset.
595 */
84448c8e
JK
596 if (iter->to > last_accounted_offset)
597 last_accounted_offset = iter->to;
0db9d74e
MA
598 continue;
599 }
d75c6af9 600
0db9d74e
MA
601 /* When we find a region that starts beyond our range, we've
602 * finished.
603 */
84448c8e
JK
604 if (iter->from >= t) {
605 rg = iter->link.prev;
d75c6af9 606 break;
84448c8e 607 }
d75c6af9 608
84448c8e 609 /* Add an entry for last_accounted_offset -> iter->from, and
0db9d74e
MA
610 * update last_accounted_offset.
611 */
84448c8e
JK
612 if (iter->from > last_accounted_offset)
613 add += hugetlb_resv_map_add(resv, iter->link.prev,
2103cf9c 614 last_accounted_offset,
84448c8e 615 iter->from, h, h_cg,
2103cf9c 616 regions_needed);
0db9d74e 617
84448c8e 618 last_accounted_offset = iter->to;
0db9d74e
MA
619 }
620
621 /* Handle the case where our range extends beyond
622 * last_accounted_offset.
623 */
84448c8e
JK
624 if (!rg)
625 rg = head->prev;
2103cf9c
PX
626 if (last_accounted_offset < t)
627 add += hugetlb_resv_map_add(resv, rg, last_accounted_offset,
628 t, h, h_cg, regions_needed);
0db9d74e 629
0db9d74e
MA
630 return add;
631}
632
633/* Must be called with resv->lock acquired. Will drop lock to allocate entries.
634 */
635static int allocate_file_region_entries(struct resv_map *resv,
636 int regions_needed)
637 __must_hold(&resv->lock)
638{
34665341 639 LIST_HEAD(allocated_regions);
0db9d74e
MA
640 int to_allocate = 0, i = 0;
641 struct file_region *trg = NULL, *rg = NULL;
642
643 VM_BUG_ON(regions_needed < 0);
644
0db9d74e
MA
645 /*
646 * Check for sufficient descriptors in the cache to accommodate
647 * the number of in progress add operations plus regions_needed.
648 *
649 * This is a while loop because when we drop the lock, some other call
650 * to region_add or region_del may have consumed some region_entries,
651 * so we keep looping here until we finally have enough entries for
652 * (adds_in_progress + regions_needed).
653 */
654 while (resv->region_cache_count <
655 (resv->adds_in_progress + regions_needed)) {
656 to_allocate = resv->adds_in_progress + regions_needed -
657 resv->region_cache_count;
658
659 /* At this point, we should have enough entries in the cache
f0953a1b 660 * for all the existing adds_in_progress. We should only be
0db9d74e 661 * needing to allocate for regions_needed.
d75c6af9 662 */
0db9d74e
MA
663 VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress);
664
665 spin_unlock(&resv->lock);
666 for (i = 0; i < to_allocate; i++) {
667 trg = kmalloc(sizeof(*trg), GFP_KERNEL);
668 if (!trg)
669 goto out_of_memory;
670 list_add(&trg->link, &allocated_regions);
d75c6af9 671 }
d75c6af9 672
0db9d74e
MA
673 spin_lock(&resv->lock);
674
d3ec7b6e
WY
675 list_splice(&allocated_regions, &resv->region_cache);
676 resv->region_cache_count += to_allocate;
d75c6af9
MA
677 }
678
0db9d74e 679 return 0;
d75c6af9 680
0db9d74e
MA
681out_of_memory:
682 list_for_each_entry_safe(rg, trg, &allocated_regions, link) {
683 list_del(&rg->link);
684 kfree(rg);
685 }
686 return -ENOMEM;
d75c6af9
MA
687}
688
1dd308a7
MK
689/*
690 * Add the huge page range represented by [f, t) to the reserve
0db9d74e
MA
691 * map. Regions will be taken from the cache to fill in this range.
692 * Sufficient regions should exist in the cache due to the previous
693 * call to region_chg with the same range, but in some cases the cache will not
694 * have sufficient entries due to races with other code doing region_add or
695 * region_del. The extra needed entries will be allocated.
cf3ad20b 696 *
0db9d74e
MA
697 * regions_needed is the out value provided by a previous call to region_chg.
698 *
699 * Return the number of new huge pages added to the map. This number is greater
700 * than or equal to zero. If file_region entries needed to be allocated for
7c8de358 701 * this operation and we were not able to allocate, it returns -ENOMEM.
0db9d74e
MA
702 * region_add of regions of length 1 never allocate file_regions and cannot
703 * fail; region_chg will always allocate at least 1 entry and a region_add for
704 * 1 page will only require at most 1 entry.
1dd308a7 705 */
0db9d74e 706static long region_add(struct resv_map *resv, long f, long t,
075a61d0
MA
707 long in_regions_needed, struct hstate *h,
708 struct hugetlb_cgroup *h_cg)
96822904 709{
0db9d74e 710 long add = 0, actual_regions_needed = 0;
96822904 711
7b24d861 712 spin_lock(&resv->lock);
0db9d74e
MA
713retry:
714
715 /* Count how many regions are actually needed to execute this add. */
972a3da3
WY
716 add_reservation_in_range(resv, f, t, NULL, NULL,
717 &actual_regions_needed);
96822904 718
5e911373 719 /*
0db9d74e
MA
720 * Check for sufficient descriptors in the cache to accommodate
721 * this add operation. Note that actual_regions_needed may be greater
722 * than in_regions_needed, as the resv_map may have been modified since
723 * the region_chg call. In this case, we need to make sure that we
724 * allocate extra entries, such that we have enough for all the
725 * existing adds_in_progress, plus the excess needed for this
726 * operation.
5e911373 727 */
0db9d74e
MA
728 if (actual_regions_needed > in_regions_needed &&
729 resv->region_cache_count <
730 resv->adds_in_progress +
731 (actual_regions_needed - in_regions_needed)) {
732 /* region_add operation of range 1 should never need to
733 * allocate file_region entries.
734 */
735 VM_BUG_ON(t - f <= 1);
5e911373 736
0db9d74e
MA
737 if (allocate_file_region_entries(
738 resv, actual_regions_needed - in_regions_needed)) {
739 return -ENOMEM;
740 }
5e911373 741
0db9d74e 742 goto retry;
5e911373
MK
743 }
744
972a3da3 745 add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);
0db9d74e
MA
746
747 resv->adds_in_progress -= in_regions_needed;
cf3ad20b 748
7b24d861 749 spin_unlock(&resv->lock);
cf3ad20b 750 return add;
96822904
AW
751}
752
1dd308a7
MK
753/*
754 * Examine the existing reserve map and determine how many
755 * huge pages in the specified range [f, t) are NOT currently
756 * represented. This routine is called before a subsequent
757 * call to region_add that will actually modify the reserve
758 * map to add the specified range [f, t). region_chg does
759 * not change the number of huge pages represented by the
0db9d74e
MA
760 * map. A number of new file_region structures is added to the cache as a
761 * placeholder, for the subsequent region_add call to use. At least 1
762 * file_region structure is added.
763 *
764 * out_regions_needed is the number of regions added to the
765 * resv->adds_in_progress. This value needs to be provided to a follow up call
766 * to region_add or region_abort for proper accounting.
5e911373
MK
767 *
768 * Returns the number of huge pages that need to be added to the existing
769 * reservation map for the range [f, t). This number is greater or equal to
770 * zero. -ENOMEM is returned if a new file_region structure or cache entry
771 * is needed and can not be allocated.
1dd308a7 772 */
0db9d74e
MA
773static long region_chg(struct resv_map *resv, long f, long t,
774 long *out_regions_needed)
96822904 775{
96822904
AW
776 long chg = 0;
777
7b24d861 778 spin_lock(&resv->lock);
5e911373 779
972a3da3 780 /* Count how many hugepages in this range are NOT represented. */
075a61d0 781 chg = add_reservation_in_range(resv, f, t, NULL, NULL,
972a3da3 782 out_regions_needed);
5e911373 783
0db9d74e
MA
784 if (*out_regions_needed == 0)
785 *out_regions_needed = 1;
5e911373 786
0db9d74e
MA
787 if (allocate_file_region_entries(resv, *out_regions_needed))
788 return -ENOMEM;
5e911373 789
0db9d74e 790 resv->adds_in_progress += *out_regions_needed;
7b24d861 791
7b24d861 792 spin_unlock(&resv->lock);
96822904
AW
793 return chg;
794}
795
5e911373
MK
796/*
797 * Abort the in progress add operation. The adds_in_progress field
798 * of the resv_map keeps track of the operations in progress between
799 * calls to region_chg and region_add. Operations are sometimes
800 * aborted after the call to region_chg. In such cases, region_abort
0db9d74e
MA
801 * is called to decrement the adds_in_progress counter. regions_needed
802 * is the value returned by the region_chg call, it is used to decrement
803 * the adds_in_progress counter.
5e911373
MK
804 *
805 * NOTE: The range arguments [f, t) are not needed or used in this
806 * routine. They are kept to make reading the calling code easier as
807 * arguments will match the associated region_chg call.
808 */
0db9d74e
MA
809static void region_abort(struct resv_map *resv, long f, long t,
810 long regions_needed)
5e911373
MK
811{
812 spin_lock(&resv->lock);
813 VM_BUG_ON(!resv->region_cache_count);
0db9d74e 814 resv->adds_in_progress -= regions_needed;
5e911373
MK
815 spin_unlock(&resv->lock);
816}
817
1dd308a7 818/*
feba16e2
MK
819 * Delete the specified range [f, t) from the reserve map. If the
820 * t parameter is LONG_MAX, this indicates that ALL regions after f
821 * should be deleted. Locate the regions which intersect [f, t)
822 * and either trim, delete or split the existing regions.
823 *
824 * Returns the number of huge pages deleted from the reserve map.
825 * In the normal case, the return value is zero or more. In the
826 * case where a region must be split, a new region descriptor must
827 * be allocated. If the allocation fails, -ENOMEM will be returned.
828 * NOTE: If the parameter t == LONG_MAX, then we will never split
829 * a region and possibly return -ENOMEM. Callers specifying
830 * t == LONG_MAX do not need to check for -ENOMEM error.
1dd308a7 831 */
feba16e2 832static long region_del(struct resv_map *resv, long f, long t)
96822904 833{
1406ec9b 834 struct list_head *head = &resv->regions;
96822904 835 struct file_region *rg, *trg;
feba16e2
MK
836 struct file_region *nrg = NULL;
837 long del = 0;
96822904 838
feba16e2 839retry:
7b24d861 840 spin_lock(&resv->lock);
feba16e2 841 list_for_each_entry_safe(rg, trg, head, link) {
dbe409e4
MK
842 /*
843 * Skip regions before the range to be deleted. file_region
844 * ranges are normally of the form [from, to). However, there
845 * may be a "placeholder" entry in the map which is of the form
846 * (from, to) with from == to. Check for placeholder entries
847 * at the beginning of the range to be deleted.
848 */
849 if (rg->to <= f && (rg->to != rg->from || rg->to != f))
feba16e2 850 continue;
dbe409e4 851
feba16e2 852 if (rg->from >= t)
96822904 853 break;
96822904 854
feba16e2
MK
855 if (f > rg->from && t < rg->to) { /* Must split region */
856 /*
857 * Check for an entry in the cache before dropping
858 * lock and attempting allocation.
859 */
860 if (!nrg &&
861 resv->region_cache_count > resv->adds_in_progress) {
862 nrg = list_first_entry(&resv->region_cache,
863 struct file_region,
864 link);
865 list_del(&nrg->link);
866 resv->region_cache_count--;
867 }
96822904 868
feba16e2
MK
869 if (!nrg) {
870 spin_unlock(&resv->lock);
871 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
872 if (!nrg)
873 return -ENOMEM;
874 goto retry;
875 }
876
877 del += t - f;
79aa925b 878 hugetlb_cgroup_uncharge_file_region(
d85aecf2 879 resv, rg, t - f, false);
feba16e2
MK
880
881 /* New entry for end of split region */
882 nrg->from = t;
883 nrg->to = rg->to;
075a61d0
MA
884
885 copy_hugetlb_cgroup_uncharge_info(nrg, rg);
886
feba16e2
MK
887 INIT_LIST_HEAD(&nrg->link);
888
889 /* Original entry is trimmed */
890 rg->to = f;
891
892 list_add(&nrg->link, &rg->link);
893 nrg = NULL;
96822904 894 break;
feba16e2
MK
895 }
896
897 if (f <= rg->from && t >= rg->to) { /* Remove entire region */
898 del += rg->to - rg->from;
075a61d0 899 hugetlb_cgroup_uncharge_file_region(resv, rg,
d85aecf2 900 rg->to - rg->from, true);
feba16e2
MK
901 list_del(&rg->link);
902 kfree(rg);
903 continue;
904 }
905
906 if (f <= rg->from) { /* Trim beginning of region */
075a61d0 907 hugetlb_cgroup_uncharge_file_region(resv, rg,
d85aecf2 908 t - rg->from, false);
075a61d0 909
79aa925b
MK
910 del += t - rg->from;
911 rg->from = t;
912 } else { /* Trim end of region */
075a61d0 913 hugetlb_cgroup_uncharge_file_region(resv, rg,
d85aecf2 914 rg->to - f, false);
79aa925b
MK
915
916 del += rg->to - f;
917 rg->to = f;
feba16e2 918 }
96822904 919 }
7b24d861 920
7b24d861 921 spin_unlock(&resv->lock);
feba16e2
MK
922 kfree(nrg);
923 return del;
96822904
AW
924}
925
b5cec28d
MK
926/*
927 * A rare out of memory error was encountered which prevented removal of
928 * the reserve map region for a page. The huge page itself was free'ed
929 * and removed from the page cache. This routine will adjust the subpool
930 * usage count, and the global reserve count if needed. By incrementing
931 * these counts, the reserve map entry which could not be deleted will
932 * appear as a "reserved" entry instead of simply dangling with incorrect
933 * counts.
934 */
72e2936c 935void hugetlb_fix_reserve_counts(struct inode *inode)
b5cec28d
MK
936{
937 struct hugepage_subpool *spool = subpool_inode(inode);
938 long rsv_adjust;
da56388c 939 bool reserved = false;
b5cec28d
MK
940
941 rsv_adjust = hugepage_subpool_get_pages(spool, 1);
da56388c 942 if (rsv_adjust > 0) {
b5cec28d
MK
943 struct hstate *h = hstate_inode(inode);
944
da56388c
ML
945 if (!hugetlb_acct_memory(h, 1))
946 reserved = true;
947 } else if (!rsv_adjust) {
948 reserved = true;
b5cec28d 949 }
da56388c
ML
950
951 if (!reserved)
952 pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
b5cec28d
MK
953}
954
1dd308a7
MK
955/*
956 * Count and return the number of huge pages in the reserve map
957 * that intersect with the range [f, t).
958 */
1406ec9b 959static long region_count(struct resv_map *resv, long f, long t)
84afd99b 960{
1406ec9b 961 struct list_head *head = &resv->regions;
84afd99b
AW
962 struct file_region *rg;
963 long chg = 0;
964
7b24d861 965 spin_lock(&resv->lock);
84afd99b
AW
966 /* Locate each segment we overlap with, and count that overlap. */
967 list_for_each_entry(rg, head, link) {
f2135a4a
WSH
968 long seg_from;
969 long seg_to;
84afd99b
AW
970
971 if (rg->to <= f)
972 continue;
973 if (rg->from >= t)
974 break;
975
976 seg_from = max(rg->from, f);
977 seg_to = min(rg->to, t);
978
979 chg += seg_to - seg_from;
980 }
7b24d861 981 spin_unlock(&resv->lock);
84afd99b
AW
982
983 return chg;
984}
985
e7c4b0bf
AW
986/*
987 * Convert the address within this vma to the page offset within
a08c7193 988 * the mapping, huge page units here.
e7c4b0bf 989 */
a5516438
AK
990static pgoff_t vma_hugecache_offset(struct hstate *h,
991 struct vm_area_struct *vma, unsigned long address)
e7c4b0bf 992{
a5516438
AK
993 return ((address - vma->vm_start) >> huge_page_shift(h)) +
994 (vma->vm_pgoff >> huge_page_order(h));
e7c4b0bf
AW
995}
996
8cfd014e
MWO
997/**
998 * vma_kernel_pagesize - Page size granularity for this VMA.
999 * @vma: The user mapping.
1000 *
1001 * Folios in this VMA will be aligned to, and at least the size of the
1002 * number of bytes returned by this function.
1003 *
1004 * Return: The default size of the folios allocated when backing a VMA.
08fba699
MG
1005 */
1006unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
1007{
05ea8860
DW
1008 if (vma->vm_ops && vma->vm_ops->pagesize)
1009 return vma->vm_ops->pagesize(vma);
1010 return PAGE_SIZE;
08fba699 1011}
f340ca0f 1012EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
08fba699 1013
3340289d
MG
1014/*
1015 * Return the page size being used by the MMU to back a VMA. In the majority
1016 * of cases, the page size used by the kernel matches the MMU size. On
09135cc5
DW
1017 * architectures where it differs, an architecture-specific 'strong'
1018 * version of this symbol is required.
3340289d 1019 */
09135cc5 1020__weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
3340289d
MG
1021{
1022 return vma_kernel_pagesize(vma);
1023}
3340289d 1024
84afd99b
AW
1025/*
1026 * Flags for MAP_PRIVATE reservations. These are stored in the bottom
1027 * bits of the reservation map pointer, which are always clear due to
1028 * alignment.
1029 */
1030#define HPAGE_RESV_OWNER (1UL << 0)
1031#define HPAGE_RESV_UNMAPPED (1UL << 1)
04f2cbe3 1032#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
84afd99b 1033
a1e78772
MG
1034/*
1035 * These helpers are used to track how many pages are reserved for
1036 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
1037 * is guaranteed to have their future faults succeed.
1038 *
8d9bfb26 1039 * With the exception of hugetlb_dup_vma_private() which is called at fork(),
a1e78772
MG
1040 * the reserve counters are updated with the hugetlb_lock held. It is safe
1041 * to reset the VMA at fork() time as it is not in use yet and there is no
1042 * chance of the global counters getting corrupted as a result of the values.
84afd99b
AW
1043 *
1044 * The private mapping reservation is represented in a subtly different
1045 * manner to a shared mapping. A shared mapping has a region map associated
1046 * with the underlying file, this region map represents the backing file
1047 * pages which have ever had a reservation assigned which this persists even
1048 * after the page is instantiated. A private mapping has a region map
1049 * associated with the original mmap which is attached to all VMAs which
1050 * reference it, this region map represents those offsets which have consumed
1051 * reservation ie. where pages have been instantiated.
a1e78772 1052 */
e7c4b0bf
AW
1053static unsigned long get_vma_private_data(struct vm_area_struct *vma)
1054{
1055 return (unsigned long)vma->vm_private_data;
1056}
1057
1058static void set_vma_private_data(struct vm_area_struct *vma,
1059 unsigned long value)
1060{
1061 vma->vm_private_data = (void *)value;
1062}
1063
e9fe92ae
MA
1064static void
1065resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map,
1066 struct hugetlb_cgroup *h_cg,
1067 struct hstate *h)
1068{
1069#ifdef CONFIG_CGROUP_HUGETLB
1070 if (!h_cg || !h) {
1071 resv_map->reservation_counter = NULL;
1072 resv_map->pages_per_hpage = 0;
1073 resv_map->css = NULL;
1074 } else {
1075 resv_map->reservation_counter =
1076 &h_cg->rsvd_hugepage[hstate_index(h)];
1077 resv_map->pages_per_hpage = pages_per_huge_page(h);
1078 resv_map->css = &h_cg->css;
1079 }
1080#endif
1081}
1082
9119a41e 1083struct resv_map *resv_map_alloc(void)
84afd99b
AW
1084{
1085 struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
5e911373
MK
1086 struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
1087
1088 if (!resv_map || !rg) {
1089 kfree(resv_map);
1090 kfree(rg);
84afd99b 1091 return NULL;
5e911373 1092 }
84afd99b
AW
1093
1094 kref_init(&resv_map->refs);
7b24d861 1095 spin_lock_init(&resv_map->lock);
84afd99b 1096 INIT_LIST_HEAD(&resv_map->regions);
bf491692 1097 init_rwsem(&resv_map->rw_sema);
84afd99b 1098
5e911373 1099 resv_map->adds_in_progress = 0;
e9fe92ae
MA
1100 /*
1101 * Initialize these to 0. On shared mappings, 0's here indicate these
1102 * fields don't do cgroup accounting. On private mappings, these will be
1103 * re-initialized to the proper values, to indicate that hugetlb cgroup
1104 * reservations are to be un-charged from here.
1105 */
1106 resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, NULL, NULL);
5e911373
MK
1107
1108 INIT_LIST_HEAD(&resv_map->region_cache);
1109 list_add(&rg->link, &resv_map->region_cache);
1110 resv_map->region_cache_count = 1;
1111
84afd99b
AW
1112 return resv_map;
1113}
1114
9119a41e 1115void resv_map_release(struct kref *ref)
84afd99b
AW
1116{
1117 struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
5e911373
MK
1118 struct list_head *head = &resv_map->region_cache;
1119 struct file_region *rg, *trg;
84afd99b
AW
1120
1121 /* Clear out any active regions before we release the map. */
feba16e2 1122 region_del(resv_map, 0, LONG_MAX);
5e911373
MK
1123
1124 /* ... and any entries left in the cache */
1125 list_for_each_entry_safe(rg, trg, head, link) {
1126 list_del(&rg->link);
1127 kfree(rg);
1128 }
1129
1130 VM_BUG_ON(resv_map->adds_in_progress);
1131
84afd99b
AW
1132 kfree(resv_map);
1133}
1134
4e35f483
JK
1135static inline struct resv_map *inode_resv_map(struct inode *inode)
1136{
f27a5136
MK
1137 /*
1138 * At inode evict time, i_mapping may not point to the original
1139 * address space within the inode. This original address space
1140 * contains the pointer to the resv_map. So, always use the
1141 * address space embedded within the inode.
1142 * The VERY common case is inode->mapping == &inode->i_data but,
1143 * this may not be true for device special inodes.
1144 */
600f111e 1145 return (struct resv_map *)(&inode->i_data)->i_private_data;
4e35f483
JK
1146}
1147
84afd99b 1148static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
a1e78772 1149{
81d1b09c 1150 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
4e35f483
JK
1151 if (vma->vm_flags & VM_MAYSHARE) {
1152 struct address_space *mapping = vma->vm_file->f_mapping;
1153 struct inode *inode = mapping->host;
1154
1155 return inode_resv_map(inode);
1156
1157 } else {
84afd99b
AW
1158 return (struct resv_map *)(get_vma_private_data(vma) &
1159 ~HPAGE_RESV_MASK);
4e35f483 1160 }
a1e78772
MG
1161}
1162
84afd99b 1163static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
a1e78772 1164{
81d1b09c
SL
1165 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
1166 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
a1e78772 1167
92fe9dcb 1168 set_vma_private_data(vma, (unsigned long)map);
04f2cbe3
MG
1169}
1170
1171static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
1172{
81d1b09c
SL
1173 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
1174 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
e7c4b0bf
AW
1175
1176 set_vma_private_data(vma, get_vma_private_data(vma) | flags);
04f2cbe3
MG
1177}
1178
1179static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
1180{
81d1b09c 1181 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
e7c4b0bf
AW
1182
1183 return (get_vma_private_data(vma) & flag) != 0;
a1e78772
MG
1184}
1185
187da0f8
MK
1186bool __vma_private_lock(struct vm_area_struct *vma)
1187{
1188 return !(vma->vm_flags & VM_MAYSHARE) &&
1189 get_vma_private_data(vma) & ~HPAGE_RESV_MASK &&
1190 is_vma_resv_set(vma, HPAGE_RESV_OWNER);
1191}
1192
8d9bfb26 1193void hugetlb_dup_vma_private(struct vm_area_struct *vma)
a1e78772 1194{
81d1b09c 1195 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
8d9bfb26
MK
1196 /*
1197 * Clear vm_private_data
612b8a31
MK
1198 * - For shared mappings this is a per-vma semaphore that may be
1199 * allocated in a subsequent call to hugetlb_vm_op_open.
1200 * Before clearing, make sure pointer is not associated with vma
1201 * as this will leak the structure. This is the case when called
1202 * via clear_vma_resv_huge_pages() and hugetlb_vm_op_open has already
1203 * been called to allocate a new structure.
8d9bfb26
MK
1204 * - For MAP_PRIVATE mappings, this is the reserve map which does
1205 * not apply to children. Faults generated by the children are
1206 * not guaranteed to succeed, even if read-only.
8d9bfb26 1207 */
612b8a31
MK
1208 if (vma->vm_flags & VM_MAYSHARE) {
1209 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
1210
1211 if (vma_lock && vma_lock->vma != vma)
1212 vma->vm_private_data = NULL;
1213 } else
1214 vma->vm_private_data = NULL;
a1e78772
MG
1215}
1216
550a7d60
MA
1217/*
1218 * Reset and decrement one ref on hugepage private reservation.
8651a137 1219 * Called with mm->mmap_lock writer semaphore held.
550a7d60
MA
1220 * This function should be only used by move_vma() and operate on
1221 * same sized vma. It should never come here with last ref on the
1222 * reservation.
1223 */
1224void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
1225{
1226 /*
1227 * Clear the old hugetlb private page reservation.
1228 * It has already been transferred to new_vma.
1229 *
1230 * During a mremap() operation of a hugetlb vma we call move_vma()
1231 * which copies vma into new_vma and unmaps vma. After the copy
1232 * operation both new_vma and vma share a reference to the resv_map
1233 * struct, and at that point vma is about to be unmapped. We don't
1234 * want to return the reservation to the pool at unmap of vma because
1235 * the reservation still lives on in new_vma, so simply decrement the
1236 * ref here and remove the resv_map reference from this vma.
1237 */
1238 struct resv_map *reservations = vma_resv_map(vma);
1239
afe041c2
BQM
1240 if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1241 resv_map_put_hugetlb_cgroup_uncharge_info(reservations);
550a7d60 1242 kref_put(&reservations->refs, resv_map_release);
afe041c2 1243 }
550a7d60 1244
8d9bfb26 1245 hugetlb_dup_vma_private(vma);
550a7d60
MA
1246}
1247
a1e78772 1248/* Returns true if the VMA has associated reserve pages */
559ec2f8 1249static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
a1e78772 1250{
af0ed73e
JK
1251 if (vma->vm_flags & VM_NORESERVE) {
1252 /*
1253 * This address is already reserved by other process(chg == 0),
1254 * so, we should decrement reserved count. Without decrementing,
1255 * reserve count remains after releasing inode, because this
1256 * allocated page will go into page cache and is regarded as
1257 * coming from reserved pool in releasing step. Currently, we
1258 * don't have any other solution to deal with this situation
1259 * properly, so add work-around here.
1260 */
1261 if (vma->vm_flags & VM_MAYSHARE && chg == 0)
559ec2f8 1262 return true;
af0ed73e 1263 else
559ec2f8 1264 return false;
af0ed73e 1265 }
a63884e9
JK
1266
1267 /* Shared mappings always use reserves */
1fb1b0e9
MK
1268 if (vma->vm_flags & VM_MAYSHARE) {
1269 /*
1270 * We know VM_NORESERVE is not set. Therefore, there SHOULD
1271 * be a region map for all pages. The only situation where
1272 * there is no region map is if a hole was punched via
7c8de358 1273 * fallocate. In this case, there really are no reserves to
1fb1b0e9
MK
1274 * use. This situation is indicated if chg != 0.
1275 */
1276 if (chg)
1277 return false;
1278 else
1279 return true;
1280 }
a63884e9
JK
1281
1282 /*
1283 * Only the process that called mmap() has reserves for
1284 * private mappings.
1285 */
67961f9d
MK
1286 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1287 /*
1288 * Like the shared case above, a hole punch or truncate
1289 * could have been performed on the private mapping.
1290 * Examine the value of chg to determine if reserves
1291 * actually exist or were previously consumed.
1292 * Very Subtle - The value of chg comes from a previous
1293 * call to vma_needs_reserves(). The reserve map for
1294 * private mappings has different (opposite) semantics
1295 * than that of shared mappings. vma_needs_reserves()
1296 * has already taken this difference in semantics into
1297 * account. Therefore, the meaning of chg is the same
1298 * as in the shared case above. Code could easily be
1299 * combined, but keeping it separate draws attention to
1300 * subtle differences.
1301 */
1302 if (chg)
1303 return false;
1304 else
1305 return true;
1306 }
a63884e9 1307
559ec2f8 1308 return false;
a1e78772
MG
1309}
1310
240d67a8 1311static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio)
1da177e4 1312{
240d67a8 1313 int nid = folio_nid(folio);
9487ca60
MK
1314
1315 lockdep_assert_held(&hugetlb_lock);
240d67a8 1316 VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
b65a4eda 1317
240d67a8 1318 list_move(&folio->lru, &h->hugepage_freelists[nid]);
a5516438
AK
1319 h->free_huge_pages++;
1320 h->free_huge_pages_node[nid]++;
240d67a8 1321 folio_set_hugetlb_freed(folio);
1da177e4
LT
1322}
1323
a36f1e90
SK
1324static struct folio *dequeue_hugetlb_folio_node_exact(struct hstate *h,
1325 int nid)
bf50bab2 1326{
a36f1e90 1327 struct folio *folio;
1a08ae36 1328 bool pin = !!(current->flags & PF_MEMALLOC_PIN);
bbe88753 1329
9487ca60 1330 lockdep_assert_held(&hugetlb_lock);
a36f1e90
SK
1331 list_for_each_entry(folio, &h->hugepage_freelists[nid], lru) {
1332 if (pin && !folio_is_longterm_pinnable(folio))
bbe88753 1333 continue;
bf50bab2 1334
a36f1e90 1335 if (folio_test_hwpoison(folio))
6664bfc8
WY
1336 continue;
1337
a36f1e90
SK
1338 list_move(&folio->lru, &h->hugepage_activelist);
1339 folio_ref_unfreeze(folio, 1);
1340 folio_clear_hugetlb_freed(folio);
6664bfc8
WY
1341 h->free_huge_pages--;
1342 h->free_huge_pages_node[nid]--;
a36f1e90 1343 return folio;
bbe88753
JK
1344 }
1345
6664bfc8 1346 return NULL;
bf50bab2
NH
1347}
1348
a36f1e90
SK
1349static struct folio *dequeue_hugetlb_folio_nodemask(struct hstate *h, gfp_t gfp_mask,
1350 int nid, nodemask_t *nmask)
94310cbc 1351{
3e59fcb0
MH
1352 unsigned int cpuset_mems_cookie;
1353 struct zonelist *zonelist;
1354 struct zone *zone;
1355 struct zoneref *z;
98fa15f3 1356 int node = NUMA_NO_NODE;
94310cbc 1357
3e59fcb0
MH
1358 zonelist = node_zonelist(nid, gfp_mask);
1359
1360retry_cpuset:
1361 cpuset_mems_cookie = read_mems_allowed_begin();
1362 for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
a36f1e90 1363 struct folio *folio;
3e59fcb0
MH
1364
1365 if (!cpuset_zone_allowed(zone, gfp_mask))
1366 continue;
1367 /*
1368 * no need to ask again on the same node. Pool is node rather than
1369 * zone aware
1370 */
1371 if (zone_to_nid(zone) == node)
1372 continue;
1373 node = zone_to_nid(zone);
94310cbc 1374
a36f1e90
SK
1375 folio = dequeue_hugetlb_folio_node_exact(h, node);
1376 if (folio)
1377 return folio;
94310cbc 1378 }
3e59fcb0
MH
1379 if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
1380 goto retry_cpuset;
1381
94310cbc
AK
1382 return NULL;
1383}
1384
8346d69d
XH
1385static unsigned long available_huge_pages(struct hstate *h)
1386{
1387 return h->free_huge_pages - h->resv_huge_pages;
1388}
1389
ff7d853b 1390static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
a5516438 1391 struct vm_area_struct *vma,
af0ed73e
JK
1392 unsigned long address, int avoid_reserve,
1393 long chg)
1da177e4 1394{
a36f1e90 1395 struct folio *folio = NULL;
480eccf9 1396 struct mempolicy *mpol;
04ec6264 1397 gfp_t gfp_mask;
3e59fcb0 1398 nodemask_t *nodemask;
04ec6264 1399 int nid;
1da177e4 1400
a1e78772
MG
1401 /*
1402 * A child process with MAP_PRIVATE mappings created by their parent
1403 * have no page reserves. This check ensures that reservations are
1404 * not "stolen". The child may still get SIGKILLed
1405 */
8346d69d 1406 if (!vma_has_reserves(vma, chg) && !available_huge_pages(h))
c0ff7453 1407 goto err;
a1e78772 1408
04f2cbe3 1409 /* If reserves cannot be used, ensure enough pages are in the pool */
8346d69d 1410 if (avoid_reserve && !available_huge_pages(h))
6eab04a8 1411 goto err;
04f2cbe3 1412
04ec6264
VB
1413 gfp_mask = htlb_alloc_mask(h);
1414 nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
cfcaa66f
BW
1415
1416 if (mpol_is_preferred_many(mpol)) {
a36f1e90
SK
1417 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
1418 nid, nodemask);
cfcaa66f
BW
1419
1420 /* Fallback to all nodes if page==NULL */
1421 nodemask = NULL;
1422 }
1423
a36f1e90
SK
1424 if (!folio)
1425 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
1426 nid, nodemask);
cfcaa66f 1427
a36f1e90
SK
1428 if (folio && !avoid_reserve && vma_has_reserves(vma, chg)) {
1429 folio_set_hugetlb_restore_reserve(folio);
3e59fcb0 1430 h->resv_huge_pages--;
1da177e4 1431 }
cc9a6c87 1432
52cd3b07 1433 mpol_cond_put(mpol);
ff7d853b 1434 return folio;
cc9a6c87
MG
1435
1436err:
cc9a6c87 1437 return NULL;
1da177e4
LT
1438}
1439
1cac6f2c
LC
1440/*
1441 * common helper functions for hstate_next_node_to_{alloc|free}.
1442 * We may have allocated or freed a huge page based on a different
1443 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
1444 * be outside of *nodes_allowed. Ensure that we use an allowed
1445 * node for alloc or free.
1446 */
1447static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
1448{
0edaf86c 1449 nid = next_node_in(nid, *nodes_allowed);
1cac6f2c
LC
1450 VM_BUG_ON(nid >= MAX_NUMNODES);
1451
1452 return nid;
1453}
1454
1455static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
1456{
1457 if (!node_isset(nid, *nodes_allowed))
1458 nid = next_node_allowed(nid, nodes_allowed);
1459 return nid;
1460}
1461
1462/*
1463 * returns the previously saved node ["this node"] from which to
1464 * allocate a persistent huge page for the pool and advance the
1465 * next node from which to allocate, handling wrap at end of node
1466 * mask.
1467 */
2e73ff23 1468static int hstate_next_node_to_alloc(int *next_node,
1cac6f2c
LC
1469 nodemask_t *nodes_allowed)
1470{
1471 int nid;
1472
1473 VM_BUG_ON(!nodes_allowed);
1474
2e73ff23
GL
1475 nid = get_valid_node_allowed(*next_node, nodes_allowed);
1476 *next_node = next_node_allowed(nid, nodes_allowed);
1cac6f2c
LC
1477
1478 return nid;
1479}
1480
1481/*
d5b43e96 1482 * helper for remove_pool_hugetlb_folio() - return the previously saved
1cac6f2c
LC
1483 * node ["this node"] from which to free a huge page. Advance the
1484 * next node id whether or not we find a free huge page to free so
1485 * that the next attempt to free addresses the next node.
1486 */
1487static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
1488{
1489 int nid;
1490
1491 VM_BUG_ON(!nodes_allowed);
1492
1493 nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
1494 h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
1495
1496 return nid;
1497}
1498
2e73ff23 1499#define for_each_node_mask_to_alloc(next_node, nr_nodes, node, mask) \
1cac6f2c
LC
1500 for (nr_nodes = nodes_weight(*mask); \
1501 nr_nodes > 0 && \
2e73ff23 1502 ((node = hstate_next_node_to_alloc(next_node, mask)) || 1); \
1cac6f2c
LC
1503 nr_nodes--)
1504
1505#define for_each_node_mask_to_free(hs, nr_nodes, node, mask) \
1506 for (nr_nodes = nodes_weight(*mask); \
1507 nr_nodes > 0 && \
1508 ((node = hstate_next_node_to_free(hs, mask)) || 1); \
1509 nr_nodes--)
1510
8531fc6f 1511/* used to demote non-gigantic_huge pages as well */
911565b8 1512static void __destroy_compound_gigantic_folio(struct folio *folio,
34d9e35b 1513 unsigned int order, bool demote)
944d9fec
LC
1514{
1515 int i;
1516 int nr_pages = 1 << order;
14455eab 1517 struct page *p;
944d9fec 1518
46f27228 1519 atomic_set(&folio->_entire_mapcount, 0);
05c5323b 1520 atomic_set(&folio->_large_mapcount, 0);
94688e8e 1521 atomic_set(&folio->_pincount, 0);
47e29d32 1522
14455eab 1523 for (i = 1; i < nr_pages; i++) {
911565b8 1524 p = folio_page(folio, i);
6c141973 1525 p->flags &= ~PAGE_FLAGS_CHECK_AT_FREE;
a01f4390 1526 p->mapping = NULL;
1d798ca3 1527 clear_compound_head(p);
34d9e35b
MK
1528 if (!demote)
1529 set_page_refcounted(p);
944d9fec
LC
1530 }
1531
911565b8 1532 __folio_clear_head(folio);
944d9fec
LC
1533}
1534
911565b8 1535static void destroy_compound_hugetlb_folio_for_demote(struct folio *folio,
8531fc6f
MK
1536 unsigned int order)
1537{
911565b8 1538 __destroy_compound_gigantic_folio(folio, order, true);
8531fc6f
MK
1539}
1540
1541#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
911565b8 1542static void destroy_compound_gigantic_folio(struct folio *folio,
34d9e35b
MK
1543 unsigned int order)
1544{
911565b8 1545 __destroy_compound_gigantic_folio(folio, order, false);
34d9e35b
MK
1546}
1547
7f325a8d 1548static void free_gigantic_folio(struct folio *folio, unsigned int order)
944d9fec 1549{
cf11e85f
RG
1550 /*
1551 * If the page isn't allocated using the cma allocator,
1552 * cma_release() returns false.
1553 */
dbda8fea 1554#ifdef CONFIG_CMA
7f325a8d
SK
1555 int nid = folio_nid(folio);
1556
1557 if (cma_release(hugetlb_cma[nid], &folio->page, 1 << order))
cf11e85f 1558 return;
dbda8fea 1559#endif
cf11e85f 1560
7f325a8d 1561 free_contig_range(folio_pfn(folio), 1 << order);
944d9fec
LC
1562}
1563
4eb0716e 1564#ifdef CONFIG_CONTIG_ALLOC
19fc1a7e 1565static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
d9cc948f 1566 int nid, nodemask_t *nodemask)
944d9fec 1567{
19fc1a7e 1568 struct page *page;
04adbc3f 1569 unsigned long nr_pages = pages_per_huge_page(h);
953f064a
LX
1570 if (nid == NUMA_NO_NODE)
1571 nid = numa_mem_id();
944d9fec 1572
dbda8fea
BS
1573#ifdef CONFIG_CMA
1574 {
cf11e85f
RG
1575 int node;
1576
953f064a
LX
1577 if (hugetlb_cma[nid]) {
1578 page = cma_alloc(hugetlb_cma[nid], nr_pages,
1579 huge_page_order(h), true);
cf11e85f 1580 if (page)
19fc1a7e 1581 return page_folio(page);
cf11e85f 1582 }
953f064a
LX
1583
1584 if (!(gfp_mask & __GFP_THISNODE)) {
1585 for_each_node_mask(node, *nodemask) {
1586 if (node == nid || !hugetlb_cma[node])
1587 continue;
1588
1589 page = cma_alloc(hugetlb_cma[node], nr_pages,
1590 huge_page_order(h), true);
1591 if (page)
19fc1a7e 1592 return page_folio(page);
953f064a
LX
1593 }
1594 }
cf11e85f 1595 }
dbda8fea 1596#endif
cf11e85f 1597
19fc1a7e
SK
1598 page = alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
1599 return page ? page_folio(page) : NULL;
944d9fec
LC
1600}
1601
4eb0716e 1602#else /* !CONFIG_CONTIG_ALLOC */
19fc1a7e 1603static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
4eb0716e
AG
1604 int nid, nodemask_t *nodemask)
1605{
1606 return NULL;
1607}
1608#endif /* CONFIG_CONTIG_ALLOC */
944d9fec 1609
e1073d1e 1610#else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
19fc1a7e 1611static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
4eb0716e
AG
1612 int nid, nodemask_t *nodemask)
1613{
1614 return NULL;
1615}
7f325a8d
SK
1616static inline void free_gigantic_folio(struct folio *folio,
1617 unsigned int order) { }
911565b8 1618static inline void destroy_compound_gigantic_folio(struct folio *folio,
d00181b9 1619 unsigned int order) { }
944d9fec
LC
1620#endif
1621
6eb4e88a 1622/*
32c87719 1623 * Remove hugetlb folio from lists.
42a346b4
MWO
1624 * If vmemmap exists for the folio, clear the hugetlb flag so that the
1625 * folio appears as just a compound page. Otherwise, wait until after
1626 * allocating vmemmap to clear the flag.
34d9e35b 1627 *
cfd5082b 1628 * A reference is held on the folio, except in the case of demote.
6eb4e88a
MK
1629 *
1630 * Must be called with hugetlb lock held.
1631 */
cfd5082b 1632static void __remove_hugetlb_folio(struct hstate *h, struct folio *folio,
34d9e35b
MK
1633 bool adjust_surplus,
1634 bool demote)
6eb4e88a 1635{
cfd5082b 1636 int nid = folio_nid(folio);
6eb4e88a 1637
f074732d
SK
1638 VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio(folio), folio);
1639 VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio_rsvd(folio), folio);
6eb4e88a 1640
9487ca60 1641 lockdep_assert_held(&hugetlb_lock);
6eb4e88a
MK
1642 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
1643 return;
1644
cfd5082b 1645 list_del(&folio->lru);
6eb4e88a 1646
cfd5082b 1647 if (folio_test_hugetlb_freed(folio)) {
6eb4e88a
MK
1648 h->free_huge_pages--;
1649 h->free_huge_pages_node[nid]--;
1650 }
1651 if (adjust_surplus) {
1652 h->surplus_huge_pages--;
1653 h->surplus_huge_pages_node[nid]--;
1654 }
1655
e32d20c0 1656 /*
42a346b4 1657 * We can only clear the hugetlb flag after allocating vmemmap
32c87719
MK
1658 * pages. Otherwise, someone (memory error handling) may try to write
1659 * to tail struct pages.
1660 */
1661 if (!folio_test_hugetlb_vmemmap_optimized(folio))
42a346b4 1662 __folio_clear_hugetlb(folio);
32c87719
MK
1663
1664 /*
1665 * In the case of demote we do not ref count the page as it will soon
1666 * be turned into a page of smaller size.
e32d20c0 1667 */
34d9e35b 1668 if (!demote)
cfd5082b 1669 folio_ref_unfreeze(folio, 1);
6eb4e88a
MK
1670
1671 h->nr_huge_pages--;
1672 h->nr_huge_pages_node[nid]--;
1673}
1674
cfd5082b 1675static void remove_hugetlb_folio(struct hstate *h, struct folio *folio,
34d9e35b
MK
1676 bool adjust_surplus)
1677{
cfd5082b 1678 __remove_hugetlb_folio(h, folio, adjust_surplus, false);
34d9e35b
MK
1679}
1680
cfd5082b 1681static void remove_hugetlb_folio_for_demote(struct hstate *h, struct folio *folio,
8531fc6f
MK
1682 bool adjust_surplus)
1683{
cfd5082b 1684 __remove_hugetlb_folio(h, folio, adjust_surplus, true);
8531fc6f
MK
1685}
1686
2f6c57d6 1687static void add_hugetlb_folio(struct hstate *h, struct folio *folio,
ad2fa371
MS
1688 bool adjust_surplus)
1689{
1690 int zeroed;
2f6c57d6 1691 int nid = folio_nid(folio);
ad2fa371 1692
2f6c57d6 1693 VM_BUG_ON_FOLIO(!folio_test_hugetlb_vmemmap_optimized(folio), folio);
ad2fa371
MS
1694
1695 lockdep_assert_held(&hugetlb_lock);
1696
2f6c57d6 1697 INIT_LIST_HEAD(&folio->lru);
ad2fa371
MS
1698 h->nr_huge_pages++;
1699 h->nr_huge_pages_node[nid]++;
1700
1701 if (adjust_surplus) {
1702 h->surplus_huge_pages++;
1703 h->surplus_huge_pages_node[nid]++;
1704 }
1705
d99e3140 1706 __folio_set_hugetlb(folio);
2f6c57d6 1707 folio_change_private(folio, NULL);
a9e1eab2 1708 /*
2f6c57d6
SK
1709 * We have to set hugetlb_vmemmap_optimized again as above
1710 * folio_change_private(folio, NULL) cleared it.
a9e1eab2 1711 */
2f6c57d6 1712 folio_set_hugetlb_vmemmap_optimized(folio);
ad2fa371
MS
1713
1714 /*
2f6c57d6 1715 * This folio is about to be managed by the hugetlb allocator and
b65a4eda
MK
1716 * should have no users. Drop our reference, and check for others
1717 * just in case.
ad2fa371 1718 */
2f6c57d6
SK
1719 zeroed = folio_put_testzero(folio);
1720 if (unlikely(!zeroed))
b65a4eda 1721 /*
454a00c4
MWO
1722 * It is VERY unlikely someone else has taken a ref
1723 * on the folio. In this case, we simply return as
1724 * free_huge_folio() will be called when this other ref
1725 * is dropped.
b65a4eda
MK
1726 */
1727 return;
1728
51718e25 1729 arch_clear_hugetlb_flags(folio);
240d67a8 1730 enqueue_hugetlb_folio(h, folio);
ad2fa371
MS
1731}
1732
6f6956cf
SK
1733static void __update_and_free_hugetlb_folio(struct hstate *h,
1734 struct folio *folio)
6af2acb6 1735{
42a346b4 1736 bool clear_flag = folio_test_hugetlb_vmemmap_optimized(folio);
a5516438 1737
4eb0716e 1738 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
944d9fec 1739 return;
18229df5 1740
161df60e
NH
1741 /*
1742 * If we don't know which subpages are hwpoisoned, we can't free
1743 * the hugepage, so it's leaked intentionally.
1744 */
7f325a8d 1745 if (folio_test_hugetlb_raw_hwp_unreliable(folio))
161df60e
NH
1746 return;
1747
d8f5f7e4 1748 /*
42a346b4 1749 * If folio is not vmemmap optimized (!clear_flag), then the folio
c5ad3233 1750 * is no longer identified as a hugetlb page. hugetlb_vmemmap_restore_folio
d8f5f7e4
MK
1751 * can only be passed hugetlb pages and will BUG otherwise.
1752 */
42a346b4 1753 if (clear_flag && hugetlb_vmemmap_restore_folio(h, folio)) {
ad2fa371
MS
1754 spin_lock_irq(&hugetlb_lock);
1755 /*
1756 * If we cannot allocate vmemmap pages, just refuse to free the
1757 * page and put the page back on the hugetlb free list and treat
1758 * as a surplus page.
1759 */
7f325a8d 1760 add_hugetlb_folio(h, folio, true);
ad2fa371
MS
1761 spin_unlock_irq(&hugetlb_lock);
1762 return;
1763 }
1764
161df60e
NH
1765 /*
1766 * Move PageHWPoison flag from head page to the raw error pages,
1767 * which makes any healthy subpages reusable.
1768 */
911565b8 1769 if (unlikely(folio_test_hwpoison(folio)))
2ff6cece 1770 folio_clear_hugetlb_hwpoison(folio);
161df60e 1771
32c87719
MK
1772 /*
1773 * If vmemmap pages were allocated above, then we need to clear the
42a346b4 1774 * hugetlb flag under the hugetlb lock.
32c87719 1775 */
52ccdde1 1776 if (folio_test_hugetlb(folio)) {
32c87719 1777 spin_lock_irq(&hugetlb_lock);
42a346b4 1778 __folio_clear_hugetlb(folio);
32c87719
MK
1779 spin_unlock_irq(&hugetlb_lock);
1780 }
1781
a01f4390
MK
1782 /*
1783 * Non-gigantic pages demoted from CMA allocated gigantic pages
7f325a8d 1784 * need to be given back to CMA in free_gigantic_folio.
a01f4390
MK
1785 */
1786 if (hstate_is_gigantic(h) ||
2f6c57d6 1787 hugetlb_cma_folio(folio, huge_page_order(h))) {
911565b8 1788 destroy_compound_gigantic_folio(folio, huge_page_order(h));
7f325a8d 1789 free_gigantic_folio(folio, huge_page_order(h));
944d9fec 1790 } else {
b7b098cf
MWO
1791 INIT_LIST_HEAD(&folio->_deferred_list);
1792 folio_put(folio);
944d9fec 1793 }
6af2acb6
AL
1794}
1795
b65d4adb 1796/*
d6ef19e2 1797 * As update_and_free_hugetlb_folio() can be called under any context, we cannot
b65d4adb
MS
1798 * use GFP_KERNEL to allocate vmemmap pages. However, we can defer the
1799 * actual freeing in a workqueue to avoid using GFP_ATOMIC to allocate
1800 * the vmemmap pages.
1801 *
1802 * free_hpage_workfn() locklessly retrieves the linked list of pages to be
1803 * freed and frees them one-by-one. As the page->mapping pointer is going
1804 * to be cleared in free_hpage_workfn() anyway, it is reused as the llist_node
1805 * structure of a lockless linked list of huge pages to be freed.
1806 */
1807static LLIST_HEAD(hpage_freelist);
1808
1809static void free_hpage_workfn(struct work_struct *work)
1810{
1811 struct llist_node *node;
1812
1813 node = llist_del_all(&hpage_freelist);
1814
1815 while (node) {
3ec145f9 1816 struct folio *folio;
b65d4adb
MS
1817 struct hstate *h;
1818
3ec145f9
MWO
1819 folio = container_of((struct address_space **)node,
1820 struct folio, mapping);
b65d4adb 1821 node = node->next;
3ec145f9 1822 folio->mapping = NULL;
b65d4adb 1823 /*
affd26b1
SK
1824 * The VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio) in
1825 * folio_hstate() is going to trigger because a previous call to
9c5ccf2d
MWO
1826 * remove_hugetlb_folio() will clear the hugetlb bit, so do
1827 * not use folio_hstate() directly.
b65d4adb 1828 */
3ec145f9 1829 h = size_to_hstate(folio_size(folio));
b65d4adb 1830
3ec145f9 1831 __update_and_free_hugetlb_folio(h, folio);
b65d4adb
MS
1832
1833 cond_resched();
1834 }
1835}
1836static DECLARE_WORK(free_hpage_work, free_hpage_workfn);
1837
1838static inline void flush_free_hpage_work(struct hstate *h)
1839{
6213834c 1840 if (hugetlb_vmemmap_optimizable(h))
b65d4adb
MS
1841 flush_work(&free_hpage_work);
1842}
1843
d6ef19e2 1844static void update_and_free_hugetlb_folio(struct hstate *h, struct folio *folio,
b65d4adb
MS
1845 bool atomic)
1846{
d6ef19e2 1847 if (!folio_test_hugetlb_vmemmap_optimized(folio) || !atomic) {
6f6956cf 1848 __update_and_free_hugetlb_folio(h, folio);
b65d4adb
MS
1849 return;
1850 }
1851
1852 /*
1853 * Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap pages.
1854 *
1855 * Only call schedule_work() if hpage_freelist is previously
1856 * empty. Otherwise, schedule_work() had been called but the workfn
1857 * hasn't retrieved the list yet.
1858 */
d6ef19e2 1859 if (llist_add((struct llist_node *)&folio->mapping, &hpage_freelist))
b65d4adb
MS
1860 schedule_work(&free_hpage_work);
1861}
1862
cfb8c750
MK
1863static void bulk_vmemmap_restore_error(struct hstate *h,
1864 struct list_head *folio_list,
1865 struct list_head *non_hvo_folios)
10c6ec49 1866{
04bbfd84 1867 struct folio *folio, *t_folio;
10c6ec49 1868
cfb8c750
MK
1869 if (!list_empty(non_hvo_folios)) {
1870 /*
1871 * Free any restored hugetlb pages so that restore of the
1872 * entire list can be retried.
1873 * The idea is that in the common case of ENOMEM errors freeing
1874 * hugetlb pages with vmemmap we will free up memory so that we
1875 * can allocate vmemmap for more hugetlb pages.
1876 */
1877 list_for_each_entry_safe(folio, t_folio, non_hvo_folios, lru) {
1878 list_del(&folio->lru);
1879 spin_lock_irq(&hugetlb_lock);
42a346b4 1880 __folio_clear_hugetlb(folio);
cfb8c750
MK
1881 spin_unlock_irq(&hugetlb_lock);
1882 update_and_free_hugetlb_folio(h, folio, false);
1883 cond_resched();
1884 }
1885 } else {
1886 /*
1887 * In the case where there are no folios which can be
1888 * immediately freed, we loop through the list trying to restore
1889 * vmemmap individually in the hope that someone elsewhere may
1890 * have done something to cause success (such as freeing some
1891 * memory). If unable to restore a hugetlb page, the hugetlb
1892 * page is made a surplus page and removed from the list.
1893 * If we are able to restore vmemmap and free one hugetlb page, we
1894 * quit processing the list to retry the bulk operation.
1895 */
1896 list_for_each_entry_safe(folio, t_folio, folio_list, lru)
c5ad3233 1897 if (hugetlb_vmemmap_restore_folio(h, folio)) {
cfb8c750 1898 list_del(&folio->lru);
d2cf88c2
MK
1899 spin_lock_irq(&hugetlb_lock);
1900 add_hugetlb_folio(h, folio, true);
1901 spin_unlock_irq(&hugetlb_lock);
cfb8c750
MK
1902 } else {
1903 list_del(&folio->lru);
1904 spin_lock_irq(&hugetlb_lock);
42a346b4 1905 __folio_clear_hugetlb(folio);
cfb8c750
MK
1906 spin_unlock_irq(&hugetlb_lock);
1907 update_and_free_hugetlb_folio(h, folio, false);
1908 cond_resched();
1909 break;
1910 }
d2cf88c2 1911 }
cfb8c750
MK
1912}
1913
1914static void update_and_free_pages_bulk(struct hstate *h,
1915 struct list_head *folio_list)
1916{
1917 long ret;
1918 struct folio *folio, *t_folio;
1919 LIST_HEAD(non_hvo_folios);
d2cf88c2
MK
1920
1921 /*
cfb8c750
MK
1922 * First allocate required vmemmap (if necessary) for all folios.
1923 * Carefully handle errors and free up any available hugetlb pages
1924 * in an effort to make forward progress.
d2cf88c2 1925 */
cfb8c750
MK
1926retry:
1927 ret = hugetlb_vmemmap_restore_folios(h, folio_list, &non_hvo_folios);
1928 if (ret < 0) {
1929 bulk_vmemmap_restore_error(h, folio_list, &non_hvo_folios);
1930 goto retry;
1931 }
1932
1933 /*
1934 * At this point, list should be empty, ret should be >= 0 and there
1935 * should only be pages on the non_hvo_folios list.
1936 * Do note that the non_hvo_folios list could be empty.
1937 * Without HVO enabled, ret will be 0 and there is no need to call
42a346b4 1938 * __folio_clear_hugetlb as this was done previously.
cfb8c750
MK
1939 */
1940 VM_WARN_ON(!list_empty(folio_list));
1941 VM_WARN_ON(ret < 0);
1942 if (!list_empty(&non_hvo_folios) && ret) {
d2cf88c2 1943 spin_lock_irq(&hugetlb_lock);
cfb8c750 1944 list_for_each_entry(folio, &non_hvo_folios, lru)
42a346b4 1945 __folio_clear_hugetlb(folio);
d2cf88c2
MK
1946 spin_unlock_irq(&hugetlb_lock);
1947 }
1948
cfb8c750 1949 list_for_each_entry_safe(folio, t_folio, &non_hvo_folios, lru) {
d6ef19e2 1950 update_and_free_hugetlb_folio(h, folio, false);
10c6ec49
MK
1951 cond_resched();
1952 }
1953}
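/*
 * Illustrative sketch, not part of mm/hugetlb.c (kept under #if 0): the
 * usual caller pattern for update_and_free_pages_bulk().  Folios are
 * collected on a private list under hugetlb_lock via remove_hugetlb_folio(),
 * the lock is dropped, and the whole batch is freed in one call, which is
 * how return_unused_surplus_pages() further below uses it.
 */
#if 0
static void example_free_batch(struct hstate *h, int nid, unsigned long count)
{
	LIST_HEAD(page_list);
	struct folio *folio;

	spin_lock_irq(&hugetlb_lock);
	while (count-- && !list_empty(&h->hugepage_freelists[nid])) {
		folio = list_first_entry(&h->hugepage_freelists[nid],
					 struct folio, lru);
		/* Unaccount the folio; it is no longer a pool page. */
		remove_hugetlb_folio(h, folio, false);
		list_add(&folio->lru, &page_list);
	}
	spin_unlock_irq(&hugetlb_lock);

	/* Restores vmemmap as needed and returns the memory to buddy/CMA. */
	update_and_free_pages_bulk(h, &page_list);
}
#endif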
1954
e5ff2159
AK
1955struct hstate *size_to_hstate(unsigned long size)
1956{
1957 struct hstate *h;
1958
1959 for_each_hstate(h) {
1960 if (huge_page_size(h) == size)
1961 return h;
1962 }
1963 return NULL;
1964}
1965
454a00c4 1966void free_huge_folio(struct folio *folio)
27a85ef1 1967{
a5516438
AK
1968 /*
1969 * Can't pass hstate in here because it is called from the
42a346b4 1970 * generic mm code.
a5516438 1971 */
0356c4b9
SK
1972 struct hstate *h = folio_hstate(folio);
1973 int nid = folio_nid(folio);
1974 struct hugepage_subpool *spool = hugetlb_folio_subpool(folio);
07443a85 1975 bool restore_reserve;
db71ef79 1976 unsigned long flags;
27a85ef1 1977
0356c4b9
SK
1978 VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
1979 VM_BUG_ON_FOLIO(folio_mapcount(folio), folio);
8ace22bc 1980
0356c4b9
SK
1981 hugetlb_set_folio_subpool(folio, NULL);
1982 if (folio_test_anon(folio))
1983 __ClearPageAnonExclusive(&folio->page);
1984 folio->mapping = NULL;
1985 restore_reserve = folio_test_hugetlb_restore_reserve(folio);
1986 folio_clear_hugetlb_restore_reserve(folio);
27a85ef1 1987
1c5ecae3 1988 /*
d6995da3 1989 * If HPageRestoreReserve was set on page, page allocation consumed a
0919e1b6
MK
1990 * reservation. If the page was associated with a subpool, there
1991 * would have been a page reserved in the subpool before allocation
1992 * via hugepage_subpool_get_pages(). Since we are 'restoring' the
6c26d310 1993 * reservation, do not call hugepage_subpool_put_pages() as this will
0919e1b6 1994 * remove the reserved page from the subpool.
1c5ecae3 1995 */
0919e1b6
MK
1996 if (!restore_reserve) {
1997 /*
1998 * A return code of zero implies that the subpool will be
1999 * under its minimum size if the reservation is not restored
2000 * after page is free. Therefore, force restore_reserve
2001 * operation.
2002 */
2003 if (hugepage_subpool_put_pages(spool, 1) == 0)
2004 restore_reserve = true;
2005 }
1c5ecae3 2006
db71ef79 2007 spin_lock_irqsave(&hugetlb_lock, flags);
0356c4b9 2008 folio_clear_hugetlb_migratable(folio);
d4ab0316
SK
2009 hugetlb_cgroup_uncharge_folio(hstate_index(h),
2010 pages_per_huge_page(h), folio);
2011 hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
2012 pages_per_huge_page(h), folio);
8cba9576 2013 mem_cgroup_uncharge(folio);
07443a85
JK
2014 if (restore_reserve)
2015 h->resv_huge_pages++;
2016
0356c4b9 2017 if (folio_test_hugetlb_temporary(folio)) {
cfd5082b 2018 remove_hugetlb_folio(h, folio, false);
db71ef79 2019 spin_unlock_irqrestore(&hugetlb_lock, flags);
d6ef19e2 2020 update_and_free_hugetlb_folio(h, folio, true);
ab5ac90a 2021 } else if (h->surplus_huge_pages_node[nid]) {
0edaecfa 2022 /* remove the page from active list */
cfd5082b 2023 remove_hugetlb_folio(h, folio, true);
db71ef79 2024 spin_unlock_irqrestore(&hugetlb_lock, flags);
d6ef19e2 2025 update_and_free_hugetlb_folio(h, folio, true);
7893d1d5 2026 } else {
51718e25 2027 arch_clear_hugetlb_flags(folio);
240d67a8 2028 enqueue_hugetlb_folio(h, folio);
db71ef79 2029 spin_unlock_irqrestore(&hugetlb_lock, flags);
c77c0a8a 2030 }
c77c0a8a
WL
2031}
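/*
 * Worked example (illustrative only): suppose a subpool was created with
 * min_hpages = 2 and both of those reserved pages are currently in use.
 * Freeing one of them without hugetlb_restore_reserve set reaches
 * hugepage_subpool_put_pages(spool, 1) above; handing the reservation back
 * would drop the subpool below its minimum, so the call returns 0 and
 * restore_reserve is forced to true.  h->resv_huge_pages is then
 * incremented and the reservation keeps backing the subpool minimum
 * instead of being lost.
 */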
2032
d3d99fcc
OS
2033/*
2034 * Must be called with the hugetlb lock held
2035 */
2036static void __prep_account_new_huge_page(struct hstate *h, int nid)
2037{
2038 lockdep_assert_held(&hugetlb_lock);
2039 h->nr_huge_pages++;
2040 h->nr_huge_pages_node[nid]++;
2041}
2042
d67e32f2 2043static void init_new_hugetlb_folio(struct hstate *h, struct folio *folio)
b7ba30c6 2044{
d99e3140 2045 __folio_set_hugetlb(folio);
de656ed3 2046 INIT_LIST_HEAD(&folio->lru);
de656ed3
SK
2047 hugetlb_set_folio_subpool(folio, NULL);
2048 set_hugetlb_cgroup(folio, NULL);
2049 set_hugetlb_cgroup_rsvd(folio, NULL);
d3d99fcc
OS
2050}
2051
d67e32f2
MK
2052static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio)
2053{
2054 init_new_hugetlb_folio(h, folio);
c5ad3233 2055 hugetlb_vmemmap_optimize_folio(h, folio);
d67e32f2
MK
2056}
2057
d1c60955 2058static void prep_new_hugetlb_folio(struct hstate *h, struct folio *folio, int nid)
d3d99fcc 2059{
de656ed3 2060 __prep_new_hugetlb_folio(h, folio);
db71ef79 2061 spin_lock_irq(&hugetlb_lock);
d3d99fcc 2062 __prep_account_new_huge_page(h, nid);
db71ef79 2063 spin_unlock_irq(&hugetlb_lock);
b7ba30c6
AK
2064}
2065
d1c60955
SK
2066static bool __prep_compound_gigantic_folio(struct folio *folio,
2067 unsigned int order, bool demote)
20a0307c 2068{
7118fc29 2069 int i, j;
20a0307c 2070 int nr_pages = 1 << order;
14455eab 2071 struct page *p;
20a0307c 2072
d1c60955 2073 __folio_clear_reserved(folio);
2b21624f 2074 for (i = 0; i < nr_pages; i++) {
d1c60955 2075 p = folio_page(folio, i);
14455eab 2076
ef5a22be
AA
2077 /*
2078 * For gigantic hugepages allocated through bootmem at
2079 * boot, it's safer to be consistent with the not-gigantic
2080 * hugepages and clear the PG_reserved bit from all tail pages
7c8de358 2081 * too. Otherwise drivers using get_user_pages() to access tail
ef5a22be
AA
2082 * pages may get the reference counting wrong if they see
2083 * PG_reserved set on a tail page (despite the head page not
2084 * having PG_reserved set). Enforcing this consistency between
2085 * head and tail pages allows drivers to optimize away a check
2086 * on the head page when they need to know if put_page() is needed
2087 * after get_user_pages().
2088 */
7fb0728a
MK
2089 if (i != 0) /* head page cleared above */
2090 __ClearPageReserved(p);
7118fc29
MK
2091 /*
2092 * Subtle and very unlikely
2093 *
2094 * Gigantic 'page allocators' such as memblock or cma will
2095 * return a set of pages with each page ref counted. We need
2096 * to turn this set of pages into a compound page with tail
2097 * page ref counts set to zero. Code such as speculative page
2098 * cache adding could take a ref on a 'to be' tail page.
2099 * We need to respect any increased ref count, and only set
2100 * the ref count to zero if count is currently 1. If count
416d85ed
MK
2101 * is not 1, we return an error. An error return indicates
2102 * the set of pages can not be converted to a gigantic page.
2103 * The caller who allocated the pages should then discard the
2104 * pages using the appropriate free interface.
34d9e35b
MK
2105 *
2106 * In the case of demote, the ref count will be zero.
7118fc29 2107 */
34d9e35b
MK
2108 if (!demote) {
2109 if (!page_ref_freeze(p, 1)) {
2110 pr_warn("HugeTLB page can not be used due to unexpected inflated ref count\n");
2111 goto out_error;
2112 }
2113 } else {
2114 VM_BUG_ON_PAGE(page_count(p), p);
7118fc29 2115 }
2b21624f 2116 if (i != 0)
d1c60955 2117 set_compound_head(p, &folio->page);
20a0307c 2118 }
e3b7bf97 2119 __folio_set_head(folio);
42a346b4 2120 /* we rely on prep_new_hugetlb_folio to set the hugetlb flag */
e3b7bf97 2121 folio_set_order(folio, order);
46f27228 2122 atomic_set(&folio->_entire_mapcount, -1);
05c5323b 2123 atomic_set(&folio->_large_mapcount, -1);
94688e8e 2124 atomic_set(&folio->_pincount, 0);
7118fc29
MK
2125 return true;
2126
2127out_error:
2b21624f
MK
2128 /* undo page modifications made above */
2129 for (j = 0; j < i; j++) {
d1c60955 2130 p = folio_page(folio, j);
2b21624f
MK
2131 if (j != 0)
2132 clear_compound_head(p);
7118fc29
MK
2133 set_page_refcounted(p);
2134 }
2135 /* need to clear PG_reserved on remaining tail pages */
14455eab 2136 for (; j < nr_pages; j++) {
d1c60955 2137 p = folio_page(folio, j);
7118fc29 2138 __ClearPageReserved(p);
14455eab 2139 }
7118fc29 2140 return false;
20a0307c
WF
2141}
2142
d1c60955
SK
2143static bool prep_compound_gigantic_folio(struct folio *folio,
2144 unsigned int order)
34d9e35b 2145{
d1c60955 2146 return __prep_compound_gigantic_folio(folio, order, false);
34d9e35b
MK
2147}
2148
d1c60955 2149static bool prep_compound_gigantic_folio_for_demote(struct folio *folio,
8531fc6f
MK
2150 unsigned int order)
2151{
d1c60955 2152 return __prep_compound_gigantic_folio(folio, order, true);
8531fc6f
MK
2153}
2154
c0d0381a
MK
2155/*
2156 * Find and lock address space (mapping) in write mode.
2157 *
336bf30e
MK
2158 * Upon entry, the page is locked which means that page_mapping() is
2159 * stable. Due to locking order, we can only trylock_write. If we
2160 * cannot get the lock, simply return NULL to the caller.
c0d0381a
MK
2161 */
2162struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)
2163{
336bf30e 2164 struct address_space *mapping = page_mapping(hpage);
c0d0381a 2165
c0d0381a
MK
2166 if (!mapping)
2167 return mapping;
2168
c0d0381a
MK
2169 if (i_mmap_trylock_write(mapping))
2170 return mapping;
2171
336bf30e 2172 return NULL;
c0d0381a
MK
2173}
2174
19fc1a7e 2175static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h,
f60858f9
MK
2176 gfp_t gfp_mask, int nid, nodemask_t *nmask,
2177 nodemask_t *node_alloc_noretry)
1da177e4 2178{
af0fb9df 2179 int order = huge_page_order(h);
f6a8dd98 2180 struct folio *folio;
f60858f9 2181 bool alloc_try_hard = true;
2b21624f 2182 bool retry = true;
f96efd58 2183
f60858f9 2184 /*
f6a8dd98
MWO
2185 * By default we always try hard to allocate the folio with
2186 * __GFP_RETRY_MAYFAIL flag. However, if we are allocating folios in
f60858f9
MK
2187 * a loop (to adjust global huge page counts) and previous allocation
2188 * failed, do not continue to try hard on the same node. Use the
2189 * node_alloc_noretry bitmap to manage this state information.
2190 */
2191 if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry))
2192 alloc_try_hard = false;
2193 gfp_mask |= __GFP_COMP|__GFP_NOWARN;
2194 if (alloc_try_hard)
2195 gfp_mask |= __GFP_RETRY_MAYFAIL;
af0fb9df
MH
2196 if (nid == NUMA_NO_NODE)
2197 nid = numa_mem_id();
2b21624f 2198retry:
f6a8dd98 2199 folio = __folio_alloc(gfp_mask, order, nid, nmask);
2b21624f 2200
f6a8dd98
MWO
2201 if (folio && !folio_ref_freeze(folio, 1)) {
2202 folio_put(folio);
2b21624f
MK
2203 if (retry) { /* retry once */
2204 retry = false;
2205 goto retry;
2206 }
2207 /* WOW! twice in a row. */
f6a8dd98
MWO
2208 pr_warn("HugeTLB unexpected inflated folio ref count\n");
2209 folio = NULL;
2b21624f
MK
2210 }
2211
f60858f9 2212 /*
f6a8dd98
MWO
2213 * If we did not specify __GFP_RETRY_MAYFAIL, but still got a
2214 * folio this indicates an overall state change. Clear bit so
2215 * that we resume normal 'try hard' allocations.
f60858f9 2216 */
f6a8dd98 2217 if (node_alloc_noretry && folio && !alloc_try_hard)
f60858f9
MK
2218 node_clear(nid, *node_alloc_noretry);
2219
2220 /*
f6a8dd98 2221 * If we tried hard to get a folio but failed, set bit so that
f60858f9
MK
2222 * subsequent attempts will not try as hard until there is an
2223 * overall state change.
2224 */
f6a8dd98 2225 if (node_alloc_noretry && !folio && alloc_try_hard)
f60858f9
MK
2226 node_set(nid, *node_alloc_noretry);
2227
f6a8dd98 2228 if (!folio) {
19fc1a7e
SK
2229 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
2230 return NULL;
2231 }
2232
2233 __count_vm_event(HTLB_BUDDY_PGALLOC);
f6a8dd98 2234 return folio;
63b4613c
NA
2235}
2236
d67e32f2
MK
2237static struct folio *__alloc_fresh_hugetlb_folio(struct hstate *h,
2238 gfp_t gfp_mask, int nid, nodemask_t *nmask,
2239 nodemask_t *node_alloc_noretry)
0c397dae 2240{
7f325a8d 2241 struct folio *folio;
7118fc29 2242 bool retry = false;
0c397dae 2243
7118fc29 2244retry:
0c397dae 2245 if (hstate_is_gigantic(h))
19fc1a7e 2246 folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask);
0c397dae 2247 else
19fc1a7e 2248 folio = alloc_buddy_hugetlb_folio(h, gfp_mask,
f60858f9 2249 nid, nmask, node_alloc_noretry);
19fc1a7e 2250 if (!folio)
0c397dae 2251 return NULL;
d67e32f2 2252
7118fc29 2253 if (hstate_is_gigantic(h)) {
d1c60955 2254 if (!prep_compound_gigantic_folio(folio, huge_page_order(h))) {
7118fc29
MK
2255 /*
2256 * Rare failure to convert pages to compound page.
2257 * Free pages and try again - ONCE!
2258 */
7f325a8d 2259 free_gigantic_folio(folio, huge_page_order(h));
7118fc29
MK
2260 if (!retry) {
2261 retry = true;
2262 goto retry;
2263 }
7118fc29
MK
2264 return NULL;
2265 }
2266 }
0c397dae 2267
19fc1a7e 2268 return folio;
0c397dae
MH
2269}
2270
d67e32f2
MK
2271static struct folio *only_alloc_fresh_hugetlb_folio(struct hstate *h,
2272 gfp_t gfp_mask, int nid, nodemask_t *nmask,
2273 nodemask_t *node_alloc_noretry)
2274{
2275 struct folio *folio;
2276
2277 folio = __alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask,
2278 node_alloc_noretry);
2279 if (folio)
2280 init_new_hugetlb_folio(h, folio);
2281 return folio;
2282}
2283
af0fb9df 2284/*
d67e32f2
MK
2285 * Common helper to allocate a fresh hugetlb page. All specific allocators
2286 * should use this function to get new hugetlb pages
2287 *
2288 * Note that returned page is 'frozen': ref count of head page and all tail
2289 * pages is zero.
af0fb9df 2290 */
d67e32f2
MK
2291static struct folio *alloc_fresh_hugetlb_folio(struct hstate *h,
2292 gfp_t gfp_mask, int nid, nodemask_t *nmask,
2293 nodemask_t *node_alloc_noretry)
b2261026 2294{
19fc1a7e 2295 struct folio *folio;
d67e32f2
MK
2296
2297 folio = __alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask,
2298 node_alloc_noretry);
2299 if (!folio)
2300 return NULL;
2301
2302 prep_new_hugetlb_folio(h, folio, folio_nid(folio));
2303 return folio;
2304}
2305
2306static void prep_and_add_allocated_folios(struct hstate *h,
2307 struct list_head *folio_list)
2308{
2309 unsigned long flags;
2310 struct folio *folio, *tmp_f;
2311
79359d6d
MK
2312 /* Send list for bulk vmemmap optimization processing */
2313 hugetlb_vmemmap_optimize_folios(h, folio_list);
2314
d67e32f2
MK
2315 /* Add all new pool pages to free lists in one lock cycle */
2316 spin_lock_irqsave(&hugetlb_lock, flags);
2317 list_for_each_entry_safe(folio, tmp_f, folio_list, lru) {
2318 __prep_account_new_huge_page(h, folio_nid(folio));
2319 enqueue_hugetlb_folio(h, folio);
2320 }
2321 spin_unlock_irqrestore(&hugetlb_lock, flags);
2322}
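/*
 * Illustrative sketch, not part of mm/hugetlb.c (kept under #if 0): how a
 * pool-growing loop is meant to combine only_alloc_fresh_hugetlb_folio(),
 * the node_alloc_noretry bitmap described above, and
 * prep_and_add_allocated_folios().  This is a simplified version of the
 * pattern used when max_huge_pages is raised; the real code also spreads
 * the allocations across nodes with __GFP_THISNODE.
 */
#if 0
static void example_grow_pool(struct hstate *h, unsigned long count)
{
	NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL);
	LIST_HEAD(folio_list);
	struct folio *folio;

	if (!node_alloc_noretry)
		return;
	nodes_clear(*node_alloc_noretry);

	while (count--) {
		folio = only_alloc_fresh_hugetlb_folio(h, htlb_alloc_mask(h),
				NUMA_NO_NODE, &node_states[N_MEMORY],
				node_alloc_noretry);
		if (!folio)
			break;
		list_add(&folio->lru, &folio_list);
	}

	/* Optimize vmemmap and publish all new folios in one lock cycle. */
	prep_and_add_allocated_folios(h, &folio_list);
	NODEMASK_FREE(node_alloc_noretry);
}
#endif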
2323
2324/*
2325 * Allocates a fresh hugetlb page in a node interleaved manner. The page
2326 * will later be added to the appropriate hugetlb pool.
2327 */
2328static struct folio *alloc_pool_huge_folio(struct hstate *h,
2329 nodemask_t *nodes_allowed,
2e73ff23
GL
2330 nodemask_t *node_alloc_noretry,
2331 int *next_node)
d67e32f2 2332{
af0fb9df 2333 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
d67e32f2 2334 int nr_nodes, node;
b2261026 2335
2e73ff23 2336 for_each_node_mask_to_alloc(next_node, nr_nodes, node, nodes_allowed) {
d67e32f2
MK
2337 struct folio *folio;
2338
2339 folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, node,
19fc1a7e 2340 nodes_allowed, node_alloc_noretry);
d67e32f2
MK
2341 if (folio)
2342 return folio;
b2261026
JK
2343 }
2344
d67e32f2 2345 return NULL;
b2261026
JK
2346}
2347
e8c5c824 2348/*
10c6ec49
MK
2349 * Remove huge page from pool from next node to free. Attempt to keep
2350 * persistent huge pages more or less balanced over allowed nodes.
2351 * This routine only 'removes' the hugetlb page. The caller must make
2352 * an additional call to free the page to low level allocators.
e8c5c824
LS
2353 * Called with hugetlb_lock locked.
2354 */
d5b43e96
MWO
2355static struct folio *remove_pool_hugetlb_folio(struct hstate *h,
2356 nodemask_t *nodes_allowed, bool acct_surplus)
e8c5c824 2357{
b2261026 2358 int nr_nodes, node;
04bbfd84 2359 struct folio *folio = NULL;
e8c5c824 2360
9487ca60 2361 lockdep_assert_held(&hugetlb_lock);
b2261026 2362 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
685f3457
LS
2363 /*
2364 * If we're returning unused surplus pages, only examine
2365 * nodes with surplus pages.
2366 */
b2261026
JK
2367 if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
2368 !list_empty(&h->hugepage_freelists[node])) {
04bbfd84
MWO
2369 folio = list_entry(h->hugepage_freelists[node].next,
2370 struct folio, lru);
cfd5082b 2371 remove_hugetlb_folio(h, folio, acct_surplus);
9a76db09 2372 break;
e8c5c824 2373 }
b2261026 2374 }
e8c5c824 2375
d5b43e96 2376 return folio;
e8c5c824
LS
2377}
2378
c8721bbb 2379/*
54fa49b2
SK
2380 * Dissolve a given free hugetlb folio into free buddy pages. This function
2381 * does nothing for in-use hugetlb folios and non-hugetlb folios.
faf53def
NH
2382 * This function returns values like below:
2383 *
ad2fa371
MS
2384 * -ENOMEM: failed to allocate vmemmap pages to free the freed hugepages
2385 * when the system is under memory pressure and the feature of
2386 * freeing unused vmemmap pages associated with each hugetlb page
2387 * is enabled.
2388 * -EBUSY: failed to dissolve free hugepages or the hugepage is in-use
2389 * (allocated or reserved.)
2390 * 0: successfully dissolved free hugepages or the page is not a
2391 * hugepage (considered as already dissolved)
c8721bbb 2392 */
54fa49b2 2393int dissolve_free_hugetlb_folio(struct folio *folio)
c8721bbb 2394{
6bc9b564 2395 int rc = -EBUSY;
082d5b6b 2396
7ffddd49 2397retry:
faf53def 2398 /* Not to disrupt normal path by vainly holding hugetlb_lock */
1a7cdab5 2399 if (!folio_test_hugetlb(folio))
faf53def
NH
2400 return 0;
2401
db71ef79 2402 spin_lock_irq(&hugetlb_lock);
1a7cdab5 2403 if (!folio_test_hugetlb(folio)) {
faf53def
NH
2404 rc = 0;
2405 goto out;
2406 }
2407
1a7cdab5
SK
2408 if (!folio_ref_count(folio)) {
2409 struct hstate *h = folio_hstate(folio);
8346d69d 2410 if (!available_huge_pages(h))
082d5b6b 2411 goto out;
7ffddd49
MS
2412
2413 /*
2414 * We should make sure that the page is already on the free list
2415 * when it is dissolved.
2416 */
1a7cdab5 2417 if (unlikely(!folio_test_hugetlb_freed(folio))) {
db71ef79 2418 spin_unlock_irq(&hugetlb_lock);
7ffddd49
MS
2419 cond_resched();
2420
2421 /*
2422 * Theoretically, we should return -EBUSY when we
2423 * encounter this race. In fact, we have a chance
2424 * to successfully dissolve the page if we do a
2425 * retry, because the race window is quite small.
2426 * If we seize this opportunity, it is an optimization
2427 * for increasing the success rate of dissolving the page.
2428 */
2429 goto retry;
2430 }
2431
cfd5082b 2432 remove_hugetlb_folio(h, folio, false);
c1470b33 2433 h->max_huge_pages--;
db71ef79 2434 spin_unlock_irq(&hugetlb_lock);
ad2fa371
MS
2435
2436 /*
d6ef19e2
SK
2437 * Normally update_and_free_hugetlb_folio will allocate required vmemmap
2438 * before freeing the page. update_and_free_hugetlb_folio will fail to
ad2fa371
MS
2439 * free the page if it can not allocate required vmemmap. We
2440 * need to adjust max_huge_pages if the page is not freed.
2441 * Attempt to allocate vmemmap here so that we can take
2442 * appropriate action on failure.
30a89adf
MK
2443 *
2444 * The folio_test_hugetlb check here is because
2445 * remove_hugetlb_folio will clear hugetlb folio flag for
2446 * non-vmemmap optimized hugetlb folios.
ad2fa371 2447 */
30a89adf 2448 if (folio_test_hugetlb(folio)) {
c5ad3233 2449 rc = hugetlb_vmemmap_restore_folio(h, folio);
30a89adf
MK
2450 if (rc) {
2451 spin_lock_irq(&hugetlb_lock);
2452 add_hugetlb_folio(h, folio, false);
2453 h->max_huge_pages++;
2454 goto out;
2455 }
2456 } else
2457 rc = 0;
ad2fa371 2458
30a89adf 2459 update_and_free_hugetlb_folio(h, folio, false);
ad2fa371 2460 return rc;
c8721bbb 2461 }
082d5b6b 2462out:
db71ef79 2463 spin_unlock_irq(&hugetlb_lock);
082d5b6b 2464 return rc;
c8721bbb
NH
2465}
2466
2467/*
2468 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
2469 * make specified memory blocks removable from the system.
2247bb33
GS
2470 * Note that this will dissolve a free gigantic hugepage completely, if any
2471 * part of it lies within the given range.
54fa49b2
SK
2472 * Also note that if dissolve_free_hugetlb_folio() returns with an error, all
2473 * free hugetlb folios that were dissolved before that error are lost.
c8721bbb 2474 */
d199483c 2475int dissolve_free_hugetlb_folios(unsigned long start_pfn, unsigned long end_pfn)
c8721bbb 2476{
c8721bbb 2477 unsigned long pfn;
54fa49b2 2478 struct folio *folio;
082d5b6b 2479 int rc = 0;
dc2628f3
MS
2480 unsigned int order;
2481 struct hstate *h;
c8721bbb 2482
d0177639 2483 if (!hugepages_supported())
082d5b6b 2484 return rc;
d0177639 2485
dc2628f3
MS
2486 order = huge_page_order(&default_hstate);
2487 for_each_hstate(h)
2488 order = min(order, huge_page_order(h));
2489
2490 for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order) {
54fa49b2
SK
2491 folio = pfn_folio(pfn);
2492 rc = dissolve_free_hugetlb_folio(folio);
faf53def
NH
2493 if (rc)
2494 break;
eb03aa00 2495 }
082d5b6b
GS
2496
2497 return rc;
c8721bbb
NH
2498}
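/*
 * Illustrative sketch, not part of mm/hugetlb.c (kept under #if 0): a
 * memory-offline style caller only needs to pass pfn bounds; the stepping
 * by the smallest supported huge page order happens inside
 * dissolve_free_hugetlb_folios().  On -EBUSY or -ENOMEM the caller has to
 * give up on the range, since folios dissolved before the failure are not
 * recovered.
 */
#if 0
static int example_offline_range(unsigned long start_pfn, unsigned long nr_pages)
{
	int rc = dissolve_free_hugetlb_folios(start_pfn, start_pfn + nr_pages);

	if (rc)
		pr_warn("pfn range [%#lx-%#lx): free hugetlb pages could not be dissolved (%d)\n",
			start_pfn, start_pfn + nr_pages, rc);
	return rc;
}
#endif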
2499
ab5ac90a
MH
2500/*
2501 * Allocates a fresh surplus page from the page allocator.
2502 */
3a740e8b
SK
2503static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
2504 gfp_t gfp_mask, int nid, nodemask_t *nmask)
7893d1d5 2505{
19fc1a7e 2506 struct folio *folio = NULL;
7893d1d5 2507
bae7f4ae 2508 if (hstate_is_gigantic(h))
aa888a74
AK
2509 return NULL;
2510
db71ef79 2511 spin_lock_irq(&hugetlb_lock);
9980d744
MH
2512 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
2513 goto out_unlock;
db71ef79 2514 spin_unlock_irq(&hugetlb_lock);
d1c3fb1f 2515
19fc1a7e
SK
2516 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
2517 if (!folio)
0c397dae 2518 return NULL;
d1c3fb1f 2519
db71ef79 2520 spin_lock_irq(&hugetlb_lock);
9980d744
MH
2521 /*
2522 * We could have raced with the pool size change.
2523 * Double check that and simply deallocate the new page
2524 * if we would end up overcommitting the surpluses. Abuse
454a00c4 2525 * temporary page to work around the nasty free_huge_folio
9980d744
MH
2526 * codeflow
2527 */
2528 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
19fc1a7e 2529 folio_set_hugetlb_temporary(folio);
db71ef79 2530 spin_unlock_irq(&hugetlb_lock);
454a00c4 2531 free_huge_folio(folio);
2bf753e6 2532 return NULL;
7893d1d5 2533 }
9980d744 2534
b65a4eda 2535 h->surplus_huge_pages++;
19fc1a7e 2536 h->surplus_huge_pages_node[folio_nid(folio)]++;
b65a4eda 2537
9980d744 2538out_unlock:
db71ef79 2539 spin_unlock_irq(&hugetlb_lock);
7893d1d5 2540
3a740e8b 2541 return folio;
7893d1d5
AL
2542}
2543
e37d3e83 2544static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mask,
9a4e9f3b 2545 int nid, nodemask_t *nmask)
ab5ac90a 2546{
19fc1a7e 2547 struct folio *folio;
ab5ac90a
MH
2548
2549 if (hstate_is_gigantic(h))
2550 return NULL;
2551
19fc1a7e
SK
2552 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
2553 if (!folio)
ab5ac90a
MH
2554 return NULL;
2555
2b21624f 2556 /* fresh huge pages are frozen */
19fc1a7e 2557 folio_ref_unfreeze(folio, 1);
ab5ac90a
MH
2558 /*
2559 * We do not account these pages as surplus because they are only
2560 * temporary and will be released properly on the last reference
2561 */
19fc1a7e 2562 folio_set_hugetlb_temporary(folio);
ab5ac90a 2563
e37d3e83 2564 return folio;
ab5ac90a
MH
2565}
2566
099730d6
DH
2567/*
2568 * Use the VMA's mpolicy to allocate a huge page from the buddy.
2569 */
e0ec90ee 2570static
ff7d853b 2571struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h,
099730d6
DH
2572 struct vm_area_struct *vma, unsigned long addr)
2573{
3a740e8b 2574 struct folio *folio = NULL;
aaf14e40
MH
2575 struct mempolicy *mpol;
2576 gfp_t gfp_mask = htlb_alloc_mask(h);
2577 int nid;
2578 nodemask_t *nodemask;
2579
2580 nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
cfcaa66f
BW
2581 if (mpol_is_preferred_many(mpol)) {
2582 gfp_t gfp = gfp_mask | __GFP_NOWARN;
2583
2584 gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
3a740e8b 2585 folio = alloc_surplus_hugetlb_folio(h, gfp, nid, nodemask);
aaf14e40 2586
cfcaa66f
BW
2587 /* Fallback to all nodes if page==NULL */
2588 nodemask = NULL;
2589 }
2590
3a740e8b
SK
2591 if (!folio)
2592 folio = alloc_surplus_hugetlb_folio(h, gfp_mask, nid, nodemask);
cfcaa66f 2593 mpol_cond_put(mpol);
ff7d853b 2594 return folio;
099730d6
DH
2595}
2596
e37d3e83
SK
2597/* folio migration callback function */
2598struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
42d0c3fb 2599 nodemask_t *nmask, gfp_t gfp_mask, bool allow_alloc_fallback)
4db9b2ef 2600{
db71ef79 2601 spin_lock_irq(&hugetlb_lock);
8346d69d 2602 if (available_huge_pages(h)) {
a36f1e90 2603 struct folio *folio;
3e59fcb0 2604
a36f1e90
SK
2605 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
2606 preferred_nid, nmask);
2607 if (folio) {
db71ef79 2608 spin_unlock_irq(&hugetlb_lock);
e37d3e83 2609 return folio;
4db9b2ef
MH
2610 }
2611 }
db71ef79 2612 spin_unlock_irq(&hugetlb_lock);
4db9b2ef 2613
42d0c3fb
BW
2614 /* We cannot fallback to other nodes, as we could break the per-node pool. */
2615 if (!allow_alloc_fallback)
2616 gfp_mask |= __GFP_THISNODE;
2617
e37d3e83 2618 return alloc_migrate_hugetlb_folio(h, gfp_mask, preferred_nid, nmask);
4db9b2ef
MH
2619}
2620
e4e574b7 2621/*
25985edc 2622 * Increase the hugetlb pool such that it can accommodate a reservation
e4e574b7
AL
2623 * of size 'delta'.
2624 */
0a4f3d1b 2625static int gather_surplus_pages(struct hstate *h, long delta)
1b2a1e7b 2626 __must_hold(&hugetlb_lock)
e4e574b7 2627{
34665341 2628 LIST_HEAD(surplus_list);
454a00c4 2629 struct folio *folio, *tmp;
0a4f3d1b
LX
2630 int ret;
2631 long i;
2632 long needed, allocated;
28073b02 2633 bool alloc_ok = true;
e4e574b7 2634
9487ca60 2635 lockdep_assert_held(&hugetlb_lock);
a5516438 2636 needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
ac09b3a1 2637 if (needed <= 0) {
a5516438 2638 h->resv_huge_pages += delta;
e4e574b7 2639 return 0;
ac09b3a1 2640 }
e4e574b7
AL
2641
2642 allocated = 0;
e4e574b7
AL
2643
2644 ret = -ENOMEM;
2645retry:
db71ef79 2646 spin_unlock_irq(&hugetlb_lock);
e4e574b7 2647 for (i = 0; i < needed; i++) {
3a740e8b 2648 folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h),
2b21624f 2649 NUMA_NO_NODE, NULL);
3a740e8b 2650 if (!folio) {
28073b02
HD
2651 alloc_ok = false;
2652 break;
2653 }
3a740e8b 2654 list_add(&folio->lru, &surplus_list);
69ed779a 2655 cond_resched();
e4e574b7 2656 }
28073b02 2657 allocated += i;
e4e574b7
AL
2658
2659 /*
2660 * After retaking hugetlb_lock, we need to recalculate 'needed'
2661 * because either resv_huge_pages or free_huge_pages may have changed.
2662 */
db71ef79 2663 spin_lock_irq(&hugetlb_lock);
a5516438
AK
2664 needed = (h->resv_huge_pages + delta) -
2665 (h->free_huge_pages + allocated);
28073b02
HD
2666 if (needed > 0) {
2667 if (alloc_ok)
2668 goto retry;
2669 /*
2670 * We were not able to allocate enough pages to
2671 * satisfy the entire reservation so we free what
2672 * we've allocated so far.
2673 */
2674 goto free;
2675 }
e4e574b7
AL
2676 /*
2677 * The surplus_list now contains _at_least_ the number of extra pages
25985edc 2678 * needed to accommodate the reservation. Add the appropriate number
e4e574b7 2679 * of pages to the hugetlb pool and free the extras back to the buddy
ac09b3a1
AL
2680 * allocator. Commit the entire reservation here to prevent another
2681 * process from stealing the pages as they are added to the pool but
2682 * before they are reserved.
e4e574b7
AL
2683 */
2684 needed += allocated;
a5516438 2685 h->resv_huge_pages += delta;
e4e574b7 2686 ret = 0;
a9869b83 2687
19fc3f0a 2688 /* Free the needed pages to the hugetlb pool */
454a00c4 2689 list_for_each_entry_safe(folio, tmp, &surplus_list, lru) {
19fc3f0a
AL
2690 if ((--needed) < 0)
2691 break;
b65a4eda 2692 /* Add the page to the hugetlb allocator */
454a00c4 2693 enqueue_hugetlb_folio(h, folio);
19fc3f0a 2694 }
28073b02 2695free:
db71ef79 2696 spin_unlock_irq(&hugetlb_lock);
19fc3f0a 2697
b65a4eda
MK
2698 /*
2699 * Free unnecessary surplus pages to the buddy allocator.
454a00c4 2700 * Pages have no ref count, call free_huge_folio directly.
b65a4eda 2701 */
454a00c4
MWO
2702 list_for_each_entry_safe(folio, tmp, &surplus_list, lru)
2703 free_huge_folio(folio);
db71ef79 2704 spin_lock_irq(&hugetlb_lock);
e4e574b7
AL
2705
2706 return ret;
2707}
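/*
 * Worked example (illustrative only): with h->free_huge_pages = 3,
 * h->resv_huge_pages = 2 and a new reservation of delta = 4,
 * needed = (2 + 4) - 3 = 3 surplus pages are allocated on the first pass.
 * If another reservation raced and consumed a free page while the lock was
 * dropped, the recalculation sees needed = (2 + 4) - (2 + 3) = 1 and one
 * more page is allocated on the retry; once needed is <= 0 the reservation
 * is committed and any extra surplus pages go back to the buddy allocator.
 */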
2708
2709/*
e5bbc8a6
MK
2710 * This routine has two main purposes:
2711 * 1) Decrement the reservation count (resv_huge_pages) by the value passed
2712 * in unused_resv_pages. This corresponds to the prior adjustments made
2713 * to the associated reservation map.
2714 * 2) Free any unused surplus pages that may have been allocated to satisfy
2715 * the reservation. As many as unused_resv_pages may be freed.
e4e574b7 2716 */
a5516438
AK
2717static void return_unused_surplus_pages(struct hstate *h,
2718 unsigned long unused_resv_pages)
e4e574b7 2719{
e4e574b7 2720 unsigned long nr_pages;
10c6ec49
MK
2721 LIST_HEAD(page_list);
2722
9487ca60 2723 lockdep_assert_held(&hugetlb_lock);
10c6ec49
MK
2724 /* Uncommit the reservation */
2725 h->resv_huge_pages -= unused_resv_pages;
e4e574b7 2726
c0531714 2727 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
e5bbc8a6 2728 goto out;
aa888a74 2729
e5bbc8a6
MK
2730 /*
2731 * Part (or even all) of the reservation could have been backed
2732 * by pre-allocated pages. Only free surplus pages.
2733 */
a5516438 2734 nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
e4e574b7 2735
685f3457
LS
2736 /*
2737 * We want to release as many surplus pages as possible, spread
9b5e5d0f
LS
2738 * evenly across all nodes with memory. Iterate across these nodes
2739 * until we can no longer free unreserved surplus pages. This occurs
2740 * when the nodes with surplus pages have no free pages.
d5b43e96 2741 * remove_pool_hugetlb_folio() will balance the freed pages across the
9b5e5d0f 2742 * on-line nodes with memory and will handle the hstate accounting.
685f3457
LS
2743 */
2744 while (nr_pages--) {
d5b43e96
MWO
2745 struct folio *folio;
2746
2747 folio = remove_pool_hugetlb_folio(h, &node_states[N_MEMORY], 1);
2748 if (!folio)
e5bbc8a6 2749 goto out;
10c6ec49 2750
d5b43e96 2751 list_add(&folio->lru, &page_list);
e4e574b7 2752 }
e5bbc8a6
MK
2753
2754out:
db71ef79 2755 spin_unlock_irq(&hugetlb_lock);
10c6ec49 2756 update_and_free_pages_bulk(h, &page_list);
db71ef79 2757 spin_lock_irq(&hugetlb_lock);
e4e574b7
AL
2758}
2759
5e911373 2760
c37f9fb1 2761/*
feba16e2 2762 * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
5e911373 2763 * are used by the huge page allocation routines to manage reservations.
cf3ad20b
MK
2764 *
2765 * vma_needs_reservation is called to determine if the huge page at addr
2766 * within the vma has an associated reservation. If a reservation is
2767 * needed, the value 1 is returned. The caller is then responsible for
2768 * managing the global reservation and subpool usage counts. After
2769 * the huge page has been allocated, vma_commit_reservation is called
feba16e2
MK
2770 * to add the page to the reservation map. If the page allocation fails,
2771 * the reservation must be ended instead of committed. vma_end_reservation
2772 * is called in such cases.
cf3ad20b
MK
2773 *
2774 * In the normal case, vma_commit_reservation returns the same value
2775 * as the preceding vma_needs_reservation call. The only time this
2776 * is not the case is if a reserve map was changed between calls. It
2777 * is the responsibility of the caller to notice the difference and
2778 * take appropriate action.
96b96a96
MK
2779 *
2780 * vma_add_reservation is used in error paths where a reservation must
2781 * be restored when a newly allocated huge page must be freed. It is
2782 * to be called after calling vma_needs_reservation to determine if a
2783 * reservation exists.
846be085
MK
2784 *
2785 * vma_del_reservation is used in error paths where an entry in the reserve
2786 * map was created during huge page allocation and must be removed. It is to
2787 * be called after calling vma_needs_reservation to determine if a reservation
2788 * exists.
c37f9fb1 2789 */
5e911373
MK
2790enum vma_resv_mode {
2791 VMA_NEEDS_RESV,
2792 VMA_COMMIT_RESV,
feba16e2 2793 VMA_END_RESV,
96b96a96 2794 VMA_ADD_RESV,
846be085 2795 VMA_DEL_RESV,
5e911373 2796};
cf3ad20b
MK
2797static long __vma_reservation_common(struct hstate *h,
2798 struct vm_area_struct *vma, unsigned long addr,
5e911373 2799 enum vma_resv_mode mode)
c37f9fb1 2800{
4e35f483
JK
2801 struct resv_map *resv;
2802 pgoff_t idx;
cf3ad20b 2803 long ret;
0db9d74e 2804 long dummy_out_regions_needed;
c37f9fb1 2805
4e35f483
JK
2806 resv = vma_resv_map(vma);
2807 if (!resv)
84afd99b 2808 return 1;
c37f9fb1 2809
4e35f483 2810 idx = vma_hugecache_offset(h, vma, addr);
5e911373
MK
2811 switch (mode) {
2812 case VMA_NEEDS_RESV:
0db9d74e
MA
2813 ret = region_chg(resv, idx, idx + 1, &dummy_out_regions_needed);
2814 /* We assume that vma_reservation_* routines always operate on
2815 * 1 page, and that adding to resv map a 1 page entry can only
2816 * ever require 1 region.
2817 */
2818 VM_BUG_ON(dummy_out_regions_needed != 1);
5e911373
MK
2819 break;
2820 case VMA_COMMIT_RESV:
075a61d0 2821 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
0db9d74e
MA
2822 /* region_add calls of range 1 should never fail. */
2823 VM_BUG_ON(ret < 0);
5e911373 2824 break;
feba16e2 2825 case VMA_END_RESV:
0db9d74e 2826 region_abort(resv, idx, idx + 1, 1);
5e911373
MK
2827 ret = 0;
2828 break;
96b96a96 2829 case VMA_ADD_RESV:
0db9d74e 2830 if (vma->vm_flags & VM_MAYSHARE) {
075a61d0 2831 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
0db9d74e
MA
2832 /* region_add calls of range 1 should never fail. */
2833 VM_BUG_ON(ret < 0);
2834 } else {
2835 region_abort(resv, idx, idx + 1, 1);
96b96a96
MK
2836 ret = region_del(resv, idx, idx + 1);
2837 }
2838 break;
846be085
MK
2839 case VMA_DEL_RESV:
2840 if (vma->vm_flags & VM_MAYSHARE) {
2841 region_abort(resv, idx, idx + 1, 1);
2842 ret = region_del(resv, idx, idx + 1);
2843 } else {
2844 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2845 /* region_add calls of range 1 should never fail. */
2846 VM_BUG_ON(ret < 0);
2847 }
2848 break;
5e911373
MK
2849 default:
2850 BUG();
2851 }
84afd99b 2852
846be085 2853 if (vma->vm_flags & VM_MAYSHARE || mode == VMA_DEL_RESV)
cf3ad20b 2854 return ret;
bf3d12b9
ML
2855 /*
2856 * We know private mapping must have HPAGE_RESV_OWNER set.
2857 *
2858 * In most cases, reserves always exist for private mappings.
2859 * However, a file associated with the mapping could have been
2860 * hole punched or truncated after reserves were consumed.
2861 * A subsequent fault on such a range will not use reserves.
2862 * Subtle - The reserve map for private mappings has the
2863 * opposite meaning than that of shared mappings. If NO
2864 * entry is in the reserve map, it means a reservation exists.
2865 * If an entry exists in the reserve map, it means the
2866 * reservation has already been consumed. As a result, the
2867 * return value of this routine is the opposite of the
2868 * value returned from reserve map manipulation routines above.
2869 */
2870 if (ret > 0)
2871 return 0;
2872 if (ret == 0)
2873 return 1;
2874 return ret;
c37f9fb1 2875}
cf3ad20b
MK
2876
2877static long vma_needs_reservation(struct hstate *h,
a5516438 2878 struct vm_area_struct *vma, unsigned long addr)
c37f9fb1 2879{
5e911373 2880 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
cf3ad20b 2881}
84afd99b 2882
cf3ad20b
MK
2883static long vma_commit_reservation(struct hstate *h,
2884 struct vm_area_struct *vma, unsigned long addr)
2885{
5e911373
MK
2886 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
2887}
2888
feba16e2 2889static void vma_end_reservation(struct hstate *h,
5e911373
MK
2890 struct vm_area_struct *vma, unsigned long addr)
2891{
feba16e2 2892 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
c37f9fb1
AW
2893}
2894
96b96a96
MK
2895static long vma_add_reservation(struct hstate *h,
2896 struct vm_area_struct *vma, unsigned long addr)
2897{
2898 return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
2899}
2900
846be085
MK
2901static long vma_del_reservation(struct hstate *h,
2902 struct vm_area_struct *vma, unsigned long addr)
2903{
2904 return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV);
2905}
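/*
 * Illustrative sketch, not part of mm/hugetlb.c (kept under #if 0): the
 * intended calling sequence for the reservation helpers documented above,
 * condensed from the way alloc_hugetlb_folio() uses them.
 * example_get_folio() is a hypothetical stand-in for whatever actually
 * produces the huge page.
 */
#if 0
static struct folio *example_reserved_alloc(struct hstate *h,
		struct vm_area_struct *vma, unsigned long addr)
{
	long chg = vma_needs_reservation(h, vma, addr);
	struct folio *folio;

	if (chg < 0)
		return NULL;		/* reserve map allocation failed */

	/* chg == 0 means an existing reservation already backs this address. */
	folio = example_get_folio(h, vma, addr, chg);	/* hypothetical */
	if (!folio) {
		/* Allocation failed: drop the speculative region_chg(). */
		vma_end_reservation(h, vma, addr);
		return NULL;
	}

	/* Allocation succeeded: finalize the reserve map entry. */
	(void)vma_commit_reservation(h, vma, addr);
	return folio;
}
#endif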
2906
96b96a96 2907/*
846be085 2908 * This routine is called to restore reservation information on error paths.
d0ce0e47
SK
2909 * It should ONLY be called for folios allocated via alloc_hugetlb_folio(),
2910 * and the hugetlb mutex should remain held when calling this routine.
846be085
MK
2911 *
2912 * It handles two specific cases:
d2d7bb44
SK
2913 * 1) A reservation was in place and the folio consumed the reservation.
2914 * hugetlb_restore_reserve is set in the folio.
2915 * 2) No reservation was in place for the page, so hugetlb_restore_reserve is
d0ce0e47 2916 * not set. However, alloc_hugetlb_folio always updates the reserve map.
846be085 2917 *
454a00c4
MWO
2918 * In case 1, free_huge_folio later in the error path will increment the
2919 * global reserve count. But, free_huge_folio does not have enough context
846be085
MK
2920 * to adjust the reservation map. This case deals primarily with private
2921 * mappings. Adjust the reserve map here to be consistent with global
454a00c4 2922 * reserve count adjustments to be made by free_huge_folio. Make sure the
846be085
MK
2923 * reserve map indicates there is a reservation present.
2924 *
d0ce0e47 2925 * In case 2, simply undo reserve map modifications done by alloc_hugetlb_folio.
96b96a96 2926 */
846be085 2927void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
d2d7bb44 2928 unsigned long address, struct folio *folio)
96b96a96 2929{
846be085 2930 long rc = vma_needs_reservation(h, vma, address);
96b96a96 2931
0ffdc38e 2932 if (folio_test_hugetlb_restore_reserve(folio)) {
846be085 2933 if (unlikely(rc < 0))
96b96a96
MK
2934 /*
2935 * Rare out of memory condition in reserve map
0ffdc38e
SK
2936 * manipulation. Clear hugetlb_restore_reserve so
2937 * that global reserve count will not be incremented
454a00c4 2938 * by free_huge_folio. This will make it appear
0ffdc38e 2939 * as though the reservation for this folio was
96b96a96 2940 * consumed. This may prevent the task from
0ffdc38e 2941 * faulting in the folio at a later time. This
96b96a96
MK
2942 * is better than inconsistent global huge page
2943 * accounting of reserve counts.
2944 */
0ffdc38e 2945 folio_clear_hugetlb_restore_reserve(folio);
846be085
MK
2946 else if (rc)
2947 (void)vma_add_reservation(h, vma, address);
2948 else
2949 vma_end_reservation(h, vma, address);
2950 } else {
2951 if (!rc) {
2952 /*
2953 * This indicates there is an entry in the reserve map
d0ce0e47
SK
2954 * not added by alloc_hugetlb_folio. We know it was added
2955 * before the alloc_hugetlb_folio call, otherwise
0ffdc38e 2956 * hugetlb_restore_reserve would be set on the folio.
846be085
MK
2957 * Remove the entry so that a subsequent allocation
2958 * does not consume a reservation.
2959 */
2960 rc = vma_del_reservation(h, vma, address);
2961 if (rc < 0)
96b96a96 2962 /*
846be085
MK
2963 * VERY rare out of memory condition. Since
2964 * we can not delete the entry, set
0ffdc38e
SK
2965 * hugetlb_restore_reserve so that the reserve
2966 * count will be incremented when the folio
846be085
MK
2967 * is freed. This reserve will be consumed
2968 * on a subsequent allocation.
96b96a96 2969 */
0ffdc38e 2970 folio_set_hugetlb_restore_reserve(folio);
846be085
MK
2971 } else if (rc < 0) {
2972 /*
2973 * Rare out of memory condition from
2974 * vma_needs_reservation call. Memory allocation is
2975 * only attempted if a new entry is needed. Therefore,
2976 * this implies there is not an entry in the
2977 * reserve map.
2978 *
2979 * For shared mappings, no entry in the map indicates
2980 * no reservation. We are done.
2981 */
2982 if (!(vma->vm_flags & VM_MAYSHARE))
2983 /*
2984 * For private mappings, no entry indicates
2985 * a reservation is present. Since we can
0ffdc38e
SK
2986 * not add an entry, set hugetlb_restore_reserve
2987 * on the folio so reserve count will be
846be085
MK
2988 * incremented when freed. This reserve will
2989 * be consumed on a subsequent allocation.
2990 */
0ffdc38e 2991 folio_set_hugetlb_restore_reserve(folio);
96b96a96 2992 } else
846be085
MK
2993 /*
2994 * No reservation present, do nothing
2995 */
2996 vma_end_reservation(h, vma, address);
96b96a96
MK
2997 }
2998}
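/*
 * Illustrative sketch, not part of mm/hugetlb.c (kept under #if 0): the
 * shape of an error path that must call restore_reserve_on_error().  A
 * folio from alloc_hugetlb_folio() has already adjusted the reserve map,
 * so if inserting it fails the map must be fixed up before the final
 * folio_put() lets free_huge_folio() adjust the global counters.
 * example_insert_into_page_table() is hypothetical.
 */
#if 0
static int example_fault_path(struct hstate *h, struct vm_area_struct *vma,
		unsigned long address)
{
	struct folio *folio = alloc_hugetlb_folio(vma, address, 0);
	int ret;

	if (IS_ERR(folio))
		return PTR_ERR(folio);

	ret = example_insert_into_page_table(vma, address, folio);	/* hypothetical */
	if (ret) {
		restore_reserve_on_error(h, vma, address, folio);
		folio_put(folio);	/* ends up in free_huge_folio() */
	}
	return ret;
}
#endif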
2999
369fa227 3000/*
19fc1a7e
SK
3001 * alloc_and_dissolve_hugetlb_folio - Allocate a new folio and dissolve
3002 * the old one
369fa227 3003 * @h: struct hstate old page belongs to
19fc1a7e 3004 * @old_folio: Old folio to dissolve
ae37c7ff 3005 * @list: List to isolate the page in case we need to
369fa227
OS
3006 * Returns 0 on success, otherwise negated error.
3007 */
19fc1a7e
SK
3008static int alloc_and_dissolve_hugetlb_folio(struct hstate *h,
3009 struct folio *old_folio, struct list_head *list)
369fa227
OS
3010{
3011 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
de656ed3 3012 int nid = folio_nid(old_folio);
831bc31a 3013 struct folio *new_folio = NULL;
369fa227
OS
3014 int ret = 0;
3015
369fa227
OS
3016retry:
3017 spin_lock_irq(&hugetlb_lock);
de656ed3 3018 if (!folio_test_hugetlb(old_folio)) {
369fa227 3019 /*
19fc1a7e 3020 * Freed from under us. Drop new_folio too.
369fa227
OS
3021 */
3022 goto free_new;
de656ed3 3023 } else if (folio_ref_count(old_folio)) {
9747b9e9
BW
3024 bool isolated;
3025
369fa227 3026 /*
19fc1a7e 3027 * Someone has grabbed the folio, try to isolate it here.
ae37c7ff 3028 * Fail with -EBUSY if not possible.
369fa227 3029 */
ae37c7ff 3030 spin_unlock_irq(&hugetlb_lock);
9747b9e9
BW
3031 isolated = isolate_hugetlb(old_folio, list);
3032 ret = isolated ? 0 : -EBUSY;
ae37c7ff 3033 spin_lock_irq(&hugetlb_lock);
369fa227 3034 goto free_new;
de656ed3 3035 } else if (!folio_test_hugetlb_freed(old_folio)) {
369fa227 3036 /*
19fc1a7e 3037 * Folio's refcount is 0 but it has not been enqueued in the
369fa227
OS
3038 * freelist yet. Race window is small, so we can succeed here if
3039 * we retry.
3040 */
3041 spin_unlock_irq(&hugetlb_lock);
3042 cond_resched();
3043 goto retry;
3044 } else {
831bc31a
BW
3045 if (!new_folio) {
3046 spin_unlock_irq(&hugetlb_lock);
3047 new_folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid,
3048 NULL, NULL);
3049 if (!new_folio)
3050 return -ENOMEM;
3051 __prep_new_hugetlb_folio(h, new_folio);
3052 goto retry;
3053 }
3054
369fa227 3055 /*
19fc1a7e 3056 * Ok, old_folio is still a genuine free hugepage. Remove it from
369fa227
OS
3057 * the freelist and decrease the counters. These will be
3058 * incremented again when calling __prep_account_new_huge_page()
240d67a8
SK
3059 * and enqueue_hugetlb_folio() for new_folio. The counters will
3060 * remain stable since this happens under the lock.
369fa227 3061 */
cfd5082b 3062 remove_hugetlb_folio(h, old_folio, false);
369fa227
OS
3063
3064 /*
19fc1a7e 3065 * Ref count on new_folio is already zero as it was dropped
b65a4eda 3066 * earlier. It can be directly added to the pool free list.
369fa227 3067 */
369fa227 3068 __prep_account_new_huge_page(h, nid);
240d67a8 3069 enqueue_hugetlb_folio(h, new_folio);
369fa227
OS
3070
3071 /*
19fc1a7e 3072 * Folio has been replaced, we can safely free the old one.
369fa227
OS
3073 */
3074 spin_unlock_irq(&hugetlb_lock);
d6ef19e2 3075 update_and_free_hugetlb_folio(h, old_folio, false);
369fa227
OS
3076 }
3077
3078 return ret;
3079
3080free_new:
3081 spin_unlock_irq(&hugetlb_lock);
831bc31a
BW
3082 if (new_folio) {
3083 /* Folio has a zero ref count, but needs a ref to be freed */
3084 folio_ref_unfreeze(new_folio, 1);
3085 update_and_free_hugetlb_folio(h, new_folio, false);
3086 }
369fa227
OS
3087
3088 return ret;
3089}
3090
ae37c7ff 3091int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)
369fa227
OS
3092{
3093 struct hstate *h;
d5e33bd8 3094 struct folio *folio = page_folio(page);
ae37c7ff 3095 int ret = -EBUSY;
369fa227
OS
3096
3097 /*
3098 * The page might have been dissolved from under our feet, so make sure
3099 * to carefully check the state under the lock.
3100 * Return success when racing as if we dissolved the page ourselves.
3101 */
3102 spin_lock_irq(&hugetlb_lock);
d5e33bd8
SK
3103 if (folio_test_hugetlb(folio)) {
3104 h = folio_hstate(folio);
369fa227
OS
3105 } else {
3106 spin_unlock_irq(&hugetlb_lock);
3107 return 0;
3108 }
3109 spin_unlock_irq(&hugetlb_lock);
3110
3111 /*
3112 * Fence off gigantic pages as there is a cyclic dependency between
3113 * alloc_contig_range and them. Return -ENOMEM as this has the effect
3114 * of bailing out right away without further retrying.
3115 */
3116 if (hstate_is_gigantic(h))
3117 return -ENOMEM;
3118
9747b9e9 3119 if (folio_ref_count(folio) && isolate_hugetlb(folio, list))
ae37c7ff 3120 ret = 0;
d5e33bd8 3121 else if (!folio_ref_count(folio))
19fc1a7e 3122 ret = alloc_and_dissolve_hugetlb_folio(h, folio, list);
ae37c7ff
OS
3123
3124 return ret;
369fa227
OS
3125}
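/*
 * Illustrative sketch, not part of mm/hugetlb.c (kept under #if 0): how a
 * contiguous-range style caller might use isolate_or_dissolve_huge_page().
 * In-use pages end up on @movable_list for migration, free pages are
 * replaced and dissolved in place; putting isolated pages back on failure
 * remains the caller's responsibility.
 */
#if 0
static int example_clear_range(unsigned long start_pfn, unsigned long end_pfn,
		struct list_head *movable_list)
{
	unsigned long pfn;
	int ret;

	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		struct page *page = pfn_to_page(pfn);

		if (!PageHuge(page))
			continue;

		ret = isolate_or_dissolve_huge_page(page, movable_list);
		if (ret)
			return ret;	/* -EBUSY, or -ENOMEM for gigantic pages */
	}
	return 0;
}
#endif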
3126
d0ce0e47 3127struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
04f2cbe3 3128 unsigned long addr, int avoid_reserve)
1da177e4 3129{
90481622 3130 struct hugepage_subpool *spool = subpool_vma(vma);
a5516438 3131 struct hstate *h = hstate_vma(vma);
d4ab0316 3132 struct folio *folio;
8cba9576 3133 long map_chg, map_commit, nr_pages = pages_per_huge_page(h);
d85f69b0 3134 long gbl_chg;
8cba9576 3135 int memcg_charge_ret, ret, idx;
d0ce0e47 3136 struct hugetlb_cgroup *h_cg = NULL;
8cba9576 3137 struct mem_cgroup *memcg;
08cf9faf 3138 bool deferred_reserve;
8cba9576
NP
3139 gfp_t gfp = htlb_alloc_mask(h) | __GFP_RETRY_MAYFAIL;
3140
3141 memcg = get_mem_cgroup_from_current();
3142 memcg_charge_ret = mem_cgroup_hugetlb_try_charge(memcg, gfp, nr_pages);
3143 if (memcg_charge_ret == -ENOMEM) {
3144 mem_cgroup_put(memcg);
3145 return ERR_PTR(-ENOMEM);
3146 }
a1e78772 3147
6d76dcf4 3148 idx = hstate_index(h);
a1e78772 3149 /*
d85f69b0
MK
3150 * Examine the region/reserve map to determine if the process
3151 * has a reservation for the page to be allocated. A return
3152 * code of zero indicates a reservation exists (no change).
a1e78772 3153 */
d85f69b0 3154 map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
8cba9576
NP
3155 if (map_chg < 0) {
3156 if (!memcg_charge_ret)
3157 mem_cgroup_cancel_charge(memcg, nr_pages);
3158 mem_cgroup_put(memcg);
76dcee75 3159 return ERR_PTR(-ENOMEM);
8cba9576 3160 }
d85f69b0
MK
3161
3162 /*
3163 * Processes that did not create the mapping will have no
3164 * reserves as indicated by the region/reserve map. Check
3165 * that the allocation will not exceed the subpool limit.
3166 * Allocations for MAP_NORESERVE mappings also need to be
3167 * checked against any subpool limit.
3168 */
3169 if (map_chg || avoid_reserve) {
3170 gbl_chg = hugepage_subpool_get_pages(spool, 1);
8cba9576
NP
3171 if (gbl_chg < 0)
3172 goto out_end_reservation;
1da177e4 3173
d85f69b0
MK
3174 /*
3175 * Even though there was no reservation in the region/reserve
3176 * map, there could be reservations associated with the
3177 * subpool that can be used. This would be indicated if the
3178 * return value of hugepage_subpool_get_pages() is zero.
3179 * However, if avoid_reserve is specified we still avoid even
3180 * the subpool reservations.
3181 */
3182 if (avoid_reserve)
3183 gbl_chg = 1;
3184 }
3185
08cf9faf
MA
3186 /* If this allocation is not consuming a reservation, charge it now.
3187 */
6501fe5f 3188 deferred_reserve = map_chg || avoid_reserve;
08cf9faf
MA
3189 if (deferred_reserve) {
3190 ret = hugetlb_cgroup_charge_cgroup_rsvd(
3191 idx, pages_per_huge_page(h), &h_cg);
3192 if (ret)
3193 goto out_subpool_put;
3194 }
3195
6d76dcf4 3196 ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
8f34af6f 3197 if (ret)
08cf9faf 3198 goto out_uncharge_cgroup_reservation;
8f34af6f 3199
db71ef79 3200 spin_lock_irq(&hugetlb_lock);
d85f69b0
MK
3201 /*
3202	 * gbl_chg is passed to indicate whether or not a page must be taken
3203 * from the global free pool (global change). gbl_chg == 0 indicates
3204 * a reservation exists for the allocation.
3205 */
ff7d853b
SK
3206 folio = dequeue_hugetlb_folio_vma(h, vma, addr, avoid_reserve, gbl_chg);
3207 if (!folio) {
db71ef79 3208 spin_unlock_irq(&hugetlb_lock);
ff7d853b
SK
3209 folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr);
3210 if (!folio)
8f34af6f 3211 goto out_uncharge_cgroup;
12df140f 3212 spin_lock_irq(&hugetlb_lock);
a88c7695 3213 if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
ff7d853b 3214 folio_set_hugetlb_restore_reserve(folio);
a88c7695
NH
3215 h->resv_huge_pages--;
3216 }
ff7d853b
SK
3217 list_add(&folio->lru, &h->hugepage_activelist);
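		/* The freshly allocated folio has a frozen (zero) refcount; unfreeze it to hand out the first reference. */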
3218 folio_ref_unfreeze(folio, 1);
81a6fcae 3219 /* Fall through */
68842c9b 3220 }
ff7d853b
SK
3221
3222 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, folio);
08cf9faf
MA
3223 /* If allocation is not consuming a reservation, also store the
3224 * hugetlb_cgroup pointer on the page.
3225 */
3226 if (deferred_reserve) {
3227 hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h),
ff7d853b 3228 h_cg, folio);
08cf9faf
MA
3229 }
3230
db71ef79 3231 spin_unlock_irq(&hugetlb_lock);
348ea204 3232
ff7d853b 3233 hugetlb_set_folio_subpool(folio, spool);
90d8b7e6 3234
d85f69b0
MK
3235 map_commit = vma_commit_reservation(h, vma, addr);
3236 if (unlikely(map_chg > map_commit)) {
33039678
MK
3237 /*
3238 * The page was added to the reservation map between
3239 * vma_needs_reservation and vma_commit_reservation.
3240 * This indicates a race with hugetlb_reserve_pages.
3241 * Adjust for the subpool count incremented above AND
3242 * in hugetlb_reserve_pages for the same page. Also,
3243 * the reservation count added in hugetlb_reserve_pages
3244 * no longer applies.
3245 */
3246 long rsv_adjust;
3247
3248 rsv_adjust = hugepage_subpool_put_pages(spool, 1);
3249 hugetlb_acct_memory(h, -rsv_adjust);
b76b4690
PX
3250 if (deferred_reserve) {
3251 spin_lock_irq(&hugetlb_lock);
d4ab0316
SK
3252 hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
3253 pages_per_huge_page(h), folio);
b76b4690
PX
3254 spin_unlock_irq(&hugetlb_lock);
3255 }
33039678 3256 }
8cba9576
NP
3257
3258 if (!memcg_charge_ret)
3259 mem_cgroup_commit_charge(folio, memcg);
3260 mem_cgroup_put(memcg);
3261
d0ce0e47 3262 return folio;
8f34af6f
JZ
3263
3264out_uncharge_cgroup:
3265 hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
08cf9faf
MA
3266out_uncharge_cgroup_reservation:
3267 if (deferred_reserve)
3268 hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h),
3269 h_cg);
8f34af6f 3270out_subpool_put:
d85f69b0 3271 if (map_chg || avoid_reserve)
8f34af6f 3272 hugepage_subpool_put_pages(spool, 1);
8cba9576 3273out_end_reservation:
feba16e2 3274 vma_end_reservation(h, vma, addr);
8cba9576
NP
3275 if (!memcg_charge_ret)
3276 mem_cgroup_cancel_charge(memcg, nr_pages);
3277 mem_cgroup_put(memcg);
8f34af6f 3278 return ERR_PTR(-ENOSPC);
b45b5bd6
DG
3279}
3280
b5389086 3281int alloc_bootmem_huge_page(struct hstate *h, int nid)
e24a1307 3282 __attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
b5389086 3283int __alloc_bootmem_huge_page(struct hstate *h, int nid)
aa888a74 3284{
b5389086 3285 struct huge_bootmem_page *m = NULL; /* initialize for clang */
b78b27d0 3286 int nr_nodes, node = nid;
aa888a74 3287
b5389086
ZY
3288 /* do node specific alloc */
3289 if (nid != NUMA_NO_NODE) {
3290 m = memblock_alloc_try_nid_raw(huge_page_size(h), huge_page_size(h),
3291 0, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
3292 if (!m)
3293 return 0;
3294 goto found;
3295 }
3296 /* allocate from next node when distributing huge pages */
2e73ff23 3297 for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node, &node_states[N_MEMORY]) {
b5389086 3298 m = memblock_alloc_try_nid_raw(
8b89a116 3299 huge_page_size(h), huge_page_size(h),
97ad1087 3300 0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
b5389086
ZY
3301 /*
3302 * Use the beginning of the huge page to store the
3303 * huge_bootmem_page struct (until gather_bootmem
3304 * puts them into the mem_map).
3305 */
3306 if (!m)
3307 return 0;
3308 goto found;
aa888a74 3309 }
aa888a74
AK
3310
3311found:
fde1c4ec
UA
3312
3313 /*
3314	 * Only initialize the head struct page in memmap_init_reserved_pages;
3315	 * the rest of the struct pages will be initialized by the HugeTLB
3316	 * subsystem itself.
3317	 * The head struct page is used by the HugeTLB subsystem to look up
3318	 * folio information such as zone id and node id.
3319 */
3320 memblock_reserved_mark_noinit(virt_to_phys((void *)m + PAGE_SIZE),
3321 huge_page_size(h) - PAGE_SIZE);
aa888a74 3322 /* Put them into a private list first because mem_map is not up yet */
330d6e48 3323 INIT_LIST_HEAD(&m->list);
b78b27d0 3324 list_add(&m->list, &huge_boot_pages[node]);
aa888a74
AK
3325 m->hstate = h;
3326 return 1;
3327}
3328
fde1c4ec
UA
3329/* Initialize [start_page:end_page_number] tail struct pages of a hugepage */
3330static void __init hugetlb_folio_init_tail_vmemmap(struct folio *folio,
3331 unsigned long start_page_number,
3332 unsigned long end_page_number)
3333{
3334 enum zone_type zone = zone_idx(folio_zone(folio));
3335 int nid = folio_nid(folio);
3336 unsigned long head_pfn = folio_pfn(folio);
3337 unsigned long pfn, end_pfn = head_pfn + end_page_number;
3338 int ret;
3339
3340 for (pfn = head_pfn + start_page_number; pfn < end_pfn; pfn++) {
3341 struct page *page = pfn_to_page(pfn);
3342
3343 __init_single_page(page, pfn, zone, nid);
3344 prep_compound_tail((struct page *)folio, pfn - head_pfn);
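		/* __init_single_page() set the refcount to 1; freeze it back to zero, as tail pages must hold no references. */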
3345 ret = page_ref_freeze(page, 1);
3346 VM_BUG_ON(!ret);
3347 }
3348}
3349
3350static void __init hugetlb_folio_init_vmemmap(struct folio *folio,
3351 struct hstate *h,
3352 unsigned long nr_pages)
3353{
3354 int ret;
3355
3356 /* Prepare folio head */
3357 __folio_clear_reserved(folio);
3358 __folio_set_head(folio);
a48bf7b4 3359 ret = folio_ref_freeze(folio, 1);
fde1c4ec
UA
3360 VM_BUG_ON(!ret);
3361 /* Initialize the necessary tail struct pages */
3362 hugetlb_folio_init_tail_vmemmap(folio, 1, nr_pages);
3363 prep_compound_head((struct page *)folio, huge_page_order(h));
3364}
3365
79359d6d
MK
3366static void __init prep_and_add_bootmem_folios(struct hstate *h,
3367 struct list_head *folio_list)
3368{
3369 unsigned long flags;
3370 struct folio *folio, *tmp_f;
3371
3372 /* Send list for bulk vmemmap optimization processing */
3373 hugetlb_vmemmap_optimize_folios(h, folio_list);
3374
79359d6d
MK
3375 list_for_each_entry_safe(folio, tmp_f, folio_list, lru) {
3376 if (!folio_test_hugetlb_vmemmap_optimized(folio)) {
3377 /*
3378	 * If HVO fails, initialize all tail struct pages.
3379 * We do not worry about potential long lock hold
3380 * time as this is early in boot and there should
3381 * be no contention.
3382 */
3383 hugetlb_folio_init_tail_vmemmap(folio,
3384 HUGETLB_VMEMMAP_RESERVE_PAGES,
3385 pages_per_huge_page(h));
3386 }
b78b27d0
GL
3387 /* Subdivide locks to achieve better parallel performance */
3388 spin_lock_irqsave(&hugetlb_lock, flags);
79359d6d
MK
3389 __prep_account_new_huge_page(h, folio_nid(folio));
3390 enqueue_hugetlb_folio(h, folio);
b78b27d0 3391 spin_unlock_irqrestore(&hugetlb_lock, flags);
79359d6d 3392 }
79359d6d
MK
3393}
3394
48b8d744
MK
3395/*
3396 * Put bootmem huge pages into the standard lists after mem_map is up.
5e0a760b 3397 * Note: This only applies to gigantic (order > MAX_PAGE_ORDER) pages.
48b8d744 3398 */
b78b27d0 3399static void __init gather_bootmem_prealloc_node(unsigned long nid)
aa888a74 3400{
d67e32f2 3401 LIST_HEAD(folio_list);
aa888a74 3402 struct huge_bootmem_page *m;
d67e32f2 3403 struct hstate *h = NULL, *prev_h = NULL;
aa888a74 3404
b78b27d0 3405 list_for_each_entry(m, &huge_boot_pages[nid], list) {
40d18ebf 3406 struct page *page = virt_to_page(m);
fde1c4ec 3407 struct folio *folio = (void *)page;
d67e32f2
MK
3408
3409 h = m->hstate;
3410 /*
3411 * It is possible to have multiple huge page sizes (hstates)
3412 * in this list. If so, process each size separately.
3413 */
3414 if (h != prev_h && prev_h != NULL)
79359d6d 3415 prep_and_add_bootmem_folios(prev_h, &folio_list);
d67e32f2 3416 prev_h = h;
ee8f248d 3417
48b8d744 3418 VM_BUG_ON(!hstate_is_gigantic(h));
d1c60955 3419 WARN_ON(folio_ref_count(folio) != 1);
fde1c4ec
UA
3420
3421 hugetlb_folio_init_vmemmap(folio, h,
3422 HUGETLB_VMEMMAP_RESERVE_PAGES);
79359d6d 3423 init_new_hugetlb_folio(h, folio);
d67e32f2 3424 list_add(&folio->lru, &folio_list);
af0fb9df 3425
b0320c7b 3426 /*
48b8d744
MK
3427 * We need to restore the 'stolen' pages to totalram_pages
3428 * in order to fix confusing memory reports from free(1) and
3429 * other side-effects, like CommitLimit going negative.
b0320c7b 3430 */
48b8d744 3431 adjust_managed_page_count(page, pages_per_huge_page(h));
520495fe 3432 cond_resched();
aa888a74 3433 }
d67e32f2 3434
79359d6d 3435 prep_and_add_bootmem_folios(h, &folio_list);
aa888a74 3436}
fde1c4ec 3437
b78b27d0
GL
3438static void __init gather_bootmem_prealloc_parallel(unsigned long start,
3439 unsigned long end, void *arg)
3440{
3441 int nid;
3442
3443 for (nid = start; nid < end; nid++)
3444 gather_bootmem_prealloc_node(nid);
3445}
3446
3447static void __init gather_bootmem_prealloc(void)
3448{
3449 struct padata_mt_job job = {
3450 .thread_fn = gather_bootmem_prealloc_parallel,
3451 .fn_arg = NULL,
3452 .start = 0,
3453 .size = num_node_state(N_MEMORY),
3454 .align = 1,
3455 .min_chunk = 1,
3456 .max_threads = num_node_state(N_MEMORY),
3457 .numa_aware = true,
3458 };
3459
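	/* Each worker thread walks a contiguous range of node ids, gathering one node's boot list at a time. */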
3460 padata_do_multithreaded(&job);
3461}
3462
b5389086
ZY
3463static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
3464{
3465 unsigned long i;
3466 char buf[32];
3467
3468 for (i = 0; i < h->max_huge_pages_node[nid]; ++i) {
3469 if (hstate_is_gigantic(h)) {
3470 if (!alloc_bootmem_huge_page(h, nid))
3471 break;
3472 } else {
19fc1a7e 3473 struct folio *folio;
b5389086
ZY
3474 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
3475
19fc1a7e 3476 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid,
b5389086 3477 &node_states[N_MEMORY], NULL);
19fc1a7e 3478 if (!folio)
b5389086 3479 break;
454a00c4 3480 free_huge_folio(folio); /* free it into the hugepage allocator */
b5389086
ZY
3481 }
3482 cond_resched();
3483 }
3484 if (i == h->max_huge_pages_node[nid])
3485 return;
3486
3487 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3488 pr_warn("HugeTLB: allocating %u of page size %s failed node%d. Only allocated %lu hugepages.\n",
3489 h->max_huge_pages_node[nid], buf, nid, i);
3490 h->max_huge_pages -= (h->max_huge_pages_node[nid] - i);
3491 h->max_huge_pages_node[nid] = i;
3492}
aa888a74 3493
fc37bbb3
GL
3494static bool __init hugetlb_hstate_alloc_pages_specific_nodes(struct hstate *h)
3495{
3496 int i;
3497 bool node_specific_alloc = false;
3498
3499 for_each_online_node(i) {
3500 if (h->max_huge_pages_node[i] > 0) {
3501 hugetlb_hstate_alloc_pages_onenode(h, i);
3502 node_specific_alloc = true;
3503 }
3504 }
3505
3506 return node_specific_alloc;
3507}
3508
3509static void __init hugetlb_hstate_alloc_pages_errcheck(unsigned long allocated, struct hstate *h)
3510{
3511 if (allocated < h->max_huge_pages) {
3512 char buf[32];
3513
3514 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3515 pr_warn("HugeTLB: allocating %lu of page size %s failed. Only allocated %lu hugepages.\n",
3516 h->max_huge_pages, buf, allocated);
3517 h->max_huge_pages = allocated;
3518 }
3519}
3520
c6c21c31
GL
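/*
 * padata worker: each invocation allocates huge pages for the index range
 * [start, end) of the overall request and adds them to the pool.
 */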
3521static void __init hugetlb_pages_alloc_boot_node(unsigned long start, unsigned long end, void *arg)
3522{
3523 struct hstate *h = (struct hstate *)arg;
3524 int i, num = end - start;
3525 nodemask_t node_alloc_noretry;
3526 LIST_HEAD(folio_list);
3527 int next_node = first_online_node;
3528
3529	 /* Bit mask controlling how hard we retry per-node allocations. */
3530 nodes_clear(node_alloc_noretry);
3531
3532 for (i = 0; i < num; ++i) {
3533 struct folio *folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],
3534 &node_alloc_noretry, &next_node);
3535 if (!folio)
3536 break;
3537
3538 list_move(&folio->lru, &folio_list);
3539 cond_resched();
3540 }
3541
3542 prep_and_add_allocated_folios(h, &folio_list);
3543}
3544
d5c3eb3f
GL
3545static unsigned long __init hugetlb_gigantic_pages_alloc_boot(struct hstate *h)
3546{
3547 unsigned long i;
3548
3549 for (i = 0; i < h->max_huge_pages; ++i) {
3550 if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE))
3551 break;
3552 cond_resched();
3553 }
3554
3555 return i;
3556}
3557
3558static unsigned long __init hugetlb_pages_alloc_boot(struct hstate *h)
3559{
c6c21c31
GL
3560 struct padata_mt_job job = {
3561 .fn_arg = h,
3562 .align = 1,
3563 .numa_aware = true
3564 };
d5c3eb3f 3565
c6c21c31
GL
3566 job.thread_fn = hugetlb_pages_alloc_boot_node;
3567 job.start = 0;
3568 job.size = h->max_huge_pages;
d5c3eb3f 3569
c6c21c31
GL
3570 /*
3571 * job.max_threads is twice the num_node_state(N_MEMORY),
3572 *
3573 * Tests below indicate that a multiplier of 2 significantly improves
3574 * performance, and although larger values also provide improvements,
3575 * the gains are marginal.
3576 *
3577 * Therefore, choosing 2 as the multiplier strikes a good balance between
3578 * enhancing parallel processing capabilities and maintaining efficient
3579 * resource management.
3580 *
3581 * +------------+-------+-------+-------+-------+-------+
3582 * | multiplier | 1 | 2 | 3 | 4 | 5 |
3583 * +------------+-------+-------+-------+-------+-------+
3584 * | 256G 2node | 358ms | 215ms | 157ms | 134ms | 126ms |
3585 * | 2T 4node | 979ms | 679ms | 543ms | 489ms | 481ms |
3586 * | 50G 2node | 71ms | 44ms | 37ms | 30ms | 31ms |
3587 * +------------+-------+-------+-------+-------+-------+
3588 */
3589 job.max_threads = num_node_state(N_MEMORY) * 2;
3590 job.min_chunk = h->max_huge_pages / num_node_state(N_MEMORY) / 2;
3591 padata_do_multithreaded(&job);
d5c3eb3f 3592
c6c21c31 3593 return h->nr_huge_pages;
d5c3eb3f
GL
3594}
3595
d67e32f2
MK
3596/*
3597 * NOTE: this routine is called in different contexts for gigantic and
3598 * non-gigantic pages.
3599 * - For gigantic pages, this is called early in the boot process and
3600	 *   pages are allocated from the memblock allocator or something similar.
3601 * Gigantic pages are actually added to pools later with the routine
3602 * gather_bootmem_prealloc.
3603 * - For non-gigantic pages, this is called later in the boot process after
3604 * all of mm is up and functional. Pages are allocated from buddy and
3605 * then added to hugetlb pools.
3606 */
8faa8b07 3607static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
1da177e4 3608{
d5c3eb3f 3609 unsigned long allocated;
b78b27d0 3610 static bool initialized __initdata;
b5389086
ZY
3611
3612 /* skip gigantic hugepages allocation if hugetlb_cma enabled */
3613 if (hstate_is_gigantic(h) && hugetlb_cma_size) {
3614 pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
3615 return;
3616 }
3617
b78b27d0
GL
3618 /* hugetlb_hstate_alloc_pages will be called many times, initialize huge_boot_pages once */
3619 if (!initialized) {
3620 int i = 0;
3621
3622 for (i = 0; i < MAX_NUMNODES; i++)
3623 INIT_LIST_HEAD(&huge_boot_pages[i]);
3624 initialized = true;
3625 }
3626
b5389086 3627 /* do node specific alloc */
fc37bbb3 3628 if (hugetlb_hstate_alloc_pages_specific_nodes(h))
b5389086
ZY
3629 return;
3630
3631 /* below will do all node balanced alloc */
d5c3eb3f
GL
3632 if (hstate_is_gigantic(h))
3633 allocated = hugetlb_gigantic_pages_alloc_boot(h);
3634 else
3635 allocated = hugetlb_pages_alloc_boot(h);
d67e32f2 3636
d5c3eb3f 3637 hugetlb_hstate_alloc_pages_errcheck(allocated, h);
e5ff2159
AK
3638}
3639
3640static void __init hugetlb_init_hstates(void)
3641{
79dfc695 3642 struct hstate *h, *h2;
e5ff2159
AK
3643
3644 for_each_hstate(h) {
8faa8b07 3645 /* oversize hugepages were init'ed in early boot */
bae7f4ae 3646 if (!hstate_is_gigantic(h))
8faa8b07 3647 hugetlb_hstate_alloc_pages(h);
79dfc695
MK
3648
3649 /*
3650 * Set demote order for each hstate. Note that
3651 * h->demote_order is initially 0.
3652 * - We can not demote gigantic pages if runtime freeing
3653 * is not supported, so skip this.
a01f4390
MK
3654 * - If CMA allocation is possible, we can not demote
3655 * HUGETLB_PAGE_ORDER or smaller size pages.
79dfc695
MK
3656 */
3657 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
3658 continue;
a01f4390
MK
3659 if (hugetlb_cma_size && h->order <= HUGETLB_PAGE_ORDER)
3660 continue;
79dfc695
MK
3661 for_each_hstate(h2) {
3662 if (h2 == h)
3663 continue;
3664 if (h2->order < h->order &&
3665 h2->order > h->demote_order)
3666 h->demote_order = h2->order;
3667 }
e5ff2159
AK
3668 }
3669}
3670
3671static void __init report_hugepages(void)
3672{
3673 struct hstate *h;
3674
3675 for_each_hstate(h) {
4abd32db 3676 char buf[32];
c6247f72
MW
3677
3678 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
6213834c 3679 pr_info("HugeTLB: registered %s page size, pre-allocated %ld pages\n",
c6247f72 3680 buf, h->free_huge_pages);
6213834c
MS
3681 pr_info("HugeTLB: %d KiB vmemmap can be freed for a %s page\n",
3682 hugetlb_vmemmap_optimizable_size(h) / SZ_1K, buf);
e5ff2159
AK
3683 }
3684}
3685
1da177e4 3686#ifdef CONFIG_HIGHMEM
6ae11b27
LS
3687static void try_to_free_low(struct hstate *h, unsigned long count,
3688 nodemask_t *nodes_allowed)
1da177e4 3689{
4415cc8d 3690 int i;
1121828a 3691 LIST_HEAD(page_list);
4415cc8d 3692
9487ca60 3693 lockdep_assert_held(&hugetlb_lock);
bae7f4ae 3694 if (hstate_is_gigantic(h))
aa888a74
AK
3695 return;
3696
1121828a
MK
3697 /*
3698 * Collect pages to be freed on a list, and free after dropping lock
3699 */
6ae11b27 3700 for_each_node_mask(i, *nodes_allowed) {
04bbfd84 3701 struct folio *folio, *next;
a5516438 3702 struct list_head *freel = &h->hugepage_freelists[i];
04bbfd84 3703 list_for_each_entry_safe(folio, next, freel, lru) {
a5516438 3704 if (count >= h->nr_huge_pages)
1121828a 3705 goto out;
04bbfd84 3706 if (folio_test_highmem(folio))
1da177e4 3707 continue;
04bbfd84
MWO
3708 remove_hugetlb_folio(h, folio, false);
3709 list_add(&folio->lru, &page_list);
1da177e4
LT
3710 }
3711 }
1121828a
MK
3712
3713out:
db71ef79 3714 spin_unlock_irq(&hugetlb_lock);
10c6ec49 3715 update_and_free_pages_bulk(h, &page_list);
db71ef79 3716 spin_lock_irq(&hugetlb_lock);
1da177e4
LT
3717}
3718#else
6ae11b27
LS
3719static inline void try_to_free_low(struct hstate *h, unsigned long count,
3720 nodemask_t *nodes_allowed)
1da177e4
LT
3721{
3722}
3723#endif
3724
20a0307c
WF
3725/*
3726 * Increment or decrement surplus_huge_pages. Keep node-specific counters
3727 * balanced by operating on them in a round-robin fashion.
3728 * Returns 1 if an adjustment was made.
3729 */
6ae11b27
LS
3730static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
3731 int delta)
20a0307c 3732{
b2261026 3733 int nr_nodes, node;
20a0307c 3734
9487ca60 3735 lockdep_assert_held(&hugetlb_lock);
20a0307c 3736 VM_BUG_ON(delta != -1 && delta != 1);
20a0307c 3737
b2261026 3738 if (delta < 0) {
2e73ff23 3739 for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node, nodes_allowed) {
b2261026
JK
3740 if (h->surplus_huge_pages_node[node])
3741 goto found;
e8c5c824 3742 }
b2261026
JK
3743 } else {
3744 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
3745 if (h->surplus_huge_pages_node[node] <
3746 h->nr_huge_pages_node[node])
3747 goto found;
e8c5c824 3748 }
b2261026
JK
3749 }
3750 return 0;
20a0307c 3751
b2261026
JK
3752found:
3753 h->surplus_huge_pages += delta;
3754 h->surplus_huge_pages_node[node] += delta;
3755 return 1;
20a0307c
WF
3756}
3757
a5516438 3758#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
fd875dca 3759static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
4eb0716e 3760 nodemask_t *nodes_allowed)
1da177e4 3761{
d67e32f2
MK
3762 unsigned long min_count;
3763 unsigned long allocated;
3764 struct folio *folio;
10c6ec49 3765 LIST_HEAD(page_list);
f60858f9
MK
3766 NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL);
3767
3768 /*
3769 * Bit mask controlling how hard we retry per-node allocations.
3770 * If we can not allocate the bit mask, do not attempt to allocate
3771 * the requested huge pages.
3772 */
3773 if (node_alloc_noretry)
3774 nodes_clear(*node_alloc_noretry);
3775 else
3776 return -ENOMEM;
1da177e4 3777
29383967
MK
3778 /*
3779 * resize_lock mutex prevents concurrent adjustments to number of
3780 * pages in hstate via the proc/sysfs interfaces.
3781 */
3782 mutex_lock(&h->resize_lock);
b65d4adb 3783 flush_free_hpage_work(h);
db71ef79 3784 spin_lock_irq(&hugetlb_lock);
4eb0716e 3785
fd875dca
MK
3786 /*
3787 * Check for a node specific request.
3788 * Changing node specific huge page count may require a corresponding
3789 * change to the global count. In any case, the passed node mask
3790 * (nodes_allowed) will restrict alloc/free to the specified node.
3791 */
3792 if (nid != NUMA_NO_NODE) {
3793 unsigned long old_count = count;
3794
b72b3c9c
XH
3795 count += persistent_huge_pages(h) -
3796 (h->nr_huge_pages_node[nid] -
3797 h->surplus_huge_pages_node[nid]);
fd875dca
MK
3798 /*
3799 * User may have specified a large count value which caused the
3800 * above calculation to overflow. In this case, they wanted
3801 * to allocate as many huge pages as possible. Set count to
3802 * largest possible value to align with their intention.
3803 */
3804 if (count < old_count)
3805 count = ULONG_MAX;
3806 }
3807
4eb0716e
AG
3808 /*
3809 * Gigantic pages runtime allocation depend on the capability for large
3810 * page range allocation.
3811 * If the system does not provide this feature, return an error when
3812 * the user tries to allocate gigantic pages but let the user free the
3813 * boottime allocated gigantic pages.
3814 */
3815 if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) {
3816 if (count > persistent_huge_pages(h)) {
db71ef79 3817 spin_unlock_irq(&hugetlb_lock);
29383967 3818 mutex_unlock(&h->resize_lock);
f60858f9 3819 NODEMASK_FREE(node_alloc_noretry);
4eb0716e
AG
3820 return -EINVAL;
3821 }
3822 /* Fall through to decrease pool */
3823 }
aa888a74 3824
7893d1d5
AL
3825 /*
3826 * Increase the pool size
3827 * First take pages out of surplus state. Then make up the
3828 * remaining difference by allocating fresh huge pages.
d1c3fb1f 3829 *
3a740e8b 3830 * We might race with alloc_surplus_hugetlb_folio() here and be unable
d1c3fb1f
NA
3831 * to convert a surplus huge page to a normal huge page. That is
3832 * not critical, though, it just means the overall size of the
3833 * pool might be one hugepage larger than it needs to be, but
3834 * within all the constraints specified by the sysctls.
7893d1d5 3835 */
a5516438 3836 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
6ae11b27 3837 if (!adjust_pool_surplus(h, nodes_allowed, -1))
7893d1d5
AL
3838 break;
3839 }
3840
d67e32f2
MK
3841 allocated = 0;
3842 while (count > (persistent_huge_pages(h) + allocated)) {
7893d1d5
AL
3843 /*
3844 * If this allocation races such that we no longer need the
454a00c4 3845 * page, free_huge_folio will handle it by freeing the page
7893d1d5
AL
3846 * and reducing the surplus.
3847 */
db71ef79 3848 spin_unlock_irq(&hugetlb_lock);
649920c6
JH
3849
3850 /* yield cpu to avoid soft lockup */
3851 cond_resched();
3852
d67e32f2 3853 folio = alloc_pool_huge_folio(h, nodes_allowed,
2e73ff23
GL
3854 node_alloc_noretry,
3855 &h->next_nid_to_alloc);
d67e32f2
MK
3856 if (!folio) {
3857 prep_and_add_allocated_folios(h, &page_list);
3858 spin_lock_irq(&hugetlb_lock);
7893d1d5 3859 goto out;
d67e32f2
MK
3860 }
3861
3862 list_add(&folio->lru, &page_list);
3863 allocated++;
7893d1d5 3864
536240f2 3865 /* Bail for signals. Probably ctrl-c from user */
d67e32f2
MK
3866 if (signal_pending(current)) {
3867 prep_and_add_allocated_folios(h, &page_list);
3868 spin_lock_irq(&hugetlb_lock);
536240f2 3869 goto out;
d67e32f2
MK
3870 }
3871
3872 spin_lock_irq(&hugetlb_lock);
3873 }
3874
3875 /* Add allocated pages to the pool */
3876 if (!list_empty(&page_list)) {
3877 spin_unlock_irq(&hugetlb_lock);
3878 prep_and_add_allocated_folios(h, &page_list);
3879 spin_lock_irq(&hugetlb_lock);
7893d1d5 3880 }
7893d1d5
AL
3881
3882 /*
3883 * Decrease the pool size
3884 * First return free pages to the buddy allocator (being careful
3885 * to keep enough around to satisfy reservations). Then place
3886 * pages into surplus state as needed so the pool will shrink
3887 * to the desired size as pages become free.
d1c3fb1f
NA
3888 *
3889 * By placing pages into the surplus state independent of the
3890 * overcommit value, we are allowing the surplus pool size to
3891 * exceed overcommit. There are few sane options here. Since
3a740e8b 3892 * alloc_surplus_hugetlb_folio() is checking the global counter,
d1c3fb1f
NA
3893 * though, we'll note that we're not allowed to exceed surplus
3894 * and won't grow the pool anywhere else. Not until one of the
3895 * sysctls are changed, or the surplus pages go out of use.
7893d1d5 3896 */
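	/* Never shrink the pool below in-use pages plus outstanding reservations. */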
a5516438 3897 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
6b0c880d 3898 min_count = max(count, min_count);
6ae11b27 3899 try_to_free_low(h, min_count, nodes_allowed);
10c6ec49
MK
3900
3901 /*
3902 * Collect pages to be removed on list without dropping lock
3903 */
a5516438 3904 while (min_count < persistent_huge_pages(h)) {
d5b43e96
MWO
3905 folio = remove_pool_hugetlb_folio(h, nodes_allowed, 0);
3906 if (!folio)
1da177e4 3907 break;
10c6ec49 3908
d5b43e96 3909 list_add(&folio->lru, &page_list);
1da177e4 3910 }
10c6ec49 3911 /* free the pages after dropping lock */
db71ef79 3912 spin_unlock_irq(&hugetlb_lock);
10c6ec49 3913 update_and_free_pages_bulk(h, &page_list);
b65d4adb 3914 flush_free_hpage_work(h);
db71ef79 3915 spin_lock_irq(&hugetlb_lock);
10c6ec49 3916
a5516438 3917 while (count < persistent_huge_pages(h)) {
6ae11b27 3918 if (!adjust_pool_surplus(h, nodes_allowed, 1))
7893d1d5
AL
3919 break;
3920 }
3921out:
4eb0716e 3922 h->max_huge_pages = persistent_huge_pages(h);
db71ef79 3923 spin_unlock_irq(&hugetlb_lock);
29383967 3924 mutex_unlock(&h->resize_lock);
4eb0716e 3925
f60858f9
MK
3926 NODEMASK_FREE(node_alloc_noretry);
3927
4eb0716e 3928 return 0;
1da177e4
LT
3929}
3930
bdd7be07 3931static int demote_free_hugetlb_folio(struct hstate *h, struct folio *folio)
8531fc6f 3932{
bdd7be07 3933 int i, nid = folio_nid(folio);
8531fc6f 3934 struct hstate *target_hstate;
31731452 3935 struct page *subpage;
bdd7be07 3936 struct folio *inner_folio;
8531fc6f
MK
3937 int rc = 0;
3938
3939 target_hstate = size_to_hstate(PAGE_SIZE << h->demote_order);
3940
cfd5082b 3941 remove_hugetlb_folio_for_demote(h, folio, false);
8531fc6f
MK
3942 spin_unlock_irq(&hugetlb_lock);
3943
d8f5f7e4
MK
3944 /*
3945 * If vmemmap already existed for folio, the remove routine above would
3946 * have cleared the hugetlb folio flag. Hence the folio is technically
c5ad3233 3947 * no longer a hugetlb folio. hugetlb_vmemmap_restore_folio can only be
d8f5f7e4
MK
3948 * passed hugetlb folios and will BUG otherwise.
3949 */
3950 if (folio_test_hugetlb(folio)) {
c5ad3233 3951 rc = hugetlb_vmemmap_restore_folio(h, folio);
d8f5f7e4
MK
3952 if (rc) {
3953			/* Allocation of vmemmap failed, we cannot demote the folio */
3954 spin_lock_irq(&hugetlb_lock);
3955 folio_ref_unfreeze(folio, 1);
3956 add_hugetlb_folio(h, folio, false);
3957 return rc;
3958 }
8531fc6f
MK
3959 }
3960
3961 /*
911565b8 3962 * Use destroy_compound_hugetlb_folio_for_demote for all huge page
bdd7be07 3963 * sizes as it will not ref count folios.
8531fc6f 3964 */
911565b8 3965 destroy_compound_hugetlb_folio_for_demote(folio, huge_page_order(h));
8531fc6f
MK
3966
3967 /*
3968 * Taking target hstate mutex synchronizes with set_max_huge_pages.
3969 * Without the mutex, pages added to target hstate could be marked
3970 * as surplus.
3971 *
3972 * Note that we already hold h->resize_lock. To prevent deadlock,
3973 * use the convention of always taking larger size hstate mutex first.
3974 */
3975 mutex_lock(&target_hstate->resize_lock);
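	/* Carve the demoted folio into target_hstate-sized pieces and hand each one to the smaller pool. */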
3976 for (i = 0; i < pages_per_huge_page(h);
3977 i += pages_per_huge_page(target_hstate)) {
bdd7be07
SK
3978 subpage = folio_page(folio, i);
3979 inner_folio = page_folio(subpage);
8531fc6f 3980 if (hstate_is_gigantic(target_hstate))
bdd7be07 3981 prep_compound_gigantic_folio_for_demote(inner_folio,
8531fc6f
MK
3982 target_hstate->order);
3983 else
31731452 3984 prep_compound_page(subpage, target_hstate->order);
bdd7be07
SK
3985 folio_change_private(inner_folio, NULL);
3986 prep_new_hugetlb_folio(target_hstate, inner_folio, nid);
454a00c4 3987 free_huge_folio(inner_folio);
8531fc6f
MK
3988 }
3989 mutex_unlock(&target_hstate->resize_lock);
3990
3991 spin_lock_irq(&hugetlb_lock);
3992
3993 /*
3994 * Not absolutely necessary, but for consistency update max_huge_pages
3995 * based on pool changes for the demoted page.
3996 */
3997 h->max_huge_pages--;
a43a83c7
ML
3998 target_hstate->max_huge_pages +=
3999 pages_per_huge_page(h) / pages_per_huge_page(target_hstate);
8531fc6f
MK
4000
4001 return rc;
4002}
4003
79dfc695
MK
4004static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
4005 __must_hold(&hugetlb_lock)
4006{
8531fc6f 4007 int nr_nodes, node;
bdd7be07 4008 struct folio *folio;
79dfc695
MK
4009
4010 lockdep_assert_held(&hugetlb_lock);
4011
4012 /* We should never get here if no demote order */
4013 if (!h->demote_order) {
4014 pr_warn("HugeTLB: NULL demote order passed to demote_pool_huge_page.\n");
4015 return -EINVAL; /* internal error */
4016 }
4017
8531fc6f 4018 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
bdd7be07
SK
4019 list_for_each_entry(folio, &h->hugepage_freelists[node], lru) {
4020 if (folio_test_hwpoison(folio))
5a317412 4021 continue;
bdd7be07 4022 return demote_free_hugetlb_folio(h, folio);
8531fc6f
MK
4023 }
4024 }
4025
5a317412
MK
4026 /*
4027 * Only way to get here is if all pages on free lists are poisoned.
4028 * Return -EBUSY so that caller will not retry.
4029 */
4030 return -EBUSY;
79dfc695
MK
4031}
4032
a3437870
NA
4033#define HSTATE_ATTR_RO(_name) \
4034 static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
4035
79dfc695
MK
4036#define HSTATE_ATTR_WO(_name) \
4037 static struct kobj_attribute _name##_attr = __ATTR_WO(_name)
4038
a3437870 4039#define HSTATE_ATTR(_name) \
98bc26ac 4040 static struct kobj_attribute _name##_attr = __ATTR_RW(_name)
a3437870
NA
4041
4042static struct kobject *hugepages_kobj;
4043static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
4044
9a305230
LS
4045static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
4046
4047static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
a3437870
NA
4048{
4049 int i;
9a305230 4050
a3437870 4051 for (i = 0; i < HUGE_MAX_HSTATE; i++)
9a305230
LS
4052 if (hstate_kobjs[i] == kobj) {
4053 if (nidp)
4054 *nidp = NUMA_NO_NODE;
a3437870 4055 return &hstates[i];
9a305230
LS
4056 }
4057
4058 return kobj_to_node_hstate(kobj, nidp);
a3437870
NA
4059}
4060
06808b08 4061static ssize_t nr_hugepages_show_common(struct kobject *kobj,
a3437870
NA
4062 struct kobj_attribute *attr, char *buf)
4063{
9a305230
LS
4064 struct hstate *h;
4065 unsigned long nr_huge_pages;
4066 int nid;
4067
4068 h = kobj_to_hstate(kobj, &nid);
4069 if (nid == NUMA_NO_NODE)
4070 nr_huge_pages = h->nr_huge_pages;
4071 else
4072 nr_huge_pages = h->nr_huge_pages_node[nid];
4073
ae7a927d 4074 return sysfs_emit(buf, "%lu\n", nr_huge_pages);
a3437870 4075}
adbe8726 4076
238d3c13
DR
4077static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
4078 struct hstate *h, int nid,
4079 unsigned long count, size_t len)
a3437870
NA
4080{
4081 int err;
2d0adf7e 4082 nodemask_t nodes_allowed, *n_mask;
a3437870 4083
2d0adf7e
OS
4084 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
4085 return -EINVAL;
adbe8726 4086
9a305230
LS
4087 if (nid == NUMA_NO_NODE) {
4088 /*
4089 * global hstate attribute
4090 */
4091 if (!(obey_mempolicy &&
2d0adf7e
OS
4092 init_nodemask_of_mempolicy(&nodes_allowed)))
4093 n_mask = &node_states[N_MEMORY];
4094 else
4095 n_mask = &nodes_allowed;
4096 } else {
9a305230 4097 /*
fd875dca
MK
4098 * Node specific request. count adjustment happens in
4099 * set_max_huge_pages() after acquiring hugetlb_lock.
9a305230 4100 */
2d0adf7e
OS
4101 init_nodemask_of_node(&nodes_allowed, nid);
4102 n_mask = &nodes_allowed;
fd875dca 4103 }
9a305230 4104
2d0adf7e 4105 err = set_max_huge_pages(h, count, nid, n_mask);
06808b08 4106
4eb0716e 4107 return err ? err : len;
06808b08
LS
4108}
4109
238d3c13
DR
4110static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
4111 struct kobject *kobj, const char *buf,
4112 size_t len)
4113{
4114 struct hstate *h;
4115 unsigned long count;
4116 int nid;
4117 int err;
4118
4119 err = kstrtoul(buf, 10, &count);
4120 if (err)
4121 return err;
4122
4123 h = kobj_to_hstate(kobj, &nid);
4124 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
4125}
4126
06808b08
LS
4127static ssize_t nr_hugepages_show(struct kobject *kobj,
4128 struct kobj_attribute *attr, char *buf)
4129{
4130 return nr_hugepages_show_common(kobj, attr, buf);
4131}
4132
4133static ssize_t nr_hugepages_store(struct kobject *kobj,
4134 struct kobj_attribute *attr, const char *buf, size_t len)
4135{
238d3c13 4136 return nr_hugepages_store_common(false, kobj, buf, len);
a3437870
NA
4137}
4138HSTATE_ATTR(nr_hugepages);
4139
06808b08
LS
4140#ifdef CONFIG_NUMA
4141
4142/*
4143 * hstate attribute for optionally mempolicy-based constraint on persistent
4144 * huge page alloc/free.
4145 */
4146static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
ae7a927d
JP
4147 struct kobj_attribute *attr,
4148 char *buf)
06808b08
LS
4149{
4150 return nr_hugepages_show_common(kobj, attr, buf);
4151}
4152
4153static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
4154 struct kobj_attribute *attr, const char *buf, size_t len)
4155{
238d3c13 4156 return nr_hugepages_store_common(true, kobj, buf, len);
06808b08
LS
4157}
4158HSTATE_ATTR(nr_hugepages_mempolicy);
4159#endif
4160
4161
a3437870
NA
4162static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
4163 struct kobj_attribute *attr, char *buf)
4164{
9a305230 4165 struct hstate *h = kobj_to_hstate(kobj, NULL);
ae7a927d 4166 return sysfs_emit(buf, "%lu\n", h->nr_overcommit_huge_pages);
a3437870 4167}
adbe8726 4168
a3437870
NA
4169static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
4170 struct kobj_attribute *attr, const char *buf, size_t count)
4171{
4172 int err;
4173 unsigned long input;
9a305230 4174 struct hstate *h = kobj_to_hstate(kobj, NULL);
a3437870 4175
bae7f4ae 4176 if (hstate_is_gigantic(h))
adbe8726
EM
4177 return -EINVAL;
4178
3dbb95f7 4179 err = kstrtoul(buf, 10, &input);
a3437870 4180 if (err)
73ae31e5 4181 return err;
a3437870 4182
db71ef79 4183 spin_lock_irq(&hugetlb_lock);
a3437870 4184 h->nr_overcommit_huge_pages = input;
db71ef79 4185 spin_unlock_irq(&hugetlb_lock);
a3437870
NA
4186
4187 return count;
4188}
4189HSTATE_ATTR(nr_overcommit_hugepages);
4190
4191static ssize_t free_hugepages_show(struct kobject *kobj,
4192 struct kobj_attribute *attr, char *buf)
4193{
9a305230
LS
4194 struct hstate *h;
4195 unsigned long free_huge_pages;
4196 int nid;
4197
4198 h = kobj_to_hstate(kobj, &nid);
4199 if (nid == NUMA_NO_NODE)
4200 free_huge_pages = h->free_huge_pages;
4201 else
4202 free_huge_pages = h->free_huge_pages_node[nid];
4203
ae7a927d 4204 return sysfs_emit(buf, "%lu\n", free_huge_pages);
a3437870
NA
4205}
4206HSTATE_ATTR_RO(free_hugepages);
4207
4208static ssize_t resv_hugepages_show(struct kobject *kobj,
4209 struct kobj_attribute *attr, char *buf)
4210{
9a305230 4211 struct hstate *h = kobj_to_hstate(kobj, NULL);
ae7a927d 4212 return sysfs_emit(buf, "%lu\n", h->resv_huge_pages);
a3437870
NA
4213}
4214HSTATE_ATTR_RO(resv_hugepages);
4215
4216static ssize_t surplus_hugepages_show(struct kobject *kobj,
4217 struct kobj_attribute *attr, char *buf)
4218{
9a305230
LS
4219 struct hstate *h;
4220 unsigned long surplus_huge_pages;
4221 int nid;
4222
4223 h = kobj_to_hstate(kobj, &nid);
4224 if (nid == NUMA_NO_NODE)
4225 surplus_huge_pages = h->surplus_huge_pages;
4226 else
4227 surplus_huge_pages = h->surplus_huge_pages_node[nid];
4228
ae7a927d 4229 return sysfs_emit(buf, "%lu\n", surplus_huge_pages);
a3437870
NA
4230}
4231HSTATE_ATTR_RO(surplus_hugepages);
4232
79dfc695
MK
4233static ssize_t demote_store(struct kobject *kobj,
4234 struct kobj_attribute *attr, const char *buf, size_t len)
4235{
4236 unsigned long nr_demote;
4237 unsigned long nr_available;
4238 nodemask_t nodes_allowed, *n_mask;
4239 struct hstate *h;
8eeda55f 4240 int err;
79dfc695
MK
4241 int nid;
4242
4243 err = kstrtoul(buf, 10, &nr_demote);
4244 if (err)
4245 return err;
4246 h = kobj_to_hstate(kobj, &nid);
4247
4248 if (nid != NUMA_NO_NODE) {
4249 init_nodemask_of_node(&nodes_allowed, nid);
4250 n_mask = &nodes_allowed;
4251 } else {
4252 n_mask = &node_states[N_MEMORY];
4253 }
4254
4255 /* Synchronize with other sysfs operations modifying huge pages */
4256 mutex_lock(&h->resize_lock);
4257 spin_lock_irq(&hugetlb_lock);
4258
4259 while (nr_demote) {
4260 /*
4261		 * Check for available pages to demote each time through the
4262 * loop as demote_pool_huge_page will drop hugetlb_lock.
79dfc695
MK
4263 */
4264 if (nid != NUMA_NO_NODE)
4265 nr_available = h->free_huge_pages_node[nid];
4266 else
4267 nr_available = h->free_huge_pages;
4268 nr_available -= h->resv_huge_pages;
4269 if (!nr_available)
4270 break;
4271
4272 err = demote_pool_huge_page(h, n_mask);
4273 if (err)
4274 break;
4275
4276 nr_demote--;
4277 }
4278
4279 spin_unlock_irq(&hugetlb_lock);
4280 mutex_unlock(&h->resize_lock);
4281
4282 if (err)
4283 return err;
4284 return len;
4285}
4286HSTATE_ATTR_WO(demote);
4287
4288static ssize_t demote_size_show(struct kobject *kobj,
4289 struct kobj_attribute *attr, char *buf)
4290{
12658abf 4291 struct hstate *h = kobj_to_hstate(kobj, NULL);
79dfc695
MK
4292 unsigned long demote_size = (PAGE_SIZE << h->demote_order) / SZ_1K;
4293
4294 return sysfs_emit(buf, "%lukB\n", demote_size);
4295}
4296
4297static ssize_t demote_size_store(struct kobject *kobj,
4298 struct kobj_attribute *attr,
4299 const char *buf, size_t count)
4300{
4301 struct hstate *h, *demote_hstate;
4302 unsigned long demote_size;
4303 unsigned int demote_order;
79dfc695
MK
4304
4305 demote_size = (unsigned long)memparse(buf, NULL);
4306
4307 demote_hstate = size_to_hstate(demote_size);
4308 if (!demote_hstate)
4309 return -EINVAL;
4310 demote_order = demote_hstate->order;
a01f4390
MK
4311 if (demote_order < HUGETLB_PAGE_ORDER)
4312 return -EINVAL;
79dfc695
MK
4313
4314 /* demote order must be smaller than hstate order */
12658abf 4315 h = kobj_to_hstate(kobj, NULL);
79dfc695
MK
4316 if (demote_order >= h->order)
4317 return -EINVAL;
4318
4319 /* resize_lock synchronizes access to demote size and writes */
4320 mutex_lock(&h->resize_lock);
4321 h->demote_order = demote_order;
4322 mutex_unlock(&h->resize_lock);
4323
4324 return count;
4325}
4326HSTATE_ATTR(demote_size);
4327
a3437870
NA
4328static struct attribute *hstate_attrs[] = {
4329 &nr_hugepages_attr.attr,
4330 &nr_overcommit_hugepages_attr.attr,
4331 &free_hugepages_attr.attr,
4332 &resv_hugepages_attr.attr,
4333 &surplus_hugepages_attr.attr,
06808b08
LS
4334#ifdef CONFIG_NUMA
4335 &nr_hugepages_mempolicy_attr.attr,
4336#endif
a3437870
NA
4337 NULL,
4338};
4339
67e5ed96 4340static const struct attribute_group hstate_attr_group = {
a3437870
NA
4341 .attrs = hstate_attrs,
4342};
4343
79dfc695
MK
4344static struct attribute *hstate_demote_attrs[] = {
4345 &demote_size_attr.attr,
4346 &demote_attr.attr,
4347 NULL,
4348};
4349
4350static const struct attribute_group hstate_demote_attr_group = {
4351 .attrs = hstate_demote_attrs,
4352};
4353
094e9539
JM
4354static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
4355 struct kobject **hstate_kobjs,
67e5ed96 4356 const struct attribute_group *hstate_attr_group)
a3437870
NA
4357{
4358 int retval;
972dc4de 4359 int hi = hstate_index(h);
a3437870 4360
9a305230
LS
4361 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
4362 if (!hstate_kobjs[hi])
a3437870
NA
4363 return -ENOMEM;
4364
9a305230 4365 retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
cc2205a6 4366 if (retval) {
9a305230 4367 kobject_put(hstate_kobjs[hi]);
cc2205a6 4368 hstate_kobjs[hi] = NULL;
3a6bdda0 4369 return retval;
cc2205a6 4370 }
a3437870 4371
79dfc695 4372 if (h->demote_order) {
01088a60
ML
4373 retval = sysfs_create_group(hstate_kobjs[hi],
4374 &hstate_demote_attr_group);
4375 if (retval) {
79dfc695 4376 pr_warn("HugeTLB unable to create demote interfaces for %s\n", h->name);
01088a60
ML
4377 sysfs_remove_group(hstate_kobjs[hi], hstate_attr_group);
4378 kobject_put(hstate_kobjs[hi]);
4379 hstate_kobjs[hi] = NULL;
4380 return retval;
4381 }
79dfc695
MK
4382 }
4383
01088a60 4384 return 0;
a3437870
NA
4385}
4386
9a305230 4387#ifdef CONFIG_NUMA
a4a00b45 4388static bool hugetlb_sysfs_initialized __ro_after_init;
9a305230
LS
4389
4390/*
4391 * node_hstate/s - associate per node hstate attributes, via their kobjects,
10fbcf4c
KS
4392 * with node devices in node_devices[] using a parallel array. The array
4393 * index of a node device or _hstate == node id.
4394 * This is here to avoid any static dependency of the node device driver, in
9a305230
LS
4395 * the base kernel, on the hugetlb module.
4396 */
4397struct node_hstate {
4398 struct kobject *hugepages_kobj;
4399 struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
4400};
b4e289a6 4401static struct node_hstate node_hstates[MAX_NUMNODES];
9a305230
LS
4402
4403/*
10fbcf4c 4404 * A subset of global hstate attributes for node devices
9a305230
LS
4405 */
4406static struct attribute *per_node_hstate_attrs[] = {
4407 &nr_hugepages_attr.attr,
4408 &free_hugepages_attr.attr,
4409 &surplus_hugepages_attr.attr,
4410 NULL,
4411};
4412
67e5ed96 4413static const struct attribute_group per_node_hstate_attr_group = {
9a305230
LS
4414 .attrs = per_node_hstate_attrs,
4415};
4416
4417/*
10fbcf4c 4418 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
9a305230
LS
4419 * Returns node id via non-NULL nidp.
4420 */
4421static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
4422{
4423 int nid;
4424
4425 for (nid = 0; nid < nr_node_ids; nid++) {
4426 struct node_hstate *nhs = &node_hstates[nid];
4427 int i;
4428 for (i = 0; i < HUGE_MAX_HSTATE; i++)
4429 if (nhs->hstate_kobjs[i] == kobj) {
4430 if (nidp)
4431 *nidp = nid;
4432 return &hstates[i];
4433 }
4434 }
4435
4436 BUG();
4437 return NULL;
4438}
4439
4440/*
10fbcf4c 4441 * Unregister hstate attributes from a single node device.
9a305230
LS
4442 * No-op if no hstate attributes attached.
4443 */
a4a00b45 4444void hugetlb_unregister_node(struct node *node)
9a305230
LS
4445{
4446 struct hstate *h;
10fbcf4c 4447 struct node_hstate *nhs = &node_hstates[node->dev.id];
9a305230
LS
4448
4449 if (!nhs->hugepages_kobj)
9b5e5d0f 4450 return; /* no hstate attributes */
9a305230 4451
972dc4de
AK
4452 for_each_hstate(h) {
4453 int idx = hstate_index(h);
01088a60
ML
4454 struct kobject *hstate_kobj = nhs->hstate_kobjs[idx];
4455
4456 if (!hstate_kobj)
4457 continue;
4458 if (h->demote_order)
4459 sysfs_remove_group(hstate_kobj, &hstate_demote_attr_group);
4460 sysfs_remove_group(hstate_kobj, &per_node_hstate_attr_group);
4461 kobject_put(hstate_kobj);
4462 nhs->hstate_kobjs[idx] = NULL;
972dc4de 4463 }
9a305230
LS
4464
4465 kobject_put(nhs->hugepages_kobj);
4466 nhs->hugepages_kobj = NULL;
4467}
4468
9a305230
LS
4469
4470/*
10fbcf4c 4471 * Register hstate attributes for a single node device.
9a305230
LS
4472 * No-op if attributes already registered.
4473 */
a4a00b45 4474void hugetlb_register_node(struct node *node)
9a305230
LS
4475{
4476 struct hstate *h;
10fbcf4c 4477 struct node_hstate *nhs = &node_hstates[node->dev.id];
9a305230
LS
4478 int err;
4479
a4a00b45
MS
4480 if (!hugetlb_sysfs_initialized)
4481 return;
4482
9a305230
LS
4483 if (nhs->hugepages_kobj)
4484 return; /* already allocated */
4485
4486 nhs->hugepages_kobj = kobject_create_and_add("hugepages",
10fbcf4c 4487 &node->dev.kobj);
9a305230
LS
4488 if (!nhs->hugepages_kobj)
4489 return;
4490
4491 for_each_hstate(h) {
4492 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
4493 nhs->hstate_kobjs,
4494 &per_node_hstate_attr_group);
4495 if (err) {
282f4214 4496 pr_err("HugeTLB: Unable to add hstate %s for node %d\n",
ffb22af5 4497 h->name, node->dev.id);
9a305230
LS
4498 hugetlb_unregister_node(node);
4499 break;
4500 }
4501 }
4502}
4503
4504/*
9b5e5d0f 4505 * hugetlb init time: register hstate attributes for all registered node
10fbcf4c
KS
4506 * devices of nodes that have memory. All on-line nodes should have
4507 * registered their associated device by this time.
9a305230 4508 */
7d9ca000 4509static void __init hugetlb_register_all_nodes(void)
9a305230
LS
4510{
4511 int nid;
4512
a4a00b45 4513 for_each_online_node(nid)
b958d4d0 4514 hugetlb_register_node(node_devices[nid]);
9a305230
LS
4515}
4516#else /* !CONFIG_NUMA */
4517
4518static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
4519{
4520 BUG();
4521 if (nidp)
4522 *nidp = -1;
4523 return NULL;
4524}
4525
9a305230
LS
4526static void hugetlb_register_all_nodes(void) { }
4527
4528#endif
4529
263b8998
ML
4530#ifdef CONFIG_CMA
4531static void __init hugetlb_cma_check(void);
4532#else
4533static inline __init void hugetlb_cma_check(void)
4534{
4535}
4536#endif
4537
a4a00b45
MS
4538static void __init hugetlb_sysfs_init(void)
4539{
4540 struct hstate *h;
4541 int err;
4542
4543 hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
4544 if (!hugepages_kobj)
4545 return;
4546
4547 for_each_hstate(h) {
4548 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
4549 hstate_kobjs, &hstate_attr_group);
4550 if (err)
4551 pr_err("HugeTLB: Unable to add hstate %s", h->name);
4552 }
4553
4554#ifdef CONFIG_NUMA
4555 hugetlb_sysfs_initialized = true;
4556#endif
4557 hugetlb_register_all_nodes();
4558}
4559
962de548
KW
4560#ifdef CONFIG_SYSCTL
4561static void hugetlb_sysctl_init(void);
4562#else
4563static inline void hugetlb_sysctl_init(void) { }
4564#endif
4565
a3437870
NA
4566static int __init hugetlb_init(void)
4567{
8382d914
DB
4568 int i;
4569
d6995da3
MK
4570 BUILD_BUG_ON(sizeof_field(struct page, private) * BITS_PER_BYTE <
4571 __NR_HPAGEFLAGS);
4572
c2833a5b
MK
4573 if (!hugepages_supported()) {
4574 if (hugetlb_max_hstate || default_hstate_max_huge_pages)
4575 pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n");
0ef89d25 4576 return 0;
c2833a5b 4577 }
a3437870 4578
282f4214
MK
4579 /*
4580 * Make sure HPAGE_SIZE (HUGETLB_PAGE_ORDER) hstate exists. Some
4581 * architectures depend on setup being done here.
4582 */
4583 hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
4584 if (!parsed_default_hugepagesz) {
4585 /*
4586 * If we did not parse a default huge page size, set
4587 * default_hstate_idx to HPAGE_SIZE hstate. And, if the
4588 * number of huge pages for this default size was implicitly
4589 * specified, set that here as well.
4590 * Note that the implicit setting will overwrite an explicit
4591 * setting. A warning will be printed in this case.
4592 */
4593 default_hstate_idx = hstate_index(size_to_hstate(HPAGE_SIZE));
4594 if (default_hstate_max_huge_pages) {
4595 if (default_hstate.max_huge_pages) {
4596 char buf[32];
4597
4598 string_get_size(huge_page_size(&default_hstate),
4599 1, STRING_UNITS_2, buf, 32);
4600 pr_warn("HugeTLB: Ignoring hugepages=%lu associated with %s page size\n",
4601 default_hstate.max_huge_pages, buf);
4602 pr_warn("HugeTLB: Using hugepages=%lu for number of default huge pages\n",
4603 default_hstate_max_huge_pages);
4604 }
4605 default_hstate.max_huge_pages =
4606 default_hstate_max_huge_pages;
b5389086 4607
0a7a0f6f 4608 for_each_online_node(i)
b5389086
ZY
4609 default_hstate.max_huge_pages_node[i] =
4610 default_hugepages_in_node[i];
d715cf80 4611 }
f8b74815 4612 }
a3437870 4613
cf11e85f 4614 hugetlb_cma_check();
a3437870 4615 hugetlb_init_hstates();
aa888a74 4616 gather_bootmem_prealloc();
a3437870
NA
4617 report_hugepages();
4618
4619 hugetlb_sysfs_init();
7179e7bf 4620 hugetlb_cgroup_file_init();
962de548 4621 hugetlb_sysctl_init();
9a305230 4622
8382d914
DB
4623#ifdef CONFIG_SMP
4624 num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
4625#else
4626 num_fault_mutexes = 1;
4627#endif
c672c7f2 4628 hugetlb_fault_mutex_table =
6da2ec56
KC
4629 kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
4630 GFP_KERNEL);
c672c7f2 4631 BUG_ON(!hugetlb_fault_mutex_table);
8382d914
DB
4632
4633 for (i = 0; i < num_fault_mutexes; i++)
c672c7f2 4634 mutex_init(&hugetlb_fault_mutex_table[i]);
a3437870
NA
4635 return 0;
4636}
3e89e1c5 4637subsys_initcall(hugetlb_init);
a3437870 4638
ae94da89
MK
4639/* Overwritten by architectures with more huge page sizes */
4640bool __init __attribute((weak)) arch_hugetlb_valid_size(unsigned long size)
9fee021d 4641{
ae94da89 4642 return size == HPAGE_SIZE;
9fee021d
VT
4643}
4644
d00181b9 4645void __init hugetlb_add_hstate(unsigned int order)
a3437870
NA
4646{
4647 struct hstate *h;
8faa8b07
AK
4648 unsigned long i;
4649
a3437870 4650 if (size_to_hstate(PAGE_SIZE << order)) {
a3437870
NA
4651 return;
4652 }
47d38344 4653 BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
59838b25 4654 BUG_ON(order < order_base_2(__NR_USED_SUBPAGE));
47d38344 4655 h = &hstates[hugetlb_max_hstate++];
29383967 4656 mutex_init(&h->resize_lock);
a3437870 4657 h->order = order;
aca78307 4658 h->mask = ~(huge_page_size(h) - 1);
8faa8b07
AK
4659 for (i = 0; i < MAX_NUMNODES; ++i)
4660 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
0edaecfa 4661 INIT_LIST_HEAD(&h->hugepage_activelist);
54f18d35
AM
4662 h->next_nid_to_alloc = first_memory_node;
4663 h->next_nid_to_free = first_memory_node;
a3437870 4664 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
c2c3a60a 4665 huge_page_size(h)/SZ_1K);
8faa8b07 4666
a3437870
NA
4667 parsed_hstate = h;
4668}
4669
b5389086
ZY
4670bool __init __weak hugetlb_node_alloc_supported(void)
4671{
4672 return true;
4673}
f87442f4
PL
4674
4675static void __init hugepages_clear_pages_in_node(void)
4676{
4677 if (!hugetlb_max_hstate) {
4678 default_hstate_max_huge_pages = 0;
4679 memset(default_hugepages_in_node, 0,
10395680 4680 sizeof(default_hugepages_in_node));
f87442f4
PL
4681 } else {
4682 parsed_hstate->max_huge_pages = 0;
4683 memset(parsed_hstate->max_huge_pages_node, 0,
10395680 4684 sizeof(parsed_hstate->max_huge_pages_node));
f87442f4
PL
4685 }
4686}
4687
282f4214
MK
4688/*
4689 * hugepages command line processing
4690	 * hugepages normally follows a valid hugepagesz or default_hugepagesz
4691 * specification. If not, ignore the hugepages value. hugepages can also
4692 * be the first huge page command line option in which case it implicitly
4693 * specifies the number of huge pages for the default size.
4694 */
4695static int __init hugepages_setup(char *s)
a3437870
NA
4696{
4697 unsigned long *mhp;
8faa8b07 4698 static unsigned long *last_mhp;
b5389086
ZY
4699 int node = NUMA_NO_NODE;
4700 int count;
4701 unsigned long tmp;
4702 char *p = s;
a3437870 4703
9fee021d 4704 if (!parsed_valid_hugepagesz) {
282f4214 4705 pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s);
9fee021d 4706 parsed_valid_hugepagesz = true;
f81f6e4b 4707 return 1;
9fee021d 4708 }
282f4214 4709
a3437870 4710 /*
282f4214
MK
4711 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter
4712 * yet, so this hugepages= parameter goes to the "default hstate".
4713 * Otherwise, it goes with the previously parsed hugepagesz or
4714 * default_hugepagesz.
a3437870 4715 */
9fee021d 4716 else if (!hugetlb_max_hstate)
a3437870
NA
4717 mhp = &default_hstate_max_huge_pages;
4718 else
4719 mhp = &parsed_hstate->max_huge_pages;
4720
8faa8b07 4721 if (mhp == last_mhp) {
282f4214 4722 pr_warn("HugeTLB: hugepages= specified twice without interleaving hugepagesz=, ignoring hugepages=%s\n", s);
f81f6e4b 4723 return 1;
8faa8b07
AK
4724 }
4725
b5389086
ZY
4726 while (*p) {
4727 count = 0;
4728 if (sscanf(p, "%lu%n", &tmp, &count) != 1)
4729 goto invalid;
4730 /* Parameter is node format */
4731 if (p[count] == ':') {
4732 if (!hugetlb_node_alloc_supported()) {
4733 pr_warn("HugeTLB: architecture can't support node specific alloc, ignoring!\n");
f81f6e4b 4734 return 1;
b5389086 4735 }
0a7a0f6f 4736 if (tmp >= MAX_NUMNODES || !node_online(tmp))
e79ce983 4737 goto invalid;
0a7a0f6f 4738 node = array_index_nospec(tmp, MAX_NUMNODES);
b5389086 4739 p += count + 1;
b5389086
ZY
4740 /* Parse hugepages */
4741 if (sscanf(p, "%lu%n", &tmp, &count) != 1)
4742 goto invalid;
4743 if (!hugetlb_max_hstate)
4744 default_hugepages_in_node[node] = tmp;
4745 else
4746 parsed_hstate->max_huge_pages_node[node] = tmp;
4747 *mhp += tmp;
4749			/* Go to parse next node */
4749 if (p[count] == ',')
4750 p += count + 1;
4751 else
4752 break;
4753 } else {
4754 if (p != s)
4755 goto invalid;
4756 *mhp = tmp;
4757 break;
4758 }
4759 }
a3437870 4760
8faa8b07
AK
4761 /*
4762 * Global state is always initialized later in hugetlb_init.
04adbc3f 4763 * But we need to allocate gigantic hstates here early to still
8faa8b07
AK
4764 * use the bootmem allocator.
4765 */
04adbc3f 4766 if (hugetlb_max_hstate && hstate_is_gigantic(parsed_hstate))
8faa8b07
AK
4767 hugetlb_hstate_alloc_pages(parsed_hstate);
4768
4769 last_mhp = mhp;
4770
a3437870 4771 return 1;
b5389086
ZY
4772
4773invalid:
4774 pr_warn("HugeTLB: Invalid hugepages parameter %s\n", p);
f87442f4 4775 hugepages_clear_pages_in_node();
f81f6e4b 4776 return 1;
a3437870 4777}
282f4214 4778__setup("hugepages=", hugepages_setup);
e11bfbfc 4779
282f4214
MK
4780/*
4781 * hugepagesz command line processing
4782 * A specific huge page size can only be specified once with hugepagesz.
4783 * hugepagesz is followed by hugepages on the command line. The global
4784 * variable 'parsed_valid_hugepagesz' is used to determine if a prior
4785 * hugepagesz argument was valid.
4786 */
359f2544 4787static int __init hugepagesz_setup(char *s)
e11bfbfc 4788{
359f2544 4789 unsigned long size;
282f4214
MK
4790 struct hstate *h;
4791
4792 parsed_valid_hugepagesz = false;
359f2544
MK
4793 size = (unsigned long)memparse(s, NULL);
4794
4795 if (!arch_hugetlb_valid_size(size)) {
282f4214 4796 pr_err("HugeTLB: unsupported hugepagesz=%s\n", s);
f81f6e4b 4797 return 1;
359f2544
MK
4798 }
4799
282f4214
MK
4800 h = size_to_hstate(size);
4801 if (h) {
4802 /*
4803 * hstate for this size already exists. This is normally
4804 * an error, but is allowed if the existing hstate is the
4805 * default hstate. More specifically, it is only allowed if
4806 * the number of huge pages for the default hstate was not
4807 * previously specified.
4808 */
4809 if (!parsed_default_hugepagesz || h != &default_hstate ||
4810 default_hstate.max_huge_pages) {
4811 pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s);
f81f6e4b 4812 return 1;
282f4214
MK
4813 }
4814
4815 /*
4816 * No need to call hugetlb_add_hstate() as hstate already
4817 * exists. But, do set parsed_hstate so that a following
4818 * hugepages= parameter will be applied to this hstate.
4819 */
4820 parsed_hstate = h;
4821 parsed_valid_hugepagesz = true;
4822 return 1;
38237830
MK
4823 }
4824
359f2544 4825 hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
282f4214 4826 parsed_valid_hugepagesz = true;
e11bfbfc
NP
4827 return 1;
4828}
359f2544
MK
4829__setup("hugepagesz=", hugepagesz_setup);
4830
282f4214
MK
4831/*
4832 * default_hugepagesz command line input
4833 * Only one instance of default_hugepagesz allowed on command line.
4834 */
ae94da89 4835static int __init default_hugepagesz_setup(char *s)
e11bfbfc 4836{
ae94da89 4837 unsigned long size;
b5389086 4838 int i;
ae94da89 4839
282f4214 4840 parsed_valid_hugepagesz = false;
282f4214
MK
4841 if (parsed_default_hugepagesz) {
4842 pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s);
f81f6e4b 4843 return 1;
282f4214
MK
4844 }
4845
ae94da89
MK
4846 size = (unsigned long)memparse(s, NULL);
4847
4848 if (!arch_hugetlb_valid_size(size)) {
282f4214 4849 pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s);
f81f6e4b 4850 return 1;
ae94da89
MK
4851 }
4852
282f4214
MK
4853 hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
4854 parsed_valid_hugepagesz = true;
4855 parsed_default_hugepagesz = true;
4856 default_hstate_idx = hstate_index(size_to_hstate(size));
4857
4858 /*
4859 * The number of default huge pages (for this size) could have been
4860 * specified as the first hugetlb parameter: hugepages=X. If so,
4861 * then default_hstate_max_huge_pages is set. If the default huge
5e0a760b 4862 * page size is gigantic (> MAX_PAGE_ORDER), then the pages must be
282f4214
MK
4863 * allocated here from bootmem allocator.
4864 */
4865 if (default_hstate_max_huge_pages) {
4866 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
0a7a0f6f 4867 for_each_online_node(i)
b5389086
ZY
4868 default_hstate.max_huge_pages_node[i] =
4869 default_hugepages_in_node[i];
282f4214
MK
4870 if (hstate_is_gigantic(&default_hstate))
4871 hugetlb_hstate_alloc_pages(&default_hstate);
4872 default_hstate_max_huge_pages = 0;
4873 }
4874
e11bfbfc
NP
4875 return 1;
4876}
ae94da89 4877__setup("default_hugepagesz=", default_hugepagesz_setup);
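/*
 * Illustrative only: because hugepages= may legally appear before any size
 * has been specified, the following command line (1G assumes x86-64) ends
 * up allocating two 1G pages for the default hstate:
 *
 *   hugepages=2 default_hugepagesz=1G
 *
 * The early hugepages=2 is parked in default_hstate_max_huge_pages and is
 * applied here once the default size is known.
 */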
a3437870 4878
d2226ebd
FT
4879static nodemask_t *policy_mbind_nodemask(gfp_t gfp)
4880{
4881#ifdef CONFIG_NUMA
4882 struct mempolicy *mpol = get_task_policy(current);
4883
4884 /*
4885 * Only enforce MPOL_BIND policy which overlaps with cpuset policy
4886 * (from policy_nodemask) specifically for hugetlb case
4887 */
4888 if (mpol->mode == MPOL_BIND &&
4889 (apply_policy_zone(mpol, gfp_zone(gfp)) &&
4890 cpuset_nodemask_valid_mems_allowed(&mpol->nodes)))
4891 return &mpol->nodes;
4892#endif
4893 return NULL;
4894}
4895
8ca39e68 4896static unsigned int allowed_mems_nr(struct hstate *h)
8a213460
NA
4897{
4898 int node;
4899 unsigned int nr = 0;
d2226ebd 4900 nodemask_t *mbind_nodemask;
8ca39e68
MS
4901 unsigned int *array = h->free_huge_pages_node;
4902 gfp_t gfp_mask = htlb_alloc_mask(h);
4903
d2226ebd 4904 mbind_nodemask = policy_mbind_nodemask(gfp_mask);
8ca39e68 4905 for_each_node_mask(node, cpuset_current_mems_allowed) {
d2226ebd 4906 if (!mbind_nodemask || node_isset(node, *mbind_nodemask))
8ca39e68
MS
4907 nr += array[node];
4908 }
8a213460
NA
4909
4910 return nr;
4911}
4912
4913#ifdef CONFIG_SYSCTL
17743798
MS
4914static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write,
4915 void *buffer, size_t *length,
4916 loff_t *ppos, unsigned long *out)
4917{
4918 struct ctl_table dup_table;
4919
4920 /*
4921 * In order to avoid races with __do_proc_doulongvec_minmax(), we
4922 * can duplicate the @table and alter only the duplicate.
4923 */
4924 dup_table = *table;
4925 dup_table.data = out;
4926
4927 return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos);
4928}
4929
06808b08
LS
4930static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
4931 struct ctl_table *table, int write,
32927393 4932 void *buffer, size_t *length, loff_t *ppos)
1da177e4 4933{
e5ff2159 4934 struct hstate *h = &default_hstate;
238d3c13 4935 unsigned long tmp = h->max_huge_pages;
08d4a246 4936 int ret;
e5ff2159 4937
457c1b27 4938 if (!hugepages_supported())
86613628 4939 return -EOPNOTSUPP;
457c1b27 4940
17743798
MS
4941 ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
4942 &tmp);
08d4a246
MH
4943 if (ret)
4944 goto out;
e5ff2159 4945
238d3c13
DR
4946 if (write)
4947 ret = __nr_hugepages_store_common(obey_mempolicy, h,
4948 NUMA_NO_NODE, tmp, *length);
08d4a246
MH
4949out:
4950 return ret;
1da177e4 4951}
396faf03 4952
962de548 4953static int hugetlb_sysctl_handler(struct ctl_table *table, int write,
32927393 4954 void *buffer, size_t *length, loff_t *ppos)
06808b08
LS
4955{
4956
4957 return hugetlb_sysctl_handler_common(false, table, write,
4958 buffer, length, ppos);
4959}
4960
4961#ifdef CONFIG_NUMA
962de548 4962static int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
32927393 4963 void *buffer, size_t *length, loff_t *ppos)
06808b08
LS
4964{
4965 return hugetlb_sysctl_handler_common(true, table, write,
4966 buffer, length, ppos);
4967}
4968#endif /* CONFIG_NUMA */
4969
962de548 4970static int hugetlb_overcommit_handler(struct ctl_table *table, int write,
32927393 4971 void *buffer, size_t *length, loff_t *ppos)
a3d0c6aa 4972{
a5516438 4973 struct hstate *h = &default_hstate;
e5ff2159 4974 unsigned long tmp;
08d4a246 4975 int ret;
e5ff2159 4976
457c1b27 4977 if (!hugepages_supported())
86613628 4978 return -EOPNOTSUPP;
457c1b27 4979
c033a93c 4980 tmp = h->nr_overcommit_huge_pages;
e5ff2159 4981
bae7f4ae 4982 if (write && hstate_is_gigantic(h))
adbe8726
EM
4983 return -EINVAL;
4984
17743798
MS
4985 ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
4986 &tmp);
08d4a246
MH
4987 if (ret)
4988 goto out;
e5ff2159
AK
4989
4990 if (write) {
db71ef79 4991 spin_lock_irq(&hugetlb_lock);
e5ff2159 4992 h->nr_overcommit_huge_pages = tmp;
db71ef79 4993 spin_unlock_irq(&hugetlb_lock);
e5ff2159 4994 }
08d4a246
MH
4995out:
4996 return ret;
a3d0c6aa
NA
4997}
4998
962de548
KW
4999static struct ctl_table hugetlb_table[] = {
5000 {
5001 .procname = "nr_hugepages",
5002 .data = NULL,
5003 .maxlen = sizeof(unsigned long),
5004 .mode = 0644,
5005 .proc_handler = hugetlb_sysctl_handler,
5006 },
5007#ifdef CONFIG_NUMA
5008 {
5009 .procname = "nr_hugepages_mempolicy",
5010 .data = NULL,
5011 .maxlen = sizeof(unsigned long),
5012 .mode = 0644,
5013 .proc_handler = &hugetlb_mempolicy_sysctl_handler,
5014 },
5015#endif
5016 {
5017 .procname = "hugetlb_shm_group",
5018 .data = &sysctl_hugetlb_shm_group,
5019 .maxlen = sizeof(gid_t),
5020 .mode = 0644,
5021 .proc_handler = proc_dointvec,
5022 },
5023 {
5024 .procname = "nr_overcommit_hugepages",
5025 .data = NULL,
5026 .maxlen = sizeof(unsigned long),
5027 .mode = 0644,
5028 .proc_handler = hugetlb_overcommit_handler,
5029 },
962de548
KW
5030};
5031
5032static void hugetlb_sysctl_init(void)
5033{
5034 register_sysctl_init("vm", hugetlb_table);
5035}
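/*
 * The table above is registered under "vm", so the handlers are reached
 * through /proc/sys/vm/. Illustrative usage from a shell:
 *
 *   sysctl vm.nr_hugepages=512
 *   echo 64 > /proc/sys/vm/nr_overcommit_hugepages
 *   sysctl vm.nr_hugepages_mempolicy=512     (CONFIG_NUMA only)
 */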
1da177e4
LT
5036#endif /* CONFIG_SYSCTL */
5037
e1759c21 5038void hugetlb_report_meminfo(struct seq_file *m)
1da177e4 5039{
fcb2b0c5
RG
5040 struct hstate *h;
5041 unsigned long total = 0;
5042
457c1b27
NA
5043 if (!hugepages_supported())
5044 return;
fcb2b0c5
RG
5045
5046 for_each_hstate(h) {
5047 unsigned long count = h->nr_huge_pages;
5048
aca78307 5049 total += huge_page_size(h) * count;
fcb2b0c5
RG
5050
5051 if (h == &default_hstate)
5052 seq_printf(m,
5053 "HugePages_Total: %5lu\n"
5054 "HugePages_Free: %5lu\n"
5055 "HugePages_Rsvd: %5lu\n"
5056 "HugePages_Surp: %5lu\n"
5057 "Hugepagesize: %8lu kB\n",
5058 count,
5059 h->free_huge_pages,
5060 h->resv_huge_pages,
5061 h->surplus_huge_pages,
aca78307 5062 huge_page_size(h) / SZ_1K);
fcb2b0c5
RG
5063 }
5064
aca78307 5065 seq_printf(m, "Hugetlb: %8lu kB\n", total / SZ_1K);
1da177e4
LT
5066}
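/*
 * Example of the output produced by the seq_printf() calls above, as it
 * appears in /proc/meminfo (values are illustrative: 512 free 2M pages,
 * none reserved or surplus):
 *
 *   HugePages_Total:     512
 *   HugePages_Free:      512
 *   HugePages_Rsvd:        0
 *   HugePages_Surp:        0
 *   Hugepagesize:       2048 kB
 *   Hugetlb:         1048576 kB
 */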
5067
7981593b 5068int hugetlb_report_node_meminfo(char *buf, int len, int nid)
1da177e4 5069{
a5516438 5070 struct hstate *h = &default_hstate;
7981593b 5071
457c1b27
NA
5072 if (!hugepages_supported())
5073 return 0;
7981593b
JP
5074
5075 return sysfs_emit_at(buf, len,
5076 "Node %d HugePages_Total: %5u\n"
5077 "Node %d HugePages_Free: %5u\n"
5078 "Node %d HugePages_Surp: %5u\n",
5079 nid, h->nr_huge_pages_node[nid],
5080 nid, h->free_huge_pages_node[nid],
5081 nid, h->surplus_huge_pages_node[nid]);
1da177e4
LT
5082}
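/*
 * Example rendering of the sysfs_emit_at() format above (values are
 * illustrative), typically read via the per-node meminfo file in sysfs:
 *
 *   Node 0 HugePages_Total:   256
 *   Node 0 HugePages_Free:    200
 *   Node 0 HugePages_Surp:      0
 */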
5083
dcadcf1c 5084void hugetlb_show_meminfo_node(int nid)
949f7ec5
DR
5085{
5086 struct hstate *h;
949f7ec5 5087
457c1b27
NA
5088 if (!hugepages_supported())
5089 return;
5090
dcadcf1c
GL
5091 for_each_hstate(h)
5092 printk("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
5093 nid,
5094 h->nr_huge_pages_node[nid],
5095 h->free_huge_pages_node[nid],
5096 h->surplus_huge_pages_node[nid],
5097 huge_page_size(h) / SZ_1K);
949f7ec5
DR
5098}
5099
5d317b2b
NH
5100void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
5101{
5102 seq_printf(m, "HugetlbPages:\t%8lu kB\n",
6c1aa2d3 5103 K(atomic_long_read(&mm->hugetlb_usage)));
5d317b2b
NH
5104}
5105
1da177e4
LT
5106/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
5107unsigned long hugetlb_total_pages(void)
5108{
d0028588
WL
5109 struct hstate *h;
5110 unsigned long nr_total_pages = 0;
5111
5112 for_each_hstate(h)
5113 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
5114 return nr_total_pages;
1da177e4 5115}
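/*
 * Worked example (illustrative): with 512 huge pages of 2M each on a 4K
 * PAGE_SIZE system, pages_per_huge_page() is 512, so this returns
 * 512 * 512 = 262144 base pages, i.e. 1G of memory.
 */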
1da177e4 5116
a5516438 5117static int hugetlb_acct_memory(struct hstate *h, long delta)
fc1b8a73
MG
5118{
5119 int ret = -ENOMEM;
5120
0aa7f354
ML
5121 if (!delta)
5122 return 0;
5123
db71ef79 5124 spin_lock_irq(&hugetlb_lock);
fc1b8a73
MG
5125 /*
5126 * When cpuset is configured, it breaks the strict hugetlb page
5127 * reservation as the accounting is done on a global variable. Such
5128 * reservation is completely rubbish in the presence of cpuset because
5129 * the reservation is not checked against page availability for the
5130 * current cpuset. An application can still potentially be OOM'ed by the
5131 * kernel for lack of free hugetlb pages in the cpuset that the task is in.
5132 * Attempting to enforce strict accounting with cpuset is almost
5133 * impossible (or too ugly) because cpuset is so fluid that
5134 * tasks or memory nodes can be dynamically moved between cpusets.
5135 *
5136 * The change of semantics for shared hugetlb mapping with cpuset is
5137 * undesirable. However, in order to preserve some of the semantics,
5138 * we fall back to check against current free page availability as
5139 * a best attempt and hopefully to minimize the impact of changing
5140 * semantics that cpuset has.
8ca39e68
MS
5141 *
5142 * Apart from cpuset, the memory policy mechanism also determines
5143 * from which node the kernel will allocate memory in a NUMA
5144 * system. So, similar to cpuset, we should also consider the
5145 * memory policy of the current task, for the same reasons
5146 * described above.
fc1b8a73
MG
5147 */
5148 if (delta > 0) {
a5516438 5149 if (gather_surplus_pages(h, delta) < 0)
fc1b8a73
MG
5150 goto out;
5151
8ca39e68 5152 if (delta > allowed_mems_nr(h)) {
a5516438 5153 return_unused_surplus_pages(h, delta);
fc1b8a73
MG
5154 goto out;
5155 }
5156 }
5157
5158 ret = 0;
5159 if (delta < 0)
a5516438 5160 return_unused_surplus_pages(h, (unsigned long) -delta);
fc1b8a73
MG
5161
5162out:
db71ef79 5163 spin_unlock_irq(&hugetlb_lock);
fc1b8a73
MG
5164 return ret;
5165}
5166
84afd99b
AW
5167static void hugetlb_vm_op_open(struct vm_area_struct *vma)
5168{
f522c3ac 5169 struct resv_map *resv = vma_resv_map(vma);
84afd99b
AW
5170
5171 /*
612b8a31 5172 * HPAGE_RESV_OWNER indicates a private mapping.
84afd99b
AW
5173 * This new VMA should share its siblings reservation map if present.
5174 * The VMA will only ever have a valid reservation map pointer where
5175 * it is being copied for another still existing VMA. As that VMA
25985edc 5176 * has a reference to the reservation map it cannot disappear until
84afd99b
AW
5177 * after this open call completes. It is therefore safe to take a
5178 * new reference here without additional locking.
5179 */
09a26e83
MK
5180 if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
5181 resv_map_dup_hugetlb_cgroup_uncharge_info(resv);
f522c3ac 5182 kref_get(&resv->refs);
09a26e83 5183 }
8d9bfb26 5184
131a79b4
MK
5185 /*
5186 * vma_lock structure for sharable mappings is vma specific.
612b8a31
MK
5187 * Clear old pointer (if copied via vm_area_dup) and allocate
5188 * new structure. Before clearing, make sure vma_lock is not
5189 * for this vma.
131a79b4
MK
5190 */
5191 if (vma->vm_flags & VM_MAYSHARE) {
612b8a31
MK
5192 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
5193
5194 if (vma_lock) {
5195 if (vma_lock->vma != vma) {
5196 vma->vm_private_data = NULL;
5197 hugetlb_vma_lock_alloc(vma);
5198 } else
5199 pr_warn("HugeTLB: vma_lock already exists in %s.\n", __func__);
5200 } else
5201 hugetlb_vma_lock_alloc(vma);
131a79b4 5202 }
84afd99b
AW
5203}
5204
a1e78772
MG
5205static void hugetlb_vm_op_close(struct vm_area_struct *vma)
5206{
a5516438 5207 struct hstate *h = hstate_vma(vma);
8d9bfb26 5208 struct resv_map *resv;
90481622 5209 struct hugepage_subpool *spool = subpool_vma(vma);
4e35f483 5210 unsigned long reserve, start, end;
1c5ecae3 5211 long gbl_reserve;
84afd99b 5212
8d9bfb26
MK
5213 hugetlb_vma_lock_free(vma);
5214
5215 resv = vma_resv_map(vma);
4e35f483
JK
5216 if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
5217 return;
84afd99b 5218
4e35f483
JK
5219 start = vma_hugecache_offset(h, vma, vma->vm_start);
5220 end = vma_hugecache_offset(h, vma, vma->vm_end);
84afd99b 5221
4e35f483 5222 reserve = (end - start) - region_count(resv, start, end);
e9fe92ae 5223 hugetlb_cgroup_uncharge_counter(resv, start, end);
4e35f483 5224 if (reserve) {
1c5ecae3
MK
5225 /*
5226 * Decrement reserve counts. The global reserve count may be
5227 * adjusted if the subpool has a minimum size.
5228 */
5229 gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
5230 hugetlb_acct_memory(h, -gbl_reserve);
84afd99b 5231 }
e9fe92ae
MA
5232
5233 kref_put(&resv->refs, resv_map_release);
a1e78772
MG
5234}
5235
31383c68
DW
5236static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
5237{
5238 if (addr & ~(huge_page_mask(hstate_vma(vma))))
5239 return -EINVAL;
b30c14cd
JH
5240
5241 /*
5242 * PMD sharing is only possible for PUD_SIZE-aligned address ranges
5243 * in HugeTLB VMAs. If we will lose PUD_SIZE alignment due to this
5244 * split, unshare PMDs in the PUD_SIZE interval surrounding addr now.
5245 */
5246 if (addr & ~PUD_MASK) {
5247 /*
5248 * hugetlb_vm_op_split is called right before we attempt to
5249 * split the VMA. We will need to unshare PMDs in the old and
5250 * new VMAs, so let's unshare before we split.
5251 */
5252 unsigned long floor = addr & PUD_MASK;
5253 unsigned long ceil = floor + PUD_SIZE;
5254
5255 if (floor >= vma->vm_start && ceil <= vma->vm_end)
5256 hugetlb_unshare_pmds(vma, floor, ceil);
5257 }
5258
31383c68
DW
5259 return 0;
5260}
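/*
 * Concrete example (assuming x86-64, where PUD_SIZE is 1G): splitting a
 * shared mapping at addr 0x40200000 is not PUD aligned, so floor becomes
 * 0x40000000 and ceil becomes 0x80000000; if that 1G window lies inside
 * the VMA, any PMDs shared within it are unshared before the split.
 */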
5261
05ea8860
DW
5262static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
5263{
aca78307 5264 return huge_page_size(hstate_vma(vma));
05ea8860
DW
5265}
5266
1da177e4
LT
5267/*
5268 * We cannot handle pagefaults against hugetlb pages at all. They cause
5269 * handle_mm_fault() to try to instantiate regular-sized pages in the
6c26d310 5270 * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
1da177e4
LT
5271 * this far.
5272 */
b3ec9f33 5273static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
1da177e4
LT
5274{
5275 BUG();
d0217ac0 5276 return 0;
1da177e4
LT
5277}
5278
eec3636a
JC
5279/*
5280 * When a new function is introduced to vm_operations_struct and added
5281 * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
5282 * This is because under the System V memory model, mappings created via
5283 * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
5284 * and their original vm_ops are overwritten with shm_vm_ops.
5285 */
f0f37e2f 5286const struct vm_operations_struct hugetlb_vm_ops = {
d0217ac0 5287 .fault = hugetlb_vm_op_fault,
84afd99b 5288 .open = hugetlb_vm_op_open,
a1e78772 5289 .close = hugetlb_vm_op_close,
dd3b614f 5290 .may_split = hugetlb_vm_op_split,
05ea8860 5291 .pagesize = hugetlb_vm_op_pagesize,
1da177e4
LT
5292};
5293
1e8f889b
DG
5294static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
5295 int writable)
63551ae0
DG
5296{
5297 pte_t entry;
79c1c594 5298 unsigned int shift = huge_page_shift(hstate_vma(vma));
63551ae0 5299
1e8f889b 5300 if (writable) {
106c992a
GS
5301 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
5302 vma->vm_page_prot)));
63551ae0 5303 } else {
106c992a
GS
5304 entry = huge_pte_wrprotect(mk_huge_pte(page,
5305 vma->vm_page_prot));
63551ae0
DG
5306 }
5307 entry = pte_mkyoung(entry);
79c1c594 5308 entry = arch_make_huge_pte(entry, shift, vma->vm_flags);
63551ae0
DG
5309
5310 return entry;
5311}
5312
1e8f889b
DG
5313static void set_huge_ptep_writable(struct vm_area_struct *vma,
5314 unsigned long address, pte_t *ptep)
5315{
5316 pte_t entry;
5317
106c992a 5318 entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
32f84528 5319 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
4b3073e1 5320 update_mmu_cache(vma, address, ptep);
1e8f889b
DG
5321}
5322
d5ed7444 5323bool is_hugetlb_entry_migration(pte_t pte)
4a705fef
NH
5324{
5325 swp_entry_t swp;
5326
5327 if (huge_pte_none(pte) || pte_present(pte))
d5ed7444 5328 return false;
4a705fef 5329 swp = pte_to_swp_entry(pte);
d79d176a 5330 if (is_migration_entry(swp))
d5ed7444 5331 return true;
4a705fef 5332 else
d5ed7444 5333 return false;
4a705fef
NH
5334}
5335
52526ca7 5336bool is_hugetlb_entry_hwpoisoned(pte_t pte)
4a705fef
NH
5337{
5338 swp_entry_t swp;
5339
5340 if (huge_pte_none(pte) || pte_present(pte))
3e5c3600 5341 return false;
4a705fef 5342 swp = pte_to_swp_entry(pte);
d79d176a 5343 if (is_hwpoison_entry(swp))
3e5c3600 5344 return true;
4a705fef 5345 else
3e5c3600 5346 return false;
4a705fef 5347}
1e8f889b 5348
4eae4efa 5349static void
ea4c353d 5350hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr,
935d4f0c 5351 struct folio *new_folio, pte_t old, unsigned long sz)
4eae4efa 5352{
5a2f8d22
PX
5353 pte_t newpte = make_huge_pte(vma, &new_folio->page, 1);
5354
ea4c353d 5355 __folio_mark_uptodate(new_folio);
9d5fafd5 5356 hugetlb_add_new_anon_rmap(new_folio, vma, addr);
5a2f8d22
PX
5357 if (userfaultfd_wp(vma) && huge_pte_uffd_wp(old))
5358 newpte = huge_pte_mkuffd_wp(newpte);
935d4f0c 5359 set_huge_pte_at(vma->vm_mm, addr, ptep, newpte, sz);
4eae4efa 5360 hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm);
ea4c353d 5361 folio_set_hugetlb_migratable(new_folio);
4eae4efa
PX
5362}
5363
63551ae0 5364int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
bc70fbf2
PX
5365 struct vm_area_struct *dst_vma,
5366 struct vm_area_struct *src_vma)
63551ae0 5367{
3aa4ed80 5368 pte_t *src_pte, *dst_pte, entry;
ad27ce20 5369 struct folio *pte_folio;
1c59827d 5370 unsigned long addr;
bc70fbf2
PX
5371 bool cow = is_cow_mapping(src_vma->vm_flags);
5372 struct hstate *h = hstate_vma(src_vma);
a5516438 5373 unsigned long sz = huge_page_size(h);
4eae4efa 5374 unsigned long npages = pages_per_huge_page(h);
ac46d4f3 5375 struct mmu_notifier_range range;
e95a9851 5376 unsigned long last_addr_mask;
e8569dd2 5377 int ret = 0;
1e8f889b 5378
ac46d4f3 5379 if (cow) {
7d4a8be0 5380 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, src,
bc70fbf2
PX
5381 src_vma->vm_start,
5382 src_vma->vm_end);
ac46d4f3 5383 mmu_notifier_invalidate_range_start(&range);
e727bfd5 5384 vma_assert_write_locked(src_vma);
623a1ddf 5385 raw_write_seqcount_begin(&src->write_protect_seq);
40549ba8
MK
5386 } else {
5387 /*
5388 * For shared mappings the vma lock must be held before
9c67a207 5389 * calling hugetlb_walk() in the src vma. Otherwise, the
40549ba8
MK
5390 * returned ptep could go away if part of a shared pmd and
5391 * another thread calls huge_pmd_unshare.
5392 */
5393 hugetlb_vma_lock_read(src_vma);
ac46d4f3 5394 }
e8569dd2 5395
e95a9851 5396 last_addr_mask = hugetlb_mask_last_page(h);
bc70fbf2 5397 for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) {
cb900f41 5398 spinlock_t *src_ptl, *dst_ptl;
9c67a207 5399 src_pte = hugetlb_walk(src_vma, addr, sz);
e95a9851
MK
5400 if (!src_pte) {
5401 addr |= last_addr_mask;
c74df32c 5402 continue;
e95a9851 5403 }
bc70fbf2 5404 dst_pte = huge_pte_alloc(dst, dst_vma, addr, sz);
e8569dd2
AS
5405 if (!dst_pte) {
5406 ret = -ENOMEM;
5407 break;
5408 }
c5c99429 5409
5e41540c
MK
5410 /*
5411 * If the pagetables are shared don't copy or take references.
5e41540c 5412 *
3aa4ed80 5413 * dst_pte == src_pte is the common case of src/dest sharing.
5e41540c 5414 * However, src could have 'unshared' and dst shares with
3aa4ed80
ML
5415 * another vma. So page_count of ptep page is checked instead
5416 * to reliably determine whether pte is shared.
5e41540c 5417 */
3aa4ed80 5418 if (page_count(virt_to_page(dst_pte)) > 1) {
e95a9851 5419 addr |= last_addr_mask;
c5c99429 5420 continue;
e95a9851 5421 }
c5c99429 5422
cb900f41
KS
5423 dst_ptl = huge_pte_lock(h, dst, dst_pte);
5424 src_ptl = huge_pte_lockptr(h, src, src_pte);
5425 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
4a705fef 5426 entry = huge_ptep_get(src_pte);
4eae4efa 5427again:
3aa4ed80 5428 if (huge_pte_none(entry)) {
5e41540c 5429 /*
3aa4ed80 5430 * Skip if src entry none.
5e41540c 5431 */
4a705fef 5432 ;
c2cb0dcc 5433 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) {
5a2f8d22 5434 if (!userfaultfd_wp(dst_vma))
c2cb0dcc 5435 entry = huge_pte_clear_uffd_wp(entry);
935d4f0c 5436 set_huge_pte_at(dst, addr, dst_pte, entry, sz);
c2cb0dcc 5437 } else if (unlikely(is_hugetlb_entry_migration(entry))) {
4a705fef 5438 swp_entry_t swp_entry = pte_to_swp_entry(entry);
5a2f8d22 5439 bool uffd_wp = pte_swp_uffd_wp(entry);
4a705fef 5440
6c287605 5441 if (!is_readable_migration_entry(swp_entry) && cow) {
4a705fef
NH
5442 /*
5443 * COW mappings require pages in both
5444 * parent and child to be set to read.
5445 */
4dd845b5
AP
5446 swp_entry = make_readable_migration_entry(
5447 swp_offset(swp_entry));
4a705fef 5448 entry = swp_entry_to_pte(swp_entry);
bc70fbf2 5449 if (userfaultfd_wp(src_vma) && uffd_wp)
5a2f8d22 5450 entry = pte_swp_mkuffd_wp(entry);
935d4f0c 5451 set_huge_pte_at(src, addr, src_pte, entry, sz);
4a705fef 5452 }
5a2f8d22 5453 if (!userfaultfd_wp(dst_vma))
bc70fbf2 5454 entry = huge_pte_clear_uffd_wp(entry);
935d4f0c 5455 set_huge_pte_at(dst, addr, dst_pte, entry, sz);
bc70fbf2 5456 } else if (unlikely(is_pte_marker(entry))) {
af19487f
AR
5457 pte_marker marker = copy_pte_marker(
5458 pte_to_swp_entry(entry), dst_vma);
5459
5460 if (marker)
5461 set_huge_pte_at(dst, addr, dst_pte,
935d4f0c 5462 make_pte_marker(marker), sz);
4a705fef 5463 } else {
4eae4efa 5464 entry = huge_ptep_get(src_pte);
ad27ce20
Z
5465 pte_folio = page_folio(pte_page(entry));
5466 folio_get(pte_folio);
4eae4efa
PX
5467
5468 /*
fb3d824d
DH
5469 * Failing to duplicate the anon rmap is a rare case
5470 * where we see pinned hugetlb pages while they're
5471 * prone to COW. We need to do the COW earlier during
5472 * fork.
4eae4efa
PX
5473 *
5474 * When pre-allocating the page or copying data, we
5475 * need to be without the pgtable locks since we could
5476 * sleep during the process.
5477 */
ad27ce20 5478 if (!folio_test_anon(pte_folio)) {
44887f39 5479 hugetlb_add_file_rmap(pte_folio);
ebe2e35e 5480 } else if (hugetlb_try_dup_anon_rmap(pte_folio, src_vma)) {
4eae4efa 5481 pte_t src_pte_old = entry;
d0ce0e47 5482 struct folio *new_folio;
4eae4efa
PX
5483
5484 spin_unlock(src_ptl);
5485 spin_unlock(dst_ptl);
5486 /* Do not use reserve as it's private owned */
d0ce0e47
SK
5487 new_folio = alloc_hugetlb_folio(dst_vma, addr, 1);
5488 if (IS_ERR(new_folio)) {
ad27ce20 5489 folio_put(pte_folio);
d0ce0e47 5490 ret = PTR_ERR(new_folio);
4eae4efa
PX
5491 break;
5492 }
1cb9dc4b 5493 ret = copy_user_large_folio(new_folio,
ad27ce20
Z
5494 pte_folio,
5495 addr, dst_vma);
5496 folio_put(pte_folio);
1cb9dc4b
LS
5497 if (ret) {
5498 folio_put(new_folio);
5499 break;
5500 }
4eae4efa 5501
d0ce0e47 5502 /* Install the new hugetlb folio if src pte stable */
4eae4efa
PX
5503 dst_ptl = huge_pte_lock(h, dst, dst_pte);
5504 src_ptl = huge_pte_lockptr(h, src, src_pte);
5505 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
5506 entry = huge_ptep_get(src_pte);
5507 if (!pte_same(src_pte_old, entry)) {
bc70fbf2 5508 restore_reserve_on_error(h, dst_vma, addr,
d2d7bb44 5509 new_folio);
d0ce0e47 5510 folio_put(new_folio);
3aa4ed80 5511 /* huge_ptep of dst_pte won't change as in child */
4eae4efa
PX
5512 goto again;
5513 }
5a2f8d22 5514 hugetlb_install_folio(dst_vma, dst_pte, addr,
935d4f0c 5515 new_folio, src_pte_old, sz);
4eae4efa
PX
5516 spin_unlock(src_ptl);
5517 spin_unlock(dst_ptl);
5518 continue;
5519 }
5520
34ee645e 5521 if (cow) {
0f10851e
JG
5522 /*
5523 * No need to notify as we are downgrading page
5524 * table protection not changing it to point
5525 * to a new page.
5526 *
ee65728e 5527 * See Documentation/mm/mmu_notifier.rst
0f10851e 5528 */
7f2e9525 5529 huge_ptep_set_wrprotect(src, addr, src_pte);
84894e1c 5530 entry = huge_pte_wrprotect(entry);
34ee645e 5531 }
4eae4efa 5532
5a2f8d22
PX
5533 if (!userfaultfd_wp(dst_vma))
5534 entry = huge_pte_clear_uffd_wp(entry);
5535
935d4f0c 5536 set_huge_pte_at(dst, addr, dst_pte, entry, sz);
4eae4efa 5537 hugetlb_count_add(npages, dst);
1c59827d 5538 }
cb900f41
KS
5539 spin_unlock(src_ptl);
5540 spin_unlock(dst_ptl);
63551ae0 5541 }
63551ae0 5542
623a1ddf
DH
5543 if (cow) {
5544 raw_write_seqcount_end(&src->write_protect_seq);
ac46d4f3 5545 mmu_notifier_invalidate_range_end(&range);
40549ba8
MK
5546 } else {
5547 hugetlb_vma_unlock_read(src_vma);
623a1ddf 5548 }
e8569dd2
AS
5549
5550 return ret;
63551ae0
DG
5551}
5552
550a7d60 5553static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
935d4f0c
RR
5554 unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte,
5555 unsigned long sz)
550a7d60
MA
5556{
5557 struct hstate *h = hstate_vma(vma);
5558 struct mm_struct *mm = vma->vm_mm;
550a7d60 5559 spinlock_t *src_ptl, *dst_ptl;
db110a99 5560 pte_t pte;
550a7d60 5561
550a7d60
MA
5562 dst_ptl = huge_pte_lock(h, mm, dst_pte);
5563 src_ptl = huge_pte_lockptr(h, mm, src_pte);
5564
5565 /*
5566 * We don't have to worry about the ordering of src and dst ptlocks
8651a137 5567 * because exclusive mmap_lock (or the i_mmap_lock) prevents deadlock.
550a7d60
MA
5568 */
5569 if (src_ptl != dst_ptl)
5570 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
5571
5572 pte = huge_ptep_get_and_clear(mm, old_addr, src_pte);
935d4f0c 5573 set_huge_pte_at(mm, new_addr, dst_pte, pte, sz);
550a7d60
MA
5574
5575 if (src_ptl != dst_ptl)
5576 spin_unlock(src_ptl);
5577 spin_unlock(dst_ptl);
5578}
5579
5580int move_hugetlb_page_tables(struct vm_area_struct *vma,
5581 struct vm_area_struct *new_vma,
5582 unsigned long old_addr, unsigned long new_addr,
5583 unsigned long len)
5584{
5585 struct hstate *h = hstate_vma(vma);
5586 struct address_space *mapping = vma->vm_file->f_mapping;
5587 unsigned long sz = huge_page_size(h);
5588 struct mm_struct *mm = vma->vm_mm;
5589 unsigned long old_end = old_addr + len;
e95a9851 5590 unsigned long last_addr_mask;
550a7d60
MA
5591 pte_t *src_pte, *dst_pte;
5592 struct mmu_notifier_range range;
3d0b95cd 5593 bool shared_pmd = false;
550a7d60 5594
7d4a8be0 5595 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, old_addr,
550a7d60
MA
5596 old_end);
5597 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
3d0b95cd
BW
5598 /*
5599 * In case of shared PMDs, we should cover the maximum possible
5600 * range.
5601 */
5602 flush_cache_range(vma, range.start, range.end);
5603
550a7d60 5604 mmu_notifier_invalidate_range_start(&range);
e95a9851 5605 last_addr_mask = hugetlb_mask_last_page(h);
550a7d60 5606 /* Prevent race with file truncation */
40549ba8 5607 hugetlb_vma_lock_write(vma);
550a7d60
MA
5608 i_mmap_lock_write(mapping);
5609 for (; old_addr < old_end; old_addr += sz, new_addr += sz) {
9c67a207 5610 src_pte = hugetlb_walk(vma, old_addr, sz);
e95a9851
MK
5611 if (!src_pte) {
5612 old_addr |= last_addr_mask;
5613 new_addr |= last_addr_mask;
550a7d60 5614 continue;
e95a9851 5615 }
550a7d60
MA
5616 if (huge_pte_none(huge_ptep_get(src_pte)))
5617 continue;
5618
4ddb4d91 5619 if (huge_pmd_unshare(mm, vma, old_addr, src_pte)) {
3d0b95cd 5620 shared_pmd = true;
4ddb4d91
MK
5621 old_addr |= last_addr_mask;
5622 new_addr |= last_addr_mask;
550a7d60 5623 continue;
3d0b95cd 5624 }
550a7d60
MA
5625
5626 dst_pte = huge_pte_alloc(mm, new_vma, new_addr, sz);
5627 if (!dst_pte)
5628 break;
5629
935d4f0c 5630 move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte, sz);
550a7d60 5631 }
3d0b95cd
BW
5632
5633 if (shared_pmd)
f720b471 5634 flush_hugetlb_tlb_range(vma, range.start, range.end);
3d0b95cd 5635 else
f720b471 5636 flush_hugetlb_tlb_range(vma, old_end - len, old_end);
550a7d60 5637 mmu_notifier_invalidate_range_end(&range);
13e4ad2c 5638 i_mmap_unlock_write(mapping);
40549ba8 5639 hugetlb_vma_unlock_write(vma);
550a7d60
MA
5640
5641 return len + old_addr - old_end;
5642}
5643
2820b0f0
RR
5644void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
5645 unsigned long start, unsigned long end,
5646 struct page *ref_page, zap_flags_t zap_flags)
63551ae0
DG
5647{
5648 struct mm_struct *mm = vma->vm_mm;
5649 unsigned long address;
c7546f8f 5650 pte_t *ptep;
63551ae0 5651 pte_t pte;
cb900f41 5652 spinlock_t *ptl;
63551ae0 5653 struct page *page;
a5516438
AK
5654 struct hstate *h = hstate_vma(vma);
5655 unsigned long sz = huge_page_size(h);
df7a6d1f 5656 bool adjust_reservation = false;
e95a9851 5657 unsigned long last_addr_mask;
a4a118f2 5658 bool force_flush = false;
a5516438 5659
63551ae0 5660 WARN_ON(!is_vm_hugetlb_page(vma));
a5516438
AK
5661 BUG_ON(start & ~huge_page_mask(h));
5662 BUG_ON(end & ~huge_page_mask(h));
63551ae0 5663
07e32661
AK
5664 /*
5665 * This is a hugetlb vma, all the pte entries should point
5666 * to huge page.
5667 */
ed6a7935 5668 tlb_change_page_size(tlb, sz);
24669e58 5669 tlb_start_vma(tlb, vma);
dff11abe 5670
e95a9851 5671 last_addr_mask = hugetlb_mask_last_page(h);
569f48b8 5672 address = start;
569f48b8 5673 for (; address < end; address += sz) {
9c67a207 5674 ptep = hugetlb_walk(vma, address, sz);
e95a9851
MK
5675 if (!ptep) {
5676 address |= last_addr_mask;
c7546f8f 5677 continue;
e95a9851 5678 }
c7546f8f 5679
cb900f41 5680 ptl = huge_pte_lock(h, mm, ptep);
4ddb4d91 5681 if (huge_pmd_unshare(mm, vma, address, ptep)) {
31d49da5 5682 spin_unlock(ptl);
a4a118f2
NA
5683 tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
5684 force_flush = true;
4ddb4d91 5685 address |= last_addr_mask;
31d49da5
AK
5686 continue;
5687 }
39dde65c 5688
6629326b 5689 pte = huge_ptep_get(ptep);
31d49da5
AK
5690 if (huge_pte_none(pte)) {
5691 spin_unlock(ptl);
5692 continue;
5693 }
6629326b
HD
5694
5695 /*
9fbc1f63
NH
5696 * Migrating hugepage or HWPoisoned hugepage is already
5697 * unmapped and its refcount is dropped, so just clear pte here.
6629326b 5698 */
9fbc1f63 5699 if (unlikely(!pte_present(pte))) {
05e90bd0
PX
5700 /*
5701 * If the pte was wr-protected by uffd-wp in any of the
5702 * swap forms, meanwhile the caller does not want to
5703 * drop the uffd-wp bit in this zap, then replace the
5704 * pte with a marker.
5705 */
5706 if (pte_swp_uffd_wp_any(pte) &&
5707 !(zap_flags & ZAP_FLAG_DROP_MARKER))
5708 set_huge_pte_at(mm, address, ptep,
935d4f0c
RR
5709 make_pte_marker(PTE_MARKER_UFFD_WP),
5710 sz);
05e90bd0
PX
5711 else
5712 huge_pte_clear(mm, address, ptep, sz);
31d49da5
AK
5713 spin_unlock(ptl);
5714 continue;
8c4894c6 5715 }
6629326b
HD
5716
5717 page = pte_page(pte);
04f2cbe3
MG
5718 /*
5719 * If a reference page is supplied, it is because a specific
5720 * page is being unmapped, not a range. Ensure the page we
5721 * are about to unmap is the actual page of interest.
5722 */
5723 if (ref_page) {
31d49da5
AK
5724 if (page != ref_page) {
5725 spin_unlock(ptl);
5726 continue;
5727 }
04f2cbe3
MG
5728 /*
5729 * Mark the VMA as having unmapped its page so that
5730 * future faults in this VMA will fail rather than
5731 * looking like data was lost
5732 */
5733 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
5734 }
5735
c7546f8f 5736 pte = huge_ptep_get_and_clear(mm, address, ptep);
b528e4b6 5737 tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
106c992a 5738 if (huge_pte_dirty(pte))
6649a386 5739 set_page_dirty(page);
05e90bd0
PX
5740 /* Leave a uffd-wp pte marker if needed */
5741 if (huge_pte_uffd_wp(pte) &&
5742 !(zap_flags & ZAP_FLAG_DROP_MARKER))
5743 set_huge_pte_at(mm, address, ptep,
935d4f0c
RR
5744 make_pte_marker(PTE_MARKER_UFFD_WP),
5745 sz);
5d317b2b 5746 hugetlb_count_sub(pages_per_huge_page(h), mm);
e135826b 5747 hugetlb_remove_rmap(page_folio(page));
31d49da5 5748
df7a6d1f
BL
5749 /*
5750 * Restore the reservation for anonymous page, otherwise the
5751 * backing page could be stolen by someone.
5752 * If we are freeing a surplus, do not set the restore
5753 * reservation bit.
5754 */
5755 if (!h->surplus_huge_pages && __vma_private_lock(vma) &&
5756 folio_test_anon(page_folio(page))) {
5757 folio_set_hugetlb_restore_reserve(page_folio(page));
5758 /* Reservation to be adjusted after the spin lock */
5759 adjust_reservation = true;
5760 }
5761
cb900f41 5762 spin_unlock(ptl);
df7a6d1f
BL
5763
5764 /*
5765 * Adjust the reservation for the region that will have the
5766 * reserve restored. Keep in mind that vma_needs_reservation() changes
5767 * resv->adds_in_progress if it succeeds. If this is not done,
5768 * do_exit() will not see it, and will keep the reservation
5769 * forever.
5770 */
5771 if (adjust_reservation && vma_needs_reservation(h, vma, address))
5772 vma_add_reservation(h, vma, address);
5773
e77b0852 5774 tlb_remove_page_size(tlb, page, huge_page_size(h));
31d49da5
AK
5775 /*
5776 * Bail out after unmapping reference page if supplied
5777 */
5778 if (ref_page)
5779 break;
fe1668ae 5780 }
24669e58 5781 tlb_end_vma(tlb, vma);
a4a118f2
NA
5782
5783 /*
5784 * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
5785 * could defer the flush until now, since by holding i_mmap_rwsem we
5786 * guaranteed that the last reference would not be dropped. But we must
5787 * do the flushing before we return, as otherwise i_mmap_rwsem will be
5788 * dropped and the last reference to the shared PMDs page might be
5789 * dropped as well.
5790 *
5791 * In theory we could defer the freeing of the PMD pages as well, but
5792 * huge_pmd_unshare() relies on the exact page_count for the PMD page to
5793 * detect sharing, so we cannot defer the release of the page either.
5794 * Instead, do flush now.
5795 */
5796 if (force_flush)
5797 tlb_flush_mmu_tlbonly(tlb);
1da177e4 5798}
63551ae0 5799
2820b0f0
RR
5800void __hugetlb_zap_begin(struct vm_area_struct *vma,
5801 unsigned long *start, unsigned long *end)
d833352a 5802{
2820b0f0
RR
5803 if (!vma->vm_file) /* hugetlbfs_file_mmap error */
5804 return;
5805
5806 adjust_range_if_pmd_sharing_possible(vma, start, end);
131a79b4 5807 hugetlb_vma_lock_write(vma);
2820b0f0
RR
5808 if (vma->vm_file)
5809 i_mmap_lock_write(vma->vm_file->f_mapping);
5810}
131a79b4 5811
2820b0f0
RR
5812void __hugetlb_zap_end(struct vm_area_struct *vma,
5813 struct zap_details *details)
5814{
5815 zap_flags_t zap_flags = details ? details->zap_flags : 0;
131a79b4 5816
2820b0f0
RR
5817 if (!vma->vm_file) /* hugetlbfs_file_mmap error */
5818 return;
d833352a 5819
04ada095
MK
5820 if (zap_flags & ZAP_FLAG_UNMAP) { /* final unmap */
5821 /*
5822 * Unlock and free the vma lock before releasing i_mmap_rwsem.
5823 * When the vma_lock is freed, this makes the vma ineligible
5824 * for pmd sharing. And, i_mmap_rwsem is required to set up
5825 * pmd sharing. This is important as page tables for this
5826 * unmapped range will be asynchronously deleted. If the page
5827 * tables are shared, there will be issues when accessed by
5828 * someone else.
5829 */
5830 __hugetlb_vma_unlock_write_free(vma);
04ada095 5831 } else {
04ada095
MK
5832 hugetlb_vma_unlock_write(vma);
5833 }
2820b0f0
RR
5834
5835 if (vma->vm_file)
5836 i_mmap_unlock_write(vma->vm_file->f_mapping);
d833352a
MG
5837}
5838
502717f4 5839void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
05e90bd0
PX
5840 unsigned long end, struct page *ref_page,
5841 zap_flags_t zap_flags)
502717f4 5842{
369258ce 5843 struct mmu_notifier_range range;
24669e58 5844 struct mmu_gather tlb;
dff11abe 5845
7d4a8be0 5846 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
369258ce
MK
5847 start, end);
5848 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
5849 mmu_notifier_invalidate_range_start(&range);
a72afd87 5850 tlb_gather_mmu(&tlb, vma->vm_mm);
369258ce 5851
05e90bd0 5852 __unmap_hugepage_range(&tlb, vma, start, end, ref_page, zap_flags);
369258ce
MK
5853
5854 mmu_notifier_invalidate_range_end(&range);
ae8eba8b 5855 tlb_finish_mmu(&tlb);
502717f4
CK
5856}
5857
04f2cbe3
MG
5858/*
5859 * This is called when the original mapper is failing to COW a MAP_PRIVATE
578b7725 5860 * mapping it owns the reserve page for. The intention is to unmap the page
04f2cbe3
MG
5861 * from other VMAs and let the children be SIGKILLed if they are faulting the
5862 * same region.
5863 */
2f4612af
DB
5864static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
5865 struct page *page, unsigned long address)
04f2cbe3 5866{
7526674d 5867 struct hstate *h = hstate_vma(vma);
04f2cbe3
MG
5868 struct vm_area_struct *iter_vma;
5869 struct address_space *mapping;
04f2cbe3
MG
5870 pgoff_t pgoff;
5871
5872 /*
5873 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
5874 * from page cache lookup which is in HPAGE_SIZE units.
5875 */
7526674d 5876 address = address & huge_page_mask(h);
36e4f20a
MH
5877 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
5878 vma->vm_pgoff;
93c76a3d 5879 mapping = vma->vm_file->f_mapping;
04f2cbe3 5880
4eb2b1dc
MG
5881 /*
5882 * Take the mapping lock for the duration of the table walk. As
5883 * this mapping should be shared between all the VMAs,
5884 * __unmap_hugepage_range() is called as the lock is already held
5885 */
83cde9e8 5886 i_mmap_lock_write(mapping);
6b2dbba8 5887 vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
04f2cbe3
MG
5888 /* Do not unmap the current VMA */
5889 if (iter_vma == vma)
5890 continue;
5891
2f84a899
MG
5892 /*
5893 * Shared VMAs have their own reserves and do not affect
5894 * MAP_PRIVATE accounting but it is possible that a shared
5895 * VMA is using the same page so check and skip such VMAs.
5896 */
5897 if (iter_vma->vm_flags & VM_MAYSHARE)
5898 continue;
5899
04f2cbe3
MG
5900 /*
5901 * Unmap the page from other VMAs without their own reserves.
5902 * They get marked to be SIGKILLed if they fault in these
5903 * areas. This is because a future no-page fault on this VMA
5904 * could insert a zeroed page instead of the data existing
5905 * from the time of fork. This would look like data corruption
5906 */
5907 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
24669e58 5908 unmap_hugepage_range(iter_vma, address,
05e90bd0 5909 address + huge_page_size(h), page, 0);
04f2cbe3 5910 }
83cde9e8 5911 i_mmap_unlock_write(mapping);
04f2cbe3
MG
5912}
5913
0fe6e20b 5914/*
c89357e2 5915 * hugetlb_wp() should be called with page lock of the original hugepage held.
aa6d2e8c 5916 * Called with hugetlb_fault_mutex_table held and pte_page locked so we
ef009b25
MH
5917 * cannot race with other handlers or page migration.
5918 * Keep the pte_same checks anyway to make transition from the mutex easier.
0fe6e20b 5919 */
bd722058 5920static vm_fault_t hugetlb_wp(struct folio *pagecache_folio,
9acad7ba 5921 struct vm_fault *vmf)
1e8f889b 5922{
bd722058
VMO
5923 struct vm_area_struct *vma = vmf->vma;
5924 struct mm_struct *mm = vma->vm_mm;
5925 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
5926 pte_t pte = huge_ptep_get(vmf->pte);
a5516438 5927 struct hstate *h = hstate_vma(vma);
959a78b6 5928 struct folio *old_folio;
d0ce0e47 5929 struct folio *new_folio;
2b740303
SJ
5930 int outside_reserve = 0;
5931 vm_fault_t ret = 0;
ac46d4f3 5932 struct mmu_notifier_range range;
1e8f889b 5933
60d5b473
PX
5934 /*
5935 * Never handle CoW for uffd-wp protected pages. It should be only
5936 * handled when the uffd-wp protection is removed.
5937 *
5938 * Note that only the CoW optimization path (in hugetlb_no_page())
5939 * can trigger this, because hugetlb_fault() will always resolve
5940 * uffd-wp bit first.
5941 */
5942 if (!unshare && huge_pte_uffd_wp(pte))
5943 return 0;
5944
1d8d1464
DH
5945 /*
5946 * hugetlb does not support FOLL_FORCE-style write faults that keep the
5947 * PTE mapped R/O such as maybe_mkwrite() would do.
5948 */
5949 if (WARN_ON_ONCE(!unshare && !(vma->vm_flags & VM_WRITE)))
5950 return VM_FAULT_SIGSEGV;
5951
5952 /* Let's take out MAP_SHARED mappings first. */
5953 if (vma->vm_flags & VM_MAYSHARE) {
bd722058 5954 set_huge_ptep_writable(vma, vmf->address, vmf->pte);
1d8d1464
DH
5955 return 0;
5956 }
5957
959a78b6 5958 old_folio = page_folio(pte_page(pte));
1e8f889b 5959
662ce1dc
YY
5960 delayacct_wpcopy_start();
5961
04f2cbe3 5962retry_avoidcopy:
c89357e2
DH
5963 /*
5964 * If no-one else is actually using this page, we're the exclusive
5965 * owner and can reuse this page.
5966 */
959a78b6 5967 if (folio_mapcount(old_folio) == 1 && folio_test_anon(old_folio)) {
5ca43289 5968 if (!PageAnonExclusive(&old_folio->page)) {
06968625 5969 folio_move_anon_rmap(old_folio, vma);
5ca43289
DH
5970 SetPageAnonExclusive(&old_folio->page);
5971 }
c89357e2 5972 if (likely(!unshare))
bd722058 5973 set_huge_ptep_writable(vma, vmf->address, vmf->pte);
662ce1dc
YY
5974
5975 delayacct_wpcopy_end();
83c54070 5976 return 0;
1e8f889b 5977 }
959a78b6
Z
5978 VM_BUG_ON_PAGE(folio_test_anon(old_folio) &&
5979 PageAnonExclusive(&old_folio->page), &old_folio->page);
1e8f889b 5980
04f2cbe3
MG
5981 /*
5982 * If the process that created a MAP_PRIVATE mapping is about to
5983 * perform a COW due to a shared page count, attempt to satisfy
5984 * the allocation without using the existing reserves. The pagecache
5985 * page is used to determine if the reserve at this address was
5986 * consumed or not. If reserves were used, a partial faulted mapping
5987 * at the time of fork() could consume its reserves on COW instead
5988 * of the full address range.
5989 */
5944d011 5990 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
959a78b6 5991 old_folio != pagecache_folio)
04f2cbe3
MG
5992 outside_reserve = 1;
5993
959a78b6 5994 folio_get(old_folio);
b76c8cfb 5995
ad4404a2
DB
5996 /*
5997 * Drop page table lock as buddy allocator may be called. It will
5998 * be acquired again before returning to the caller, as expected.
5999 */
bd722058
VMO
6000 spin_unlock(vmf->ptl);
6001 new_folio = alloc_hugetlb_folio(vma, vmf->address, outside_reserve);
1e8f889b 6002
d0ce0e47 6003 if (IS_ERR(new_folio)) {
04f2cbe3
MG
6004 /*
6005 * If a process owning a MAP_PRIVATE mapping fails to COW,
6006 * it is due to references held by a child and an insufficient
6007 * huge page pool. To guarantee the original mapper's
6008 * reliability, unmap the page from child processes. The child
6009 * may get SIGKILLed if it later faults.
6010 */
6011 if (outside_reserve) {
40549ba8
MK
6012 struct address_space *mapping = vma->vm_file->f_mapping;
6013 pgoff_t idx;
6014 u32 hash;
6015
959a78b6 6016 folio_put(old_folio);
40549ba8
MK
6017 /*
6018 * Drop hugetlb_fault_mutex and vma_lock before
6019 * unmapping. unmapping needs to hold vma_lock
6020 * in write mode. Dropping vma_lock in read mode
6021 * here is OK as COW mappings do not interact with
6022 * PMD sharing.
6023 *
6024 * Reacquire both after unmap operation.
6025 */
bd722058 6026 idx = vma_hugecache_offset(h, vma, vmf->address);
40549ba8
MK
6027 hash = hugetlb_fault_mutex_hash(mapping, idx);
6028 hugetlb_vma_unlock_read(vma);
6029 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6030
bd722058
VMO
6031 unmap_ref_private(mm, vma, &old_folio->page,
6032 vmf->address);
40549ba8
MK
6033
6034 mutex_lock(&hugetlb_fault_mutex_table[hash]);
6035 hugetlb_vma_lock_read(vma);
bd722058
VMO
6036 spin_lock(vmf->ptl);
6037 vmf->pte = hugetlb_walk(vma, vmf->address,
6038 huge_page_size(h));
6039 if (likely(vmf->pte &&
6040 pte_same(huge_ptep_get(vmf->pte), pte)))
2f4612af
DB
6041 goto retry_avoidcopy;
6042 /*
6043 * A race occurred while re-acquiring the page table
6044 * lock, and our job is done.
6045 */
662ce1dc 6046 delayacct_wpcopy_end();
2f4612af 6047 return 0;
04f2cbe3
MG
6048 }
6049
d0ce0e47 6050 ret = vmf_error(PTR_ERR(new_folio));
ad4404a2 6051 goto out_release_old;
1e8f889b
DG
6052 }
6053
0fe6e20b
NH
6054 /*
6055 * When the original hugepage is a shared one, it does not have
6056 * anon_vma prepared.
6057 */
9acad7ba
VMO
6058 ret = vmf_anon_prepare(vmf);
6059 if (unlikely(ret))
ad4404a2 6060 goto out_release_all;
0fe6e20b 6061
bd722058 6062 if (copy_user_large_folio(new_folio, old_folio, vmf->real_address, vma)) {
1cb9dc4b
LS
6063 ret = VM_FAULT_HWPOISON_LARGE;
6064 goto out_release_all;
6065 }
d0ce0e47 6066 __folio_mark_uptodate(new_folio);
1e8f889b 6067
bd722058
VMO
6068 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, vmf->address,
6069 vmf->address + huge_page_size(h));
ac46d4f3 6070 mmu_notifier_invalidate_range_start(&range);
ad4404a2 6071
b76c8cfb 6072 /*
cb900f41 6073 * Retake the page table lock to check for racing updates
b76c8cfb
LW
6074 * before the page tables are altered
6075 */
bd722058
VMO
6076 spin_lock(vmf->ptl);
6077 vmf->pte = hugetlb_walk(vma, vmf->address, huge_page_size(h));
6078 if (likely(vmf->pte && pte_same(huge_ptep_get(vmf->pte), pte))) {
0f230bc2
PX
6079 pte_t newpte = make_huge_pte(vma, &new_folio->page, !unshare);
6080
c89357e2 6081 /* Break COW or unshare */
bd722058 6082 huge_ptep_clear_flush(vma, vmf->address, vmf->pte);
e135826b 6083 hugetlb_remove_rmap(old_folio);
bd722058 6084 hugetlb_add_new_anon_rmap(new_folio, vma, vmf->address);
0f230bc2
PX
6085 if (huge_pte_uffd_wp(pte))
6086 newpte = huge_pte_mkuffd_wp(newpte);
bd722058
VMO
6087 set_huge_pte_at(mm, vmf->address, vmf->pte, newpte,
6088 huge_page_size(h));
d0ce0e47 6089 folio_set_hugetlb_migratable(new_folio);
1e8f889b 6090 /* Make the old page be freed below */
959a78b6 6091 new_folio = old_folio;
1e8f889b 6092 }
bd722058 6093 spin_unlock(vmf->ptl);
ac46d4f3 6094 mmu_notifier_invalidate_range_end(&range);
ad4404a2 6095out_release_all:
c89357e2
DH
6096 /*
6097 * No restore in case of successful pagetable update (Break COW or
6098 * unshare)
6099 */
959a78b6 6100 if (new_folio != old_folio)
bd722058 6101 restore_reserve_on_error(h, vma, vmf->address, new_folio);
d0ce0e47 6102 folio_put(new_folio);
ad4404a2 6103out_release_old:
959a78b6 6104 folio_put(old_folio);
8312034f 6105
bd722058 6106 spin_lock(vmf->ptl); /* Caller expects lock to be held */
662ce1dc
YY
6107
6108 delayacct_wpcopy_end();
ad4404a2 6109 return ret;
1e8f889b
DG
6110}
6111
3ae77f43
HD
6112/*
6113 * Return whether there is a pagecache page to back the given address within the VMA.
3ae77f43 6114 */
24334e78
PX
6115bool hugetlbfs_pagecache_present(struct hstate *h,
6116 struct vm_area_struct *vma, unsigned long address)
2a15efc9 6117{
91a2fb95 6118 struct address_space *mapping = vma->vm_file->f_mapping;
a08c7193 6119 pgoff_t idx = linear_page_index(vma, address);
fd4aed8d 6120 struct folio *folio;
2a15efc9 6121
fd4aed8d
MK
6122 folio = filemap_get_folio(mapping, idx);
6123 if (IS_ERR(folio))
6124 return false;
6125 folio_put(folio);
6126 return true;
2a15efc9
HD
6127}
6128
9b91c0e2 6129int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
ab76ad54
MK
6130 pgoff_t idx)
6131{
6132 struct inode *inode = mapping->host;
6133 struct hstate *h = hstate_inode(inode);
d9ef44de 6134 int err;
ab76ad54 6135
a08c7193 6136 idx <<= huge_page_order(h);
d9ef44de
MWO
6137 __folio_set_locked(folio);
6138 err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL);
6139
6140 if (unlikely(err)) {
6141 __folio_clear_locked(folio);
ab76ad54 6142 return err;
d9ef44de 6143 }
9b91c0e2 6144 folio_clear_hugetlb_restore_reserve(folio);
ab76ad54 6145
22146c3c 6146 /*
d9ef44de 6147 * mark folio dirty so that it will not be removed from cache/file
22146c3c
MK
6148 * by non-hugetlbfs specific code paths.
6149 */
d9ef44de 6150 folio_mark_dirty(folio);
22146c3c 6151
ab76ad54
MK
6152 spin_lock(&inode->i_lock);
6153 inode->i_blocks += blocks_per_huge_page(h);
6154 spin_unlock(&inode->i_lock);
6155 return 0;
6156}
6157
7dac0ec8 6158static inline vm_fault_t hugetlb_handle_userfault(struct vm_fault *vmf,
7677f7fd 6159 struct address_space *mapping,
7677f7fd
AR
6160 unsigned long reason)
6161{
7677f7fd 6162 u32 hash;
7677f7fd
AR
6163
6164 /*
958f32ce
LS
6165 * vma_lock and hugetlb_fault_mutex must be dropped before handling
6166 * userfault. Also mmap_lock could be dropped due to handling
6167 * userfault, any vma operation should be careful from here.
7677f7fd 6168 */
7dac0ec8
VMO
6169 hugetlb_vma_unlock_read(vmf->vma);
6170 hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff);
7677f7fd 6171 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
7dac0ec8 6172 return handle_userfault(vmf, reason);
7677f7fd
AR
6173}
6174
2ea7ff1e
PX
6175/*
6176 * Recheck pte with pgtable lock. Returns true if pte didn't change, or
6177 * false if pte changed or is changing.
6178 */
6179static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm,
6180 pte_t *ptep, pte_t old_pte)
6181{
6182 spinlock_t *ptl;
6183 bool same;
6184
6185 ptl = huge_pte_lock(h, mm, ptep);
6186 same = pte_same(huge_ptep_get(ptep), old_pte);
6187 spin_unlock(ptl);
6188
6189 return same;
6190}
6191
7b6ec181 6192static vm_fault_t hugetlb_no_page(struct address_space *mapping,
7dac0ec8 6193 struct vm_fault *vmf)
ac9b9c66 6194{
7b6ec181
VMO
6195 struct vm_area_struct *vma = vmf->vma;
6196 struct mm_struct *mm = vma->vm_mm;
a5516438 6197 struct hstate *h = hstate_vma(vma);
2b740303 6198 vm_fault_t ret = VM_FAULT_SIGBUS;
409eb8c2 6199 int anon_rmap = 0;
4c887265 6200 unsigned long size;
d0ce0e47 6201 struct folio *folio;
1e8f889b 6202 pte_t new_pte;
d0ce0e47 6203 bool new_folio, new_pagecache_folio = false;
7b6ec181 6204 u32 hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff);
4c887265 6205
04f2cbe3
MG
6206 /*
6207 * Currently, we are forced to kill the process in the event the
6208 * original mapper has unmapped pages from the child due to a failed
c89357e2
DH
6209 * COW/unsharing. Warn that such a situation has occurred as it may not
6210 * be obvious.
04f2cbe3
MG
6211 */
6212 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
910154d5 6213 pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
ffb22af5 6214 current->pid);
958f32ce 6215 goto out;
04f2cbe3
MG
6216 }
6217
4c887265 6218 /*
188a3972
MK
6219 * Use page lock to guard against racing truncation
6220 * before we get page_table_lock.
4c887265 6221 */
d0ce0e47 6222 new_folio = false;
7b6ec181 6223 folio = filemap_lock_hugetlb_folio(h, mapping, vmf->pgoff);
66dabbb6 6224 if (IS_ERR(folio)) {
188a3972 6225 size = i_size_read(mapping->host) >> huge_page_shift(h);
7b6ec181 6226 if (vmf->pgoff >= size)
188a3972 6227 goto out;
7677f7fd 6228 /* Check for page in userfault range */
2ea7ff1e
PX
6229 if (userfaultfd_missing(vma)) {
6230 /*
6231 * Since hugetlb_no_page() was examining pte
6232 * without pgtable lock, we need to re-test under
6233 * lock because the pte may not be stable and could
6234 * have changed from under us. Try to detect
6235 * either changed or during-changing ptes and retry
6236 * properly when needed.
6237 *
6238 * Note that userfaultfd is actually fine with
6239 * false positives (e.g. caused by a changed pte),
6240 * but not wrong logical events (e.g. caused by
6241 * reading a pte while it is changing). The latter can
6242 * confuse userspace, so the strictness is very
6243 * much preferred. E.g., MISSING event should
6244 * never happen on the page after UFFDIO_COPY has
6245 * correctly installed the page and returned.
6246 */
7b6ec181 6247 if (!hugetlb_pte_stable(h, mm, vmf->pte, vmf->orig_pte)) {
2ea7ff1e
PX
6248 ret = 0;
6249 goto out;
6250 }
6251
7dac0ec8 6252 return hugetlb_handle_userfault(vmf, mapping,
2ea7ff1e
PX
6253 VM_UFFD_MISSING);
6254 }
1a1aad8a 6255
37641efa
VMO
6256 if (!(vma->vm_flags & VM_MAYSHARE)) {
6257 ret = vmf_anon_prepare(vmf);
6258 if (unlikely(ret))
6259 goto out;
6260 }
6261
7b6ec181 6262 folio = alloc_hugetlb_folio(vma, vmf->address, 0);
d0ce0e47 6263 if (IS_ERR(folio)) {
4643d67e
MK
6264 /*
6265 * Returning error will result in the faulting task being
6266 * sent SIGBUS. The hugetlb fault mutex prevents two
6267 * tasks from racing to fault in the same page which
6268 * could result in false unable to allocate errors.
6269 * Page migration does not take the fault mutex, but
6270 * does a clear then write of pte's under page table
6271 * lock. Page fault code could race with migration,
6272 * notice the clear pte and try to allocate a page
6273 * here. Before returning error, get ptl and make
6274 * sure there really is no pte entry.
6275 */
7b6ec181 6276 if (hugetlb_pte_stable(h, mm, vmf->pte, vmf->orig_pte))
d0ce0e47 6277 ret = vmf_error(PTR_ERR(folio));
f9bf6c03
PX
6278 else
6279 ret = 0;
6bda666a
CL
6280 goto out;
6281 }
7b6ec181
VMO
6282 clear_huge_page(&folio->page, vmf->real_address,
6283 pages_per_huge_page(h));
d0ce0e47
SK
6284 __folio_mark_uptodate(folio);
6285 new_folio = true;
ac9b9c66 6286
f83a275d 6287 if (vma->vm_flags & VM_MAYSHARE) {
7b6ec181
VMO
6288 int err = hugetlb_add_to_page_cache(folio, mapping,
6289 vmf->pgoff);
6bda666a 6290 if (err) {
3a5497a2
ML
6291 /*
6292 * err can't be -EEXIST which implies someone
6293 * else consumed the reservation since hugetlb
6294 * fault mutex is held when add a hugetlb page
6295 * to the page cache. So it's safe to call
6296 * restore_reserve_on_error() here.
6297 */
7b6ec181
VMO
6298 restore_reserve_on_error(h, vma, vmf->address,
6299 folio);
d0ce0e47 6300 folio_put(folio);
37641efa 6301 ret = VM_FAULT_SIGBUS;
6bda666a
CL
6302 goto out;
6303 }
d0ce0e47 6304 new_pagecache_folio = true;
23be7468 6305 } else {
d0ce0e47 6306 folio_lock(folio);
409eb8c2 6307 anon_rmap = 1;
23be7468 6308 }
0fe6e20b 6309 } else {
998b4382
NH
6310 /*
6311 * If memory error occurs between mmap() and fault, some process
6312 * don't have hwpoisoned swap entry for errored virtual address.
6313 * So we need to block hugepage fault by PG_hwpoison bit check.
6314 */
d0ce0e47 6315 if (unlikely(folio_test_hwpoison(folio))) {
0eb98f15 6316 ret = VM_FAULT_HWPOISON_LARGE |
972dc4de 6317 VM_FAULT_SET_HINDEX(hstate_index(h));
998b4382
NH
6318 goto backout_unlocked;
6319 }
7677f7fd
AR
6320
6321 /* Check for page in userfault range. */
6322 if (userfaultfd_minor(vma)) {
d0ce0e47
SK
6323 folio_unlock(folio);
6324 folio_put(folio);
2ea7ff1e 6325 /* See comment in userfaultfd_missing() block above */
7b6ec181 6326 if (!hugetlb_pte_stable(h, mm, vmf->pte, vmf->orig_pte)) {
2ea7ff1e
PX
6327 ret = 0;
6328 goto out;
6329 }
7dac0ec8 6330 return hugetlb_handle_userfault(vmf, mapping,
2ea7ff1e 6331 VM_UFFD_MINOR);
7677f7fd 6332 }
6bda666a 6333 }
1e8f889b 6334
57303d80
AW
6335 /*
6336 * If we are going to COW a private mapping later, we examine the
6337 * pending reservations for this page now. This will ensure that
6338 * any allocations necessary to record that reservation occur outside
6339 * the spinlock.
6340 */
7b6ec181
VMO
6341 if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
6342 if (vma_needs_reservation(h, vma, vmf->address) < 0) {
2b26736c
AW
6343 ret = VM_FAULT_OOM;
6344 goto backout_unlocked;
6345 }
5e911373 6346 /* Just decrements count, does not deallocate */
7b6ec181 6347 vma_end_reservation(h, vma, vmf->address);
5e911373 6348 }
57303d80 6349
7b6ec181 6350 vmf->ptl = huge_pte_lock(h, mm, vmf->pte);
83c54070 6351 ret = 0;
c64e912c 6352 /* If pte changed from under us, retry */
7b6ec181 6353 if (!pte_same(huge_ptep_get(vmf->pte), vmf->orig_pte))
4c887265
AL
6354 goto backout;
6355
4781593d 6356 if (anon_rmap)
7b6ec181 6357 hugetlb_add_new_anon_rmap(folio, vma, vmf->address);
4781593d 6358 else
44887f39 6359 hugetlb_add_file_rmap(folio);
d0ce0e47 6360 new_pte = make_huge_pte(vma, &folio->page, ((vma->vm_flags & VM_WRITE)
1e8f889b 6361 && (vma->vm_flags & VM_SHARED)));
c64e912c
PX
6362 /*
6363 * If this pte was previously wr-protected, keep it wr-protected even
6364 * if populated.
6365 */
7b6ec181 6366 if (unlikely(pte_marker_uffd_wp(vmf->orig_pte)))
f1eb1bac 6367 new_pte = huge_pte_mkuffd_wp(new_pte);
7b6ec181 6368 set_huge_pte_at(mm, vmf->address, vmf->pte, new_pte, huge_page_size(h));
1e8f889b 6369
5d317b2b 6370 hugetlb_count_add(pages_per_huge_page(h), mm);
7b6ec181 6371 if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
1e8f889b 6372 /* Optimization, do the COW without a second fault */
bd722058 6373 ret = hugetlb_wp(folio, vmf);
1e8f889b
DG
6374 }
6375
7b6ec181 6376 spin_unlock(vmf->ptl);
cb6acd01
MK
6377
6378 /*
d0ce0e47
SK
6379 * Only set hugetlb_migratable in newly allocated pages. Existing pages
6380 * found in the pagecache may not have hugetlb_migratable if they have
8f251a3d 6381 * been isolated for migration.
cb6acd01 6382 */
d0ce0e47
SK
6383 if (new_folio)
6384 folio_set_hugetlb_migratable(folio);
cb6acd01 6385
d0ce0e47 6386 folio_unlock(folio);
4c887265 6387out:
958f32ce
LS
6388 hugetlb_vma_unlock_read(vma);
6389 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
ac9b9c66 6390 return ret;
4c887265
AL
6391
6392backout:
7b6ec181 6393 spin_unlock(vmf->ptl);
2b26736c 6394backout_unlocked:
d0ce0e47 6395 if (new_folio && !new_pagecache_folio)
7b6ec181 6396 restore_reserve_on_error(h, vma, vmf->address, folio);
fa27759a 6397
d0ce0e47
SK
6398 folio_unlock(folio);
6399 folio_put(folio);
4c887265 6400 goto out;
ac9b9c66
HD
6401}
6402
8382d914 6403#ifdef CONFIG_SMP
188b04a7 6404u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
8382d914
DB
6405{
6406 unsigned long key[2];
6407 u32 hash;
6408
1b426bac
MK
6409 key[0] = (unsigned long) mapping;
6410 key[1] = idx;
8382d914 6411
55254636 6412 hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0);
8382d914
DB
6413
6414 return hash & (num_fault_mutexes - 1);
6415}
6416#else
6417/*
6c26d310 6418 * For uniprocessor systems we always use a single mutex, so just
8382d914
DB
6419 * return 0 and avoid the hashing overhead.
6420 */
188b04a7 6421u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
8382d914
DB
6422{
6423 return 0;
6424}
6425#endif
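
/*
 * Illustrative caller pattern for the hash above (a sketch of what
 * hugetlb_fault() below already does, not a new API):
 *
 *	hash = hugetlb_fault_mutex_hash(mapping, idx);
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	... fault handling / page cache insertion serialized here ...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */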
6426
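/*
 * Descriptive summary of the lock ordering used by hugetlb_fault() below:
 * hugetlb_fault_mutex_table[hash] -> hugetlb_vma_lock (read) ->
 * pagecache folio lock -> page table lock (vmf.ptl).
 */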
2b740303 6427vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
788c7df4 6428 unsigned long address, unsigned int flags)
86e5216f 6429{
2b740303 6430 vm_fault_t ret;
8382d914 6431 u32 hash;
061e62e8 6432 struct folio *folio = NULL;
371607a3 6433 struct folio *pagecache_folio = NULL;
a5516438 6434 struct hstate *h = hstate_vma(vma);
8382d914 6435 struct address_space *mapping;
0f792cf9 6436 int need_wait_lock = 0;
0ca22723
VMO
6437 struct vm_fault vmf = {
6438 .vma = vma,
9b42fa16 6439 .address = address & huge_page_mask(h),
0ca22723
VMO
6440 .real_address = address,
6441 .flags = flags,
9b42fa16
VMO
6442 .pgoff = vma_hugecache_offset(h, vma,
6443 address & huge_page_mask(h)),
0ca22723
VMO
6444 /* TODO: Track hugetlb faults using vm_fault */
6445
6446 /*
 6447		 * Some fields may not be initialized; be careful, as it may
 6448		 * be hard to debug if called functions make assumptions about them.
6449 */
6450 };
86e5216f 6451
3935baa9
DG
6452 /*
6453 * Serialize hugepage allocation and instantiation, so that we don't
6454 * get spurious allocation failures if two CPUs race to instantiate
6455 * the same page in the page cache.
6456 */
40549ba8 6457 mapping = vma->vm_file->f_mapping;
0ca22723 6458 hash = hugetlb_fault_mutex_hash(mapping, vmf.pgoff);
c672c7f2 6459 mutex_lock(&hugetlb_fault_mutex_table[hash]);
8382d914 6460
40549ba8
MK
6461 /*
6462 * Acquire vma lock before calling huge_pte_alloc and hold
9b42fa16
VMO
6463 * until finished with vmf.pte. This prevents huge_pmd_unshare from
6464 * being called elsewhere and making the vmf.pte no longer valid.
40549ba8
MK
6465 */
6466 hugetlb_vma_lock_read(vma);
9b42fa16
VMO
6467 vmf.pte = huge_pte_alloc(mm, vma, vmf.address, huge_page_size(h));
6468 if (!vmf.pte) {
40549ba8
MK
6469 hugetlb_vma_unlock_read(vma);
6470 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6471 return VM_FAULT_OOM;
6472 }
6473
9b42fa16
VMO
6474 vmf.orig_pte = huge_ptep_get(vmf.pte);
6475 if (huge_pte_none_mostly(vmf.orig_pte)) {
6476 if (is_pte_marker(vmf.orig_pte)) {
af19487f 6477 pte_marker marker =
9b42fa16 6478 pte_marker_get(pte_to_swp_entry(vmf.orig_pte));
af19487f
AR
6479
6480 if (marker & PTE_MARKER_POISONED) {
6481 ret = VM_FAULT_HWPOISON_LARGE;
6482 goto out_mutex;
6483 }
6484 }
6485
958f32ce 6486 /*
af19487f
AR
6487 * Other PTE markers should be handled the same way as none PTE.
6488 *
958f32ce
LS
 6489		 * hugetlb_no_page will drop the vma lock and hugetlb fault
 6490		 * mutex internally, which makes us return immediately.
6491 */
7b6ec181 6492 return hugetlb_no_page(mapping, &vmf);
af19487f 6493 }
86e5216f 6494
83c54070 6495 ret = 0;
1e8f889b 6496
0f792cf9 6497 /*
9b42fa16
VMO
 6498	 * vmf.orig_pte could be a migration/hwpoison entry at this
 6499	 * point, so this check prevents the kernel from going below
 6500	 * assuming that we have an active hugepage in the pagecache. This
 6501	 * goto expects the 2nd page fault, and the
 6502	 * is_hugetlb_entry_(migration|hwpoisoned) check will properly handle it.
0f792cf9 6503 */
9b42fa16
VMO
6504 if (!pte_present(vmf.orig_pte)) {
6505 if (unlikely(is_hugetlb_entry_migration(vmf.orig_pte))) {
fcd48540
PX
6506 /*
6507 * Release the hugetlb fault lock now, but retain
6508 * the vma lock, because it is needed to guard the
6509 * huge_pte_lockptr() later in
6510 * migration_entry_wait_huge(). The vma lock will
6511 * be released there.
6512 */
6513 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
9b42fa16 6514 migration_entry_wait_huge(vma, vmf.pte);
fcd48540 6515 return 0;
9b42fa16 6516 } else if (unlikely(is_hugetlb_entry_hwpoisoned(vmf.orig_pte)))
fcd48540
PX
6517 ret = VM_FAULT_HWPOISON_LARGE |
6518 VM_FAULT_SET_HINDEX(hstate_index(h));
0f792cf9 6519 goto out_mutex;
fcd48540 6520 }
0f792cf9 6521
57303d80 6522 /*
c89357e2
DH
6523 * If we are going to COW/unshare the mapping later, we examine the
6524 * pending reservations for this page now. This will ensure that any
57303d80 6525 * allocations necessary to record that reservation occur outside the
1d8d1464
DH
6526 * spinlock. Also lookup the pagecache page now as it is used to
6527 * determine if a reservation has been consumed.
57303d80 6528 */
c89357e2 6529 if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) &&
9b42fa16
VMO
6530 !(vma->vm_flags & VM_MAYSHARE) && !huge_pte_write(vmf.orig_pte)) {
6531 if (vma_needs_reservation(h, vma, vmf.address) < 0) {
2b26736c 6532 ret = VM_FAULT_OOM;
b4d1d99f 6533 goto out_mutex;
2b26736c 6534 }
5e911373 6535 /* Just decrements count, does not deallocate */
9b42fa16 6536 vma_end_reservation(h, vma, vmf.address);
57303d80 6537
0ca22723
VMO
6538 pagecache_folio = filemap_lock_hugetlb_folio(h, mapping,
6539 vmf.pgoff);
66dabbb6
CH
6540 if (IS_ERR(pagecache_folio))
6541 pagecache_folio = NULL;
57303d80
AW
6542 }
6543
9b42fa16 6544 vmf.ptl = huge_pte_lock(h, mm, vmf.pte);
0f792cf9 6545
c89357e2 6546 /* Check for a racing update before calling hugetlb_wp() */
9b42fa16 6547 if (unlikely(!pte_same(vmf.orig_pte, huge_ptep_get(vmf.pte))))
0f792cf9
NH
6548 goto out_ptl;
6549
166f3ecc 6550 /* Handle userfault-wp first, before trying to lock more pages */
9b42fa16
VMO
6551 if (userfaultfd_wp(vma) && huge_pte_uffd_wp(huge_ptep_get(vmf.pte)) &&
6552 (flags & FAULT_FLAG_WRITE) && !huge_pte_write(vmf.orig_pte)) {
d61ea1cb 6553 if (!userfaultfd_wp_async(vma)) {
9b42fa16 6554 spin_unlock(vmf.ptl);
d61ea1cb
PX
6555 if (pagecache_folio) {
6556 folio_unlock(pagecache_folio);
6557 folio_put(pagecache_folio);
6558 }
6559 hugetlb_vma_unlock_read(vma);
6560 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6561 return handle_userfault(&vmf, VM_UFFD_WP);
166f3ecc 6562 }
d61ea1cb 6563
9b42fa16
VMO
6564 vmf.orig_pte = huge_pte_clear_uffd_wp(vmf.orig_pte);
6565 set_huge_pte_at(mm, vmf.address, vmf.pte, vmf.orig_pte,
52526ca7 6566 huge_page_size(hstate_vma(vma)));
d61ea1cb 6567 /* Fallthrough to CoW */
166f3ecc
PX
6568 }
6569
56c9cfb1 6570 /*
9b42fa16 6571 * hugetlb_wp() requires page locks of pte_page(vmf.orig_pte) and
371607a3 6572 * pagecache_folio, so here we need take the former one
061e62e8 6573 * when folio != pagecache_folio or !pagecache_folio.
56c9cfb1 6574 */
9b42fa16 6575 folio = page_folio(pte_page(vmf.orig_pte));
061e62e8
Z
6576 if (folio != pagecache_folio)
6577 if (!folio_trylock(folio)) {
0f792cf9
NH
6578 need_wait_lock = 1;
6579 goto out_ptl;
6580 }
b4d1d99f 6581
061e62e8 6582 folio_get(folio);
b4d1d99f 6583
c89357e2 6584 if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
9b42fa16 6585 if (!huge_pte_write(vmf.orig_pte)) {
bd722058 6586 ret = hugetlb_wp(pagecache_folio, &vmf);
0f792cf9 6587 goto out_put_page;
c89357e2 6588 } else if (likely(flags & FAULT_FLAG_WRITE)) {
9b42fa16 6589 vmf.orig_pte = huge_pte_mkdirty(vmf.orig_pte);
b4d1d99f 6590 }
b4d1d99f 6591 }
9b42fa16
VMO
6592 vmf.orig_pte = pte_mkyoung(vmf.orig_pte);
6593 if (huge_ptep_set_access_flags(vma, vmf.address, vmf.pte, vmf.orig_pte,
788c7df4 6594 flags & FAULT_FLAG_WRITE))
9b42fa16 6595 update_mmu_cache(vma, vmf.address, vmf.pte);
0f792cf9 6596out_put_page:
061e62e8
Z
6597 if (folio != pagecache_folio)
6598 folio_unlock(folio);
6599 folio_put(folio);
cb900f41 6600out_ptl:
9b42fa16 6601 spin_unlock(vmf.ptl);
57303d80 6602
371607a3
SK
6603 if (pagecache_folio) {
6604 folio_unlock(pagecache_folio);
6605 folio_put(pagecache_folio);
57303d80 6606 }
b4d1d99f 6607out_mutex:
40549ba8 6608 hugetlb_vma_unlock_read(vma);
c672c7f2 6609 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
0f792cf9
NH
6610 /*
 6611	 * Generally it's safe to hold a refcount while waiting for the page lock.
 6612	 * But here we only wait to defer the next page fault and avoid a busy
 6613	 * loop; the page is not used after it is unlocked before returning from
 6614	 * the current page fault. So we are safe from accessing a freed page,
 6615	 * even if we wait here without taking a refcount.
6616 */
6617 if (need_wait_lock)
061e62e8 6618 folio_wait_locked(folio);
1e8f889b 6619 return ret;
86e5216f
AL
6620}
6621
714c1891 6622#ifdef CONFIG_USERFAULTFD
72e315f7
HD
6623/*
6624 * Can probably be eliminated, but still used by hugetlb_mfill_atomic_pte().
6625 */
6626static struct folio *alloc_hugetlb_folio_vma(struct hstate *h,
6627 struct vm_area_struct *vma, unsigned long address)
6628{
6629 struct mempolicy *mpol;
6630 nodemask_t *nodemask;
6631 struct folio *folio;
6632 gfp_t gfp_mask;
6633 int node;
6634
6635 gfp_mask = htlb_alloc_mask(h);
6636 node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
42d0c3fb
BW
6637 /*
6638 * This is used to allocate a temporary hugetlb to hold the copied
6639 * content, which will then be copied again to the final hugetlb
6640 * consuming a reservation. Set the alloc_fallback to false to indicate
6641 * that breaking the per-node hugetlb pool is not allowed in this case.
6642 */
6643 folio = alloc_hugetlb_folio_nodemask(h, node, nodemask, gfp_mask, false);
72e315f7
HD
6644 mpol_cond_put(mpol);
6645
6646 return folio;
6647}
6648
8fb5debc 6649/*
a734991c
AR
6650 * Used by userfaultfd UFFDIO_* ioctls. Based on userfaultfd's mfill_atomic_pte
6651 * with modifications for hugetlb pages.
8fb5debc 6652 */
61c50040 6653int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
a734991c
AR
6654 struct vm_area_struct *dst_vma,
6655 unsigned long dst_addr,
6656 unsigned long src_addr,
d9712937 6657 uffd_flags_t flags,
0169fd51 6658 struct folio **foliop)
8fb5debc 6659{
61c50040 6660 struct mm_struct *dst_mm = dst_vma->vm_mm;
d9712937
AR
6661 bool is_continue = uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE);
6662 bool wp_enabled = (flags & MFILL_ATOMIC_WP);
8cc5fcbb
MA
6663 struct hstate *h = hstate_vma(dst_vma);
6664 struct address_space *mapping = dst_vma->vm_file->f_mapping;
6665 pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr);
1e392147 6666 unsigned long size;
1c9e8def 6667 int vm_shared = dst_vma->vm_flags & VM_SHARED;
8fb5debc
MK
6668 pte_t _dst_pte;
6669 spinlock_t *ptl;
8cc5fcbb 6670 int ret = -ENOMEM;
d0ce0e47 6671 struct folio *folio;
f6191471 6672 int writable;
d0ce0e47 6673 bool folio_in_pagecache = false;
8fb5debc 6674
8a13897f
AR
6675 if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
6676 ptl = huge_pte_lock(h, dst_mm, dst_pte);
6677
6678 /* Don't overwrite any existing PTEs (even markers) */
6679 if (!huge_pte_none(huge_ptep_get(dst_pte))) {
6680 spin_unlock(ptl);
6681 return -EEXIST;
6682 }
6683
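		/*
		 * Install a poison marker instead of a real page: a later
		 * fault on this address will find PTE_MARKER_POISONED and
		 * return VM_FAULT_HWPOISON_LARGE (see hugetlb_fault() above).
		 */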
6684 _dst_pte = make_pte_marker(PTE_MARKER_POISONED);
935d4f0c
RR
6685 set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte,
6686 huge_page_size(h));
8a13897f
AR
6687
6688 /* No need to invalidate - it was non-present before */
6689 update_mmu_cache(dst_vma, dst_addr, dst_pte);
6690
6691 spin_unlock(ptl);
6692 return 0;
6693 }
6694
f6191471
AR
6695 if (is_continue) {
6696 ret = -EFAULT;
a08c7193 6697 folio = filemap_lock_hugetlb_folio(h, mapping, idx);
66dabbb6 6698 if (IS_ERR(folio))
f6191471 6699 goto out;
d0ce0e47 6700 folio_in_pagecache = true;
0169fd51
Z
6701 } else if (!*foliop) {
6702 /* If a folio already exists, then it's UFFDIO_COPY for
d84cf06e
MA
6703 * a non-missing case. Return -EEXIST.
6704 */
6705 if (vm_shared &&
6706 hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
6707 ret = -EEXIST;
6708 goto out;
6709 }
6710
d0ce0e47
SK
6711 folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0);
6712 if (IS_ERR(folio)) {
d84cf06e 6713 ret = -ENOMEM;
8fb5debc 6714 goto out;
d84cf06e 6715 }
8fb5debc 6716
e87340ca
Z
6717 ret = copy_folio_from_user(folio, (const void __user *) src_addr,
6718 false);
8fb5debc 6719
c1e8d7c6 6720 /* fallback to copy_from_user outside mmap_lock */
8fb5debc 6721 if (unlikely(ret)) {
9e368259 6722 ret = -ENOENT;
d0ce0e47 6723 /* Free the allocated folio which may have
8cc5fcbb
MA
6724 * consumed a reservation.
6725 */
d2d7bb44 6726 restore_reserve_on_error(h, dst_vma, dst_addr, folio);
d0ce0e47 6727 folio_put(folio);
8cc5fcbb 6728
d0ce0e47 6729 /* Allocate a temporary folio to hold the copied
8cc5fcbb
MA
6730 * contents.
6731 */
d0ce0e47
SK
6732 folio = alloc_hugetlb_folio_vma(h, dst_vma, dst_addr);
6733 if (!folio) {
8cc5fcbb
MA
6734 ret = -ENOMEM;
6735 goto out;
6736 }
0169fd51
Z
6737 *foliop = folio;
6738 /* Set the outparam foliop and return to the caller to
8cc5fcbb 6739 * copy the contents outside the lock. Don't free the
0169fd51 6740 * folio.
8cc5fcbb 6741 */
8fb5debc
MK
6742 goto out;
6743 }
6744 } else {
8cc5fcbb
MA
6745 if (vm_shared &&
6746 hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
0169fd51 6747 folio_put(*foliop);
8cc5fcbb 6748 ret = -EEXIST;
0169fd51 6749 *foliop = NULL;
8cc5fcbb
MA
6750 goto out;
6751 }
6752
d0ce0e47
SK
6753 folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0);
6754 if (IS_ERR(folio)) {
0169fd51 6755 folio_put(*foliop);
8cc5fcbb 6756 ret = -ENOMEM;
0169fd51 6757 *foliop = NULL;
8cc5fcbb
MA
6758 goto out;
6759 }
1cb9dc4b 6760 ret = copy_user_large_folio(folio, *foliop, dst_addr, dst_vma);
0169fd51
Z
6761 folio_put(*foliop);
6762 *foliop = NULL;
1cb9dc4b
LS
6763 if (ret) {
6764 folio_put(folio);
8cc5fcbb
MA
6765 goto out;
6766 }
8fb5debc
MK
6767 }
6768
6769 /*
b14d1671
JH
6770 * If we just allocated a new page, we need a memory barrier to ensure
6771 * that preceding stores to the page become visible before the
6772 * set_pte_at() write. The memory barrier inside __folio_mark_uptodate
6773 * is what we need.
6774 *
6775 * In the case where we have not allocated a new page (is_continue),
6776 * the page must already be uptodate. UFFDIO_CONTINUE already includes
6777 * an earlier smp_wmb() to ensure that prior stores will be visible
6778 * before the set_pte_at() write.
8fb5debc 6779 */
b14d1671
JH
6780 if (!is_continue)
6781 __folio_mark_uptodate(folio);
6782 else
6783 WARN_ON_ONCE(!folio_test_uptodate(folio));
8fb5debc 6784
f6191471
AR
6785 /* Add shared, newly allocated pages to the page cache. */
6786 if (vm_shared && !is_continue) {
1e392147
AA
6787 size = i_size_read(mapping->host) >> huge_page_shift(h);
6788 ret = -EFAULT;
6789 if (idx >= size)
6790 goto out_release_nounlock;
1c9e8def 6791
1e392147
AA
6792 /*
6793 * Serialization between remove_inode_hugepages() and
7e1813d4 6794 * hugetlb_add_to_page_cache() below happens through the
1e392147
AA
 6795		 * hugetlb_fault_mutex_table that here must be held by
 6796		 * the caller.
6797 */
9b91c0e2 6798 ret = hugetlb_add_to_page_cache(folio, mapping, idx);
1c9e8def
MK
6799 if (ret)
6800 goto out_release_nounlock;
d0ce0e47 6801 folio_in_pagecache = true;
1c9e8def
MK
6802 }
6803
bcc66543 6804 ptl = huge_pte_lock(h, dst_mm, dst_pte);
8fb5debc 6805
8625147c 6806 ret = -EIO;
d0ce0e47 6807 if (folio_test_hwpoison(folio))
8625147c
JH
6808 goto out_release_unlock;
6809
6041c691
PX
6810 /*
 6811	 * We allow overwriting a pte marker: consider when both MISSING|WP are
 6812	 * registered, we first wr-protect a none pte which has no page cache
 6813	 * page backing it, then access the page.
6814 */
fa27759a 6815 ret = -EEXIST;
6041c691 6816 if (!huge_pte_none_mostly(huge_ptep_get(dst_pte)))
8fb5debc
MK
6817 goto out_release_unlock;
6818
d0ce0e47 6819 if (folio_in_pagecache)
44887f39 6820 hugetlb_add_file_rmap(folio);
4781593d 6821 else
9d5fafd5 6822 hugetlb_add_new_anon_rmap(folio, dst_vma, dst_addr);
8fb5debc 6823
6041c691
PX
6824 /*
6825 * For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY
6826 * with wp flag set, don't set pte write bit.
6827 */
d9712937 6828 if (wp_enabled || (is_continue && !vm_shared))
f6191471
AR
6829 writable = 0;
6830 else
6831 writable = dst_vma->vm_flags & VM_WRITE;
6832
d0ce0e47 6833 _dst_pte = make_huge_pte(dst_vma, &folio->page, writable);
6041c691
PX
6834 /*
6835 * Always mark UFFDIO_COPY page dirty; note that this may not be
6836 * extremely important for hugetlbfs for now since swapping is not
 6837	 * extremely important for hugetlbfs for now since swapping is not
 6838	 * supported, but we should still be clear that this page cannot be
 6839	 * thrown away at will, even if the write bit is not set.
6839 */
6840 _dst_pte = huge_pte_mkdirty(_dst_pte);
8fb5debc
MK
6841 _dst_pte = pte_mkyoung(_dst_pte);
6842
d9712937 6843 if (wp_enabled)
6041c691
PX
6844 _dst_pte = huge_pte_mkuffd_wp(_dst_pte);
6845
935d4f0c 6846 set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, huge_page_size(h));
8fb5debc 6847
8fb5debc
MK
6848 hugetlb_count_add(pages_per_huge_page(h), dst_mm);
6849
6850 /* No need to invalidate - it was non-present before */
6851 update_mmu_cache(dst_vma, dst_addr, dst_pte);
6852
6853 spin_unlock(ptl);
f6191471 6854 if (!is_continue)
d0ce0e47 6855 folio_set_hugetlb_migratable(folio);
f6191471 6856 if (vm_shared || is_continue)
d0ce0e47 6857 folio_unlock(folio);
8fb5debc
MK
6858 ret = 0;
6859out:
6860 return ret;
6861out_release_unlock:
6862 spin_unlock(ptl);
f6191471 6863 if (vm_shared || is_continue)
d0ce0e47 6864 folio_unlock(folio);
5af10dfd 6865out_release_nounlock:
d0ce0e47 6866 if (!folio_in_pagecache)
d2d7bb44 6867 restore_reserve_on_error(h, dst_vma, dst_addr, folio);
d0ce0e47 6868 folio_put(folio);
8fb5debc
MK
6869 goto out;
6870}
714c1891 6871#endif /* CONFIG_USERFAULTFD */
8fb5debc 6872
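/*
 * Descriptive note: on success the function below returns the number of
 * changed entries scaled to base pages (pages << h->order); on failure it
 * returns a negative errno, e.g. -ENOMEM when a page table needed for a
 * uffd-wp marker cannot be allocated.
 */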
a79390f5 6873long hugetlb_change_protection(struct vm_area_struct *vma,
5a90d5a1
PX
6874 unsigned long address, unsigned long end,
6875 pgprot_t newprot, unsigned long cp_flags)
8f860591
ZY
6876{
6877 struct mm_struct *mm = vma->vm_mm;
6878 unsigned long start = address;
6879 pte_t *ptep;
6880 pte_t pte;
a5516438 6881 struct hstate *h = hstate_vma(vma);
a79390f5 6882 long pages = 0, psize = huge_page_size(h);
dff11abe 6883 bool shared_pmd = false;
ac46d4f3 6884 struct mmu_notifier_range range;
e95a9851 6885 unsigned long last_addr_mask;
5a90d5a1
PX
6886 bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
6887 bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
dff11abe
MK
6888
6889 /*
6890 * In the case of shared PMDs, the area to flush could be beyond
ac46d4f3 6891 * start/end. Set range.start/range.end to cover the maximum possible
dff11abe
MK
6892 * range if PMD sharing is possible.
6893 */
7269f999 6894 mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA,
7d4a8be0 6895 0, mm, start, end);
ac46d4f3 6896 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
8f860591
ZY
6897
6898 BUG_ON(address >= end);
ac46d4f3 6899 flush_cache_range(vma, range.start, range.end);
8f860591 6900
ac46d4f3 6901 mmu_notifier_invalidate_range_start(&range);
40549ba8 6902 hugetlb_vma_lock_write(vma);
83cde9e8 6903 i_mmap_lock_write(vma->vm_file->f_mapping);
40549ba8 6904 last_addr_mask = hugetlb_mask_last_page(h);
60dfaad6 6905 for (; address < end; address += psize) {
cb900f41 6906 spinlock_t *ptl;
9c67a207 6907 ptep = hugetlb_walk(vma, address, psize);
e95a9851 6908 if (!ptep) {
fed15f13
PX
6909 if (!uffd_wp) {
6910 address |= last_addr_mask;
6911 continue;
6912 }
6913 /*
6914 * Userfaultfd wr-protect requires pgtable
6915 * pre-allocations to install pte markers.
6916 */
6917 ptep = huge_pte_alloc(mm, vma, address, psize);
d1751118
PX
6918 if (!ptep) {
6919 pages = -ENOMEM;
fed15f13 6920 break;
d1751118 6921 }
e95a9851 6922 }
cb900f41 6923 ptl = huge_pte_lock(h, mm, ptep);
4ddb4d91 6924 if (huge_pmd_unshare(mm, vma, address, ptep)) {
60dfaad6
PX
6925 /*
6926 * When uffd-wp is enabled on the vma, unshare
6927 * shouldn't happen at all. Warn about it if it
 6928			 * happens for some reason.
6929 */
6930 WARN_ON_ONCE(uffd_wp || uffd_wp_resolve);
7da4d641 6931 pages++;
cb900f41 6932 spin_unlock(ptl);
dff11abe 6933 shared_pmd = true;
4ddb4d91 6934 address |= last_addr_mask;
39dde65c 6935 continue;
7da4d641 6936 }
a8bda28d
NH
6937 pte = huge_ptep_get(ptep);
6938 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
0e678153
DH
6939 /* Nothing to do. */
6940 } else if (unlikely(is_hugetlb_entry_migration(pte))) {
a8bda28d 6941 swp_entry_t entry = pte_to_swp_entry(pte);
6c287605 6942 struct page *page = pfn_swap_entry_to_page(entry);
44f86392 6943 pte_t newpte = pte;
a8bda28d 6944
44f86392 6945 if (is_writable_migration_entry(entry)) {
6c287605
DH
6946 if (PageAnon(page))
6947 entry = make_readable_exclusive_migration_entry(
6948 swp_offset(entry));
6949 else
6950 entry = make_readable_migration_entry(
6951 swp_offset(entry));
a8bda28d 6952 newpte = swp_entry_to_pte(entry);
a8bda28d
NH
6953 pages++;
6954 }
44f86392
DH
6955
6956 if (uffd_wp)
6957 newpte = pte_swp_mkuffd_wp(newpte);
6958 else if (uffd_wp_resolve)
6959 newpte = pte_swp_clear_uffd_wp(newpte);
6960 if (!pte_same(pte, newpte))
935d4f0c 6961 set_huge_pte_at(mm, address, ptep, newpte, psize);
0e678153 6962 } else if (unlikely(is_pte_marker(pte))) {
c5977c95
PX
6963 /*
6964 * Do nothing on a poison marker; page is
 6965			 * corrupted, permissions do not apply. Here
 6966			 * pte_marker_uffd_wp()==true implies !poison
 6967			 * because they're mutually exclusive.
6968 */
6969 if (pte_marker_uffd_wp(pte) && uffd_wp_resolve)
0e678153 6970 /* Safe to modify directly (non-present->none). */
60dfaad6 6971 huge_pte_clear(mm, address, ptep, psize);
0e678153 6972 } else if (!huge_pte_none(pte)) {
023bdd00 6973 pte_t old_pte;
79c1c594 6974 unsigned int shift = huge_page_shift(hstate_vma(vma));
023bdd00
AK
6975
6976 old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
16785bd7 6977 pte = huge_pte_modify(old_pte, newprot);
79c1c594 6978 pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
5a90d5a1 6979 if (uffd_wp)
f1eb1bac 6980 pte = huge_pte_mkuffd_wp(pte);
5a90d5a1
PX
6981 else if (uffd_wp_resolve)
6982 pte = huge_pte_clear_uffd_wp(pte);
023bdd00 6983 huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
7da4d641 6984 pages++;
60dfaad6
PX
6985 } else {
6986 /* None pte */
6987 if (unlikely(uffd_wp))
6988 /* Safe to modify directly (none->non-present). */
6989 set_huge_pte_at(mm, address, ptep,
935d4f0c
RR
6990 make_pte_marker(PTE_MARKER_UFFD_WP),
6991 psize);
8f860591 6992 }
cb900f41 6993 spin_unlock(ptl);
8f860591 6994 }
d833352a 6995 /*
c8c06efa 6996 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
d833352a 6997 * may have cleared our pud entry and done put_page on the page table:
c8c06efa 6998 * once we release i_mmap_rwsem, another task can do the final put_page
dff11abe
MK
 6999	 * and that page table can be reused and filled with junk. If we actually
7000 * did unshare a page of pmds, flush the range corresponding to the pud.
d833352a 7001 */
dff11abe 7002 if (shared_pmd)
ac46d4f3 7003 flush_hugetlb_tlb_range(vma, range.start, range.end);
dff11abe
MK
7004 else
7005 flush_hugetlb_tlb_range(vma, start, end);
0f10851e 7006 /*
1af5a810
AP
7007 * No need to call mmu_notifier_arch_invalidate_secondary_tlbs() we are
7008 * downgrading page table protection not changing it to point to a new
7009 * page.
0f10851e 7010 *
ee65728e 7011 * See Documentation/mm/mmu_notifier.rst
0f10851e 7012 */
83cde9e8 7013 i_mmap_unlock_write(vma->vm_file->f_mapping);
40549ba8 7014 hugetlb_vma_unlock_write(vma);
ac46d4f3 7015 mmu_notifier_invalidate_range_end(&range);
7da4d641 7016
d1751118 7017 return pages > 0 ? (pages << h->order) : pages;
8f860591
ZY
7018}
7019
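/*
 * Reservation flow, as a summary of the code below: compute the pages needed
 * (region_chg() for shared mappings, the full range for private ones), charge
 * the hugetlb cgroup reserve, take pages from the subpool, account them
 * globally via hugetlb_acct_memory(), and finally commit with region_add()
 * for shared mappings. Any failure unwinds the earlier steps.
 */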
33b8f84a
MK
7020/* Return true if reservation was successful, false otherwise. */
7021bool hugetlb_reserve_pages(struct inode *inode,
a1e78772 7022 long from, long to,
5a6fe125 7023 struct vm_area_struct *vma,
ca16d140 7024 vm_flags_t vm_flags)
e4e574b7 7025{
c5094ec7 7026 long chg = -1, add = -1;
a5516438 7027 struct hstate *h = hstate_inode(inode);
90481622 7028 struct hugepage_subpool *spool = subpool_inode(inode);
9119a41e 7029 struct resv_map *resv_map;
075a61d0 7030 struct hugetlb_cgroup *h_cg = NULL;
0db9d74e 7031 long gbl_reserve, regions_needed = 0;
e4e574b7 7032
63489f8e
MK
7033 /* This should never happen */
7034 if (from > to) {
7035 VM_WARN(1, "%s called with a negative range\n", __func__);
33b8f84a 7036 return false;
63489f8e
MK
7037 }
7038
8d9bfb26 7039 /*
e700898f
MK
7040 * vma specific semaphore used for pmd sharing and fault/truncation
7041 * synchronization
8d9bfb26
MK
7042 */
7043 hugetlb_vma_lock_alloc(vma);
7044
17c9d12e
MG
7045 /*
7046 * Only apply hugepage reservation if asked. At fault time, an
7047 * attempt will be made for VM_NORESERVE to allocate a page
90481622 7048 * without using reserves
17c9d12e 7049 */
ca16d140 7050 if (vm_flags & VM_NORESERVE)
33b8f84a 7051 return true;
17c9d12e 7052
a1e78772
MG
7053 /*
7054 * Shared mappings base their reservation on the number of pages that
7055 * are already allocated on behalf of the file. Private mappings need
7056 * to reserve the full area even if read-only as mprotect() may be
7057 * called to make the mapping read-write. Assume !vma is a shm mapping
7058 */
9119a41e 7059 if (!vma || vma->vm_flags & VM_MAYSHARE) {
f27a5136
MK
7060 /*
7061 * resv_map can not be NULL as hugetlb_reserve_pages is only
7062 * called for inodes for which resv_maps were created (see
7063 * hugetlbfs_get_inode).
7064 */
4e35f483 7065 resv_map = inode_resv_map(inode);
9119a41e 7066
0db9d74e 7067 chg = region_chg(resv_map, from, to, &regions_needed);
9119a41e 7068 } else {
e9fe92ae 7069 /* Private mapping. */
9119a41e 7070 resv_map = resv_map_alloc();
17c9d12e 7071 if (!resv_map)
8d9bfb26 7072 goto out_err;
17c9d12e 7073
a1e78772 7074 chg = to - from;
84afd99b 7075
17c9d12e
MG
7076 set_vma_resv_map(vma, resv_map);
7077 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
7078 }
7079
33b8f84a 7080 if (chg < 0)
c50ac050 7081 goto out_err;
8a630112 7082
33b8f84a
MK
7083 if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h),
7084 chg * pages_per_huge_page(h), &h_cg) < 0)
075a61d0 7085 goto out_err;
075a61d0
MA
7086
7087 if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) {
7088 /* For private mappings, the hugetlb_cgroup uncharge info hangs
7089 * of the resv_map.
7090 */
7091 resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h);
7092 }
7093
1c5ecae3
MK
7094 /*
7095 * There must be enough pages in the subpool for the mapping. If
7096 * the subpool has a minimum size, there may be some global
7097 * reservations already in place (gbl_reserve).
7098 */
7099 gbl_reserve = hugepage_subpool_get_pages(spool, chg);
33b8f84a 7100 if (gbl_reserve < 0)
075a61d0 7101 goto out_uncharge_cgroup;
5a6fe125
MG
7102
7103 /*
17c9d12e 7104 * Check enough hugepages are available for the reservation.
90481622 7105 * Hand the pages back to the subpool if there are not
5a6fe125 7106 */
33b8f84a 7107 if (hugetlb_acct_memory(h, gbl_reserve) < 0)
075a61d0 7108 goto out_put_pages;
17c9d12e
MG
7109
7110 /*
7111 * Account for the reservations made. Shared mappings record regions
7112 * that have reservations as they are shared by multiple VMAs.
7113 * When the last VMA disappears, the region map says how much
7114 * the reservation was and the page cache tells how much of
7115 * the reservation was consumed. Private mappings are per-VMA and
7116 * only the consumed reservations are tracked. When the VMA
7117 * disappears, the original reservation is the VMA size and the
7118 * consumed reservations are stored in the map. Hence, nothing
7119 * else has to be done for private mappings here
7120 */
33039678 7121 if (!vma || vma->vm_flags & VM_MAYSHARE) {
075a61d0 7122 add = region_add(resv_map, from, to, regions_needed, h, h_cg);
0db9d74e
MA
7123
7124 if (unlikely(add < 0)) {
7125 hugetlb_acct_memory(h, -gbl_reserve);
075a61d0 7126 goto out_put_pages;
0db9d74e 7127 } else if (unlikely(chg > add)) {
33039678
MK
7128 /*
7129 * pages in this range were added to the reserve
7130 * map between region_chg and region_add. This
d0ce0e47 7131 * indicates a race with alloc_hugetlb_folio. Adjust
33039678
MK
7132 * the subpool and reserve counts modified above
7133 * based on the difference.
7134 */
7135 long rsv_adjust;
7136
d85aecf2
ML
7137 /*
7138 * hugetlb_cgroup_uncharge_cgroup_rsvd() will put the
7139 * reference to h_cg->css. See comment below for detail.
7140 */
075a61d0
MA
7141 hugetlb_cgroup_uncharge_cgroup_rsvd(
7142 hstate_index(h),
7143 (chg - add) * pages_per_huge_page(h), h_cg);
7144
33039678
MK
7145 rsv_adjust = hugepage_subpool_put_pages(spool,
7146 chg - add);
7147 hugetlb_acct_memory(h, -rsv_adjust);
d85aecf2
ML
7148 } else if (h_cg) {
7149 /*
7150 * The file_regions will hold their own reference to
7151 * h_cg->css. So we should release the reference held
7152 * via hugetlb_cgroup_charge_cgroup_rsvd() when we are
7153 * done.
7154 */
7155 hugetlb_cgroup_put_rsvd_cgroup(h_cg);
33039678
MK
7156 }
7157 }
33b8f84a
MK
7158 return true;
7159
075a61d0
MA
7160out_put_pages:
7161 /* put back original number of pages, chg */
7162 (void)hugepage_subpool_put_pages(spool, chg);
7163out_uncharge_cgroup:
7164 hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h),
7165 chg * pages_per_huge_page(h), h_cg);
c50ac050 7166out_err:
8d9bfb26 7167 hugetlb_vma_lock_free(vma);
5e911373 7168 if (!vma || vma->vm_flags & VM_MAYSHARE)
0db9d74e
MA
7169 /* Only call region_abort if the region_chg succeeded but the
7170 * region_add failed or didn't run.
7171 */
7172 if (chg >= 0 && add < 0)
7173 region_abort(resv_map, from, to, regions_needed);
92fe9dcb 7174 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
f031dd27 7175 kref_put(&resv_map->refs, resv_map_release);
92fe9dcb
RR
7176 set_vma_resv_map(vma, NULL);
7177 }
33b8f84a 7178 return false;
a43a8c39
CK
7179}
7180
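/*
 * Typically called from the hugetlbfs truncate/evict paths: drop [start, end)
 * from the inode's reserve map, adjust i_blocks, and hand the now-unused
 * reservations back to the subpool and the global pool (a descriptive summary
 * of the function below).
 */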
b5cec28d
MK
7181long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
7182 long freed)
a43a8c39 7183{
a5516438 7184 struct hstate *h = hstate_inode(inode);
4e35f483 7185 struct resv_map *resv_map = inode_resv_map(inode);
9119a41e 7186 long chg = 0;
90481622 7187 struct hugepage_subpool *spool = subpool_inode(inode);
1c5ecae3 7188 long gbl_reserve;
45c682a6 7189
f27a5136
MK
7190 /*
7191 * Since this routine can be called in the evict inode path for all
7192 * hugetlbfs inodes, resv_map could be NULL.
7193 */
b5cec28d
MK
7194 if (resv_map) {
7195 chg = region_del(resv_map, start, end);
7196 /*
7197 * region_del() can fail in the rare case where a region
7198 * must be split and another region descriptor can not be
7199 * allocated. If end == LONG_MAX, it will not fail.
7200 */
7201 if (chg < 0)
7202 return chg;
7203 }
7204
45c682a6 7205 spin_lock(&inode->i_lock);
e4c6f8be 7206 inode->i_blocks -= (blocks_per_huge_page(h) * freed);
45c682a6
KC
7207 spin_unlock(&inode->i_lock);
7208
1c5ecae3
MK
7209 /*
7210 * If the subpool has a minimum size, the number of global
7211 * reservations to be released may be adjusted.
dddf31a4
ML
7212 *
7213 * Note that !resv_map implies freed == 0. So (chg - freed)
7214 * won't go negative.
1c5ecae3
MK
7215 */
7216 gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
7217 hugetlb_acct_memory(h, -gbl_reserve);
b5cec28d
MK
7218
7219 return 0;
a43a8c39 7220}
93f70f90 7221
3212b535
SC
7222#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
7223static unsigned long page_table_shareable(struct vm_area_struct *svma,
7224 struct vm_area_struct *vma,
7225 unsigned long addr, pgoff_t idx)
7226{
7227 unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
7228 svma->vm_start;
7229 unsigned long sbase = saddr & PUD_MASK;
7230 unsigned long s_end = sbase + PUD_SIZE;
7231
7232 /* Allow segments to share if only one is marked locked */
e430a95a
SB
7233 unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED_MASK;
7234 unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED_MASK;
3212b535
SC
7235
7236 /*
7237 * match the virtual addresses, permission and the alignment of the
7238 * page table page.
131a79b4
MK
7239 *
7240 * Also, vma_lock (vm_private_data) is required for sharing.
3212b535
SC
7241 */
7242 if (pmd_index(addr) != pmd_index(saddr) ||
7243 vm_flags != svm_flags ||
131a79b4
MK
7244 !range_in_vma(svma, sbase, s_end) ||
7245 !svma->vm_private_data)
3212b535
SC
7246 return 0;
7247
7248 return saddr;
7249}
7250
bbff39cc 7251bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
3212b535 7252{
bbff39cc
MK
7253 unsigned long start = addr & PUD_MASK;
7254 unsigned long end = start + PUD_SIZE;
7255
8d9bfb26
MK
7256#ifdef CONFIG_USERFAULTFD
7257 if (uffd_disable_huge_pmd_share(vma))
7258 return false;
7259#endif
3212b535
SC
7260 /*
7261 * check on proper vm_flags and page table alignment
7262 */
8d9bfb26
MK
7263 if (!(vma->vm_flags & VM_MAYSHARE))
7264 return false;
bbff39cc 7265 if (!vma->vm_private_data) /* vma lock required for sharing */
8d9bfb26
MK
7266 return false;
7267 if (!range_in_vma(vma, start, end))
7268 return false;
7269 return true;
7270}
7271
017b1660
MK
7272/*
7273 * Determine if start,end range within vma could be mapped by shared pmd.
7274 * If yes, adjust start and end to cover range associated with possible
7275 * shared pmd mappings.
7276 */
7277void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
7278 unsigned long *start, unsigned long *end)
7279{
a1ba9da8
LX
7280 unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE),
7281 v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
017b1660 7282
a1ba9da8 7283 /*
f0953a1b
IM
7284 * vma needs to span at least one aligned PUD size, and the range
 7285	 * must be at least partially within it.
a1ba9da8
LX
7286 */
7287 if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
7288 (*end <= v_start) || (*start >= v_end))
017b1660
MK
7289 return;
7290
75802ca6 7291 /* Extend the range to be PUD aligned for a worst case scenario */
a1ba9da8
LX
7292 if (*start > v_start)
7293 *start = ALIGN_DOWN(*start, PUD_SIZE);
017b1660 7294
a1ba9da8
LX
7295 if (*end < v_end)
7296 *end = ALIGN(*end, PUD_SIZE);
017b1660
MK
7297}
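
/*
 * Worked example for adjust_range_if_pmd_sharing_possible() above, assuming
 * x86_64 where PUD_SIZE is 1 GiB and a VM_MAYSHARE hugetlb vma that spans at
 * least [0x40000000, 0x80000000): a range of [0x40200000, 0x40400000) is
 * expanded to [0x40000000, 0x80000000), i.e. rounded out to the PUD-sized
 * region whose page table could be shared.
 */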
7298
3212b535
SC
7299/*
7300 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
7301 * and returns the corresponding pte. While this is not necessary for the
7302 * !shared pmd case because we can allocate the pmd later as well, it makes the
3a47c54f
MK
7303 * code much cleaner. pmd allocation is essential for the shared case because
7304 * pud has to be populated inside the same i_mmap_rwsem section - otherwise
7305 * racing tasks could either miss the sharing (see huge_pte_offset) or select a
7306 * bad pmd for sharing.
3212b535 7307 */
aec44e0f
PX
7308pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
7309 unsigned long addr, pud_t *pud)
3212b535 7310{
3212b535
SC
7311 struct address_space *mapping = vma->vm_file->f_mapping;
7312 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
7313 vma->vm_pgoff;
7314 struct vm_area_struct *svma;
7315 unsigned long saddr;
7316 pte_t *spte = NULL;
7317 pte_t *pte;
7318
3a47c54f 7319 i_mmap_lock_read(mapping);
3212b535
SC
7320 vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
7321 if (svma == vma)
7322 continue;
7323
7324 saddr = page_table_shareable(svma, vma, addr, idx);
7325 if (saddr) {
9c67a207
PX
7326 spte = hugetlb_walk(svma, saddr,
7327 vma_mmu_pagesize(svma));
3212b535
SC
7328 if (spte) {
7329 get_page(virt_to_page(spte));
7330 break;
7331 }
7332 }
7333 }
7334
7335 if (!spte)
7336 goto out;
7337
349d1670 7338 spin_lock(&mm->page_table_lock);
dc6c9a35 7339 if (pud_none(*pud)) {
3212b535
SC
7340 pud_populate(mm, pud,
7341 (pmd_t *)((unsigned long)spte & PAGE_MASK));
c17b1f42 7342 mm_inc_nr_pmds(mm);
dc6c9a35 7343 } else {
3212b535 7344 put_page(virt_to_page(spte));
dc6c9a35 7345 }
349d1670 7346 spin_unlock(&mm->page_table_lock);
3212b535
SC
7347out:
7348 pte = (pte_t *)pmd_alloc(mm, pud, addr);
3a47c54f 7349 i_mmap_unlock_read(mapping);
3212b535
SC
7350 return pte;
7351}
7352
7353/*
7354 * unmap huge page backed by shared pte.
7355 *
 7356 * The hugetlb pte page is ref counted at the time of mapping. If the pte is
 7357 * shared (indicated by page_count > 1), unmap is achieved by clearing the pud
 7358 * and decrementing the ref count. If count == 1, the pte page is not shared.
7359 *
3a47c54f 7360 * Called with page table lock held.
3212b535
SC
7361 *
7362 * returns: 1 successfully unmapped a shared pte page
7363 * 0 the underlying pte page is not shared, or it is the last user
7364 */
34ae204f 7365int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
4ddb4d91 7366 unsigned long addr, pte_t *ptep)
3212b535 7367{
4ddb4d91
MK
7368 pgd_t *pgd = pgd_offset(mm, addr);
7369 p4d_t *p4d = p4d_offset(pgd, addr);
7370 pud_t *pud = pud_offset(p4d, addr);
3212b535 7371
34ae204f 7372 i_mmap_assert_write_locked(vma->vm_file->f_mapping);
40549ba8 7373 hugetlb_vma_assert_locked(vma);
3212b535
SC
7374 BUG_ON(page_count(virt_to_page(ptep)) == 0);
7375 if (page_count(virt_to_page(ptep)) == 1)
7376 return 0;
7377
7378 pud_clear(pud);
7379 put_page(virt_to_page(ptep));
dc6c9a35 7380 mm_dec_nr_pmds(mm);
3212b535
SC
7381 return 1;
7382}
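
/*
 * Note on callers (descriptive, see the assertions above): i_mmap_rwsem must
 * be held for write and the hugetlb vma lock must be held. After a successful
 * unshare, callers flush the TLB for the whole PUD-sized range, since the
 * entire shared page table went away (see the shared_pmd handling in
 * hugetlb_change_protection()).
 */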
c1991e07 7383
9e5fc74c 7384#else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
8d9bfb26 7385
aec44e0f
PX
7386pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
7387 unsigned long addr, pud_t *pud)
9e5fc74c
SC
7388{
7389 return NULL;
7390}
e81f2d22 7391
34ae204f 7392int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
4ddb4d91 7393 unsigned long addr, pte_t *ptep)
e81f2d22
ZZ
7394{
7395 return 0;
7396}
017b1660
MK
7397
7398void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
7399 unsigned long *start, unsigned long *end)
7400{
7401}
c1991e07
PX
7402
7403bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
7404{
7405 return false;
7406}
3212b535
SC
7407#endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
7408
9e5fc74c 7409#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
aec44e0f 7410pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
9e5fc74c
SC
7411 unsigned long addr, unsigned long sz)
7412{
7413 pgd_t *pgd;
c2febafc 7414 p4d_t *p4d;
9e5fc74c
SC
7415 pud_t *pud;
7416 pte_t *pte = NULL;
7417
7418 pgd = pgd_offset(mm, addr);
f4f0a3d8
KS
7419 p4d = p4d_alloc(mm, pgd, addr);
7420 if (!p4d)
7421 return NULL;
c2febafc 7422 pud = pud_alloc(mm, p4d, addr);
9e5fc74c
SC
7423 if (pud) {
7424 if (sz == PUD_SIZE) {
7425 pte = (pte_t *)pud;
7426 } else {
7427 BUG_ON(sz != PMD_SIZE);
c1991e07 7428 if (want_pmd_share(vma, addr) && pud_none(*pud))
aec44e0f 7429 pte = huge_pmd_share(mm, vma, addr, pud);
9e5fc74c
SC
7430 else
7431 pte = (pte_t *)pmd_alloc(mm, pud, addr);
7432 }
7433 }
191fcdb6
JH
7434
7435 if (pte) {
7436 pte_t pteval = ptep_get_lockless(pte);
7437
7438 BUG_ON(pte_present(pteval) && !pte_huge(pteval));
7439 }
9e5fc74c
SC
7440
7441 return pte;
7442}
7443
9b19df29
PA
7444/*
7445 * huge_pte_offset() - Walk the page table to resolve the hugepage
7446 * entry at address @addr
7447 *
8ac0b81a
LX
7448 * Return: Pointer to page table entry (PUD or PMD) for
7449 * address @addr, or NULL if a !p*d_present() entry is encountered and the
9b19df29
PA
7450 * size @sz doesn't match the hugepage size at this level of the page
7451 * table.
7452 */
7868a208
PA
7453pte_t *huge_pte_offset(struct mm_struct *mm,
7454 unsigned long addr, unsigned long sz)
9e5fc74c
SC
7455{
7456 pgd_t *pgd;
c2febafc 7457 p4d_t *p4d;
8ac0b81a
LX
7458 pud_t *pud;
7459 pmd_t *pmd;
9e5fc74c
SC
7460
7461 pgd = pgd_offset(mm, addr);
c2febafc
KS
7462 if (!pgd_present(*pgd))
7463 return NULL;
7464 p4d = p4d_offset(pgd, addr);
7465 if (!p4d_present(*p4d))
7466 return NULL;
9b19df29 7467
c2febafc 7468 pud = pud_offset(p4d, addr);
8ac0b81a
LX
7469 if (sz == PUD_SIZE)
7470 /* must be pud huge, non-present or none */
c2febafc 7471 return (pte_t *)pud;
8ac0b81a 7472 if (!pud_present(*pud))
9b19df29 7473 return NULL;
8ac0b81a 7474 /* must have a valid entry and size to go further */
9b19df29 7475
8ac0b81a
LX
7476 pmd = pmd_offset(pud, addr);
7477 /* must be pmd huge, non-present or none */
7478 return (pte_t *)pmd;
9e5fc74c
SC
7479}
7480
e95a9851
MK
7481/*
7482 * Return a mask that can be used to update an address to the last huge
 7483 * page in a page table page for the given mapping size. Used to skip non-present
7484 * page table entries when linearly scanning address ranges. Architectures
7485 * with unique huge page to page table relationships can define their own
7486 * version of this routine.
7487 */
7488unsigned long hugetlb_mask_last_page(struct hstate *h)
7489{
7490 unsigned long hp_size = huge_page_size(h);
7491
7492 if (hp_size == PUD_SIZE)
7493 return P4D_SIZE - PUD_SIZE;
7494 else if (hp_size == PMD_SIZE)
7495 return PUD_SIZE - PMD_SIZE;
7496 else
7497 return 0UL;
7498}
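
/*
 * Sketch of how callers in this file use the mask (cf. the scan loop in
 * hugetlb_change_protection() above). When no page table is mapped at
 * @address, OR-ing in the mask jumps to the last entry covered by that
 * missing page table page, so the next "address += psize" skips past it:
 *
 *	last_addr_mask = hugetlb_mask_last_page(h);
 *	for (; address < end; address += psize) {
 *		ptep = hugetlb_walk(vma, address, psize);
 *		if (!ptep) {
 *			address |= last_addr_mask;
 *			continue;
 *		}
 *		...
 *	}
 */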
7499
7500#else
7501
7502/* See description above. Architectures can provide their own version. */
7503__weak unsigned long hugetlb_mask_last_page(struct hstate *h)
7504{
4ddb4d91
MK
7505#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
7506 if (huge_page_size(h) == PMD_SIZE)
7507 return PUD_SIZE - PMD_SIZE;
7508#endif
e95a9851
MK
7509 return 0UL;
7510}
7511
61f77eda
NH
7512#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
7513
7514/*
 7515 * These functions are overridable if your architecture needs its own
7516 * behavior.
7517 */
9747b9e9 7518bool isolate_hugetlb(struct folio *folio, struct list_head *list)
31caf665 7519{
9747b9e9 7520 bool ret = true;
bcc54222 7521
db71ef79 7522 spin_lock_irq(&hugetlb_lock);
6aa3a920
SK
7523 if (!folio_test_hugetlb(folio) ||
7524 !folio_test_hugetlb_migratable(folio) ||
7525 !folio_try_get(folio)) {
9747b9e9 7526 ret = false;
bcc54222
NH
7527 goto unlock;
7528 }
6aa3a920
SK
7529 folio_clear_hugetlb_migratable(folio);
7530 list_move_tail(&folio->lru, list);
bcc54222 7531unlock:
db71ef79 7532 spin_unlock_irq(&hugetlb_lock);
bcc54222 7533 return ret;
31caf665
NH
7534}
7535
04bac040 7536int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison)
25182f05
NH
7537{
7538 int ret = 0;
7539
7540 *hugetlb = false;
7541 spin_lock_irq(&hugetlb_lock);
04bac040 7542 if (folio_test_hugetlb(folio)) {
25182f05 7543 *hugetlb = true;
04bac040 7544 if (folio_test_hugetlb_freed(folio))
b283d983 7545 ret = 0;
04bac040
SK
7546 else if (folio_test_hugetlb_migratable(folio) || unpoison)
7547 ret = folio_try_get(folio);
0ed950d1
NH
7548 else
7549 ret = -EBUSY;
25182f05
NH
7550 }
7551 spin_unlock_irq(&hugetlb_lock);
7552 return ret;
7553}
7554
e591ef7d
NH
7555int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
7556 bool *migratable_cleared)
405ce051
NH
7557{
7558 int ret;
7559
7560 spin_lock_irq(&hugetlb_lock);
e591ef7d 7561 ret = __get_huge_page_for_hwpoison(pfn, flags, migratable_cleared);
405ce051
NH
7562 spin_unlock_irq(&hugetlb_lock);
7563 return ret;
7564}
7565
ea8e72f4 7566void folio_putback_active_hugetlb(struct folio *folio)
31caf665 7567{
db71ef79 7568 spin_lock_irq(&hugetlb_lock);
ea8e72f4
SK
7569 folio_set_hugetlb_migratable(folio);
7570 list_move_tail(&folio->lru, &(folio_hstate(folio))->hugepage_activelist);
db71ef79 7571 spin_unlock_irq(&hugetlb_lock);
ea8e72f4 7572 folio_put(folio);
31caf665 7573}
ab5ac90a 7574
345c62d1 7575void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason)
ab5ac90a 7576{
345c62d1 7577 struct hstate *h = folio_hstate(old_folio);
ab5ac90a 7578
345c62d1
SK
7579 hugetlb_cgroup_migrate(old_folio, new_folio);
7580 set_page_owner_migrate_reason(&new_folio->page, reason);
ab5ac90a
MH
7581
7582 /*
345c62d1 7583 * transfer temporary state of the new hugetlb folio. This is
ab5ac90a
MH
7584 * reverse to other transitions because the newpage is going to
7585 * be final while the old one will be freed so it takes over
7586 * the temporary status.
7587 *
7588 * Also note that we have to transfer the per-node surplus state
7589 * here as well otherwise the global surplus count will not match
7590 * the per-node's.
7591 */
345c62d1
SK
7592 if (folio_test_hugetlb_temporary(new_folio)) {
7593 int old_nid = folio_nid(old_folio);
7594 int new_nid = folio_nid(new_folio);
7595
345c62d1
SK
7596 folio_set_hugetlb_temporary(old_folio);
7597 folio_clear_hugetlb_temporary(new_folio);
ab5ac90a 7598
ab5ac90a 7599
5af1ab1d
ML
7600 /*
7601 * There is no need to transfer the per-node surplus state
7602 * when we do not cross the node.
7603 */
7604 if (new_nid == old_nid)
7605 return;
db71ef79 7606 spin_lock_irq(&hugetlb_lock);
ab5ac90a
MH
7607 if (h->surplus_huge_pages_node[old_nid]) {
7608 h->surplus_huge_pages_node[old_nid]--;
7609 h->surplus_huge_pages_node[new_nid]++;
7610 }
db71ef79 7611 spin_unlock_irq(&hugetlb_lock);
ab5ac90a
MH
7612 }
7613}
cf11e85f 7614
b30c14cd
JH
7615static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
7616 unsigned long start,
7617 unsigned long end)
6dfeaff9
PX
7618{
7619 struct hstate *h = hstate_vma(vma);
7620 unsigned long sz = huge_page_size(h);
7621 struct mm_struct *mm = vma->vm_mm;
7622 struct mmu_notifier_range range;
b30c14cd 7623 unsigned long address;
6dfeaff9
PX
7624 spinlock_t *ptl;
7625 pte_t *ptep;
7626
7627 if (!(vma->vm_flags & VM_MAYSHARE))
7628 return;
7629
6dfeaff9
PX
7630 if (start >= end)
7631 return;
7632
9c8bbfac 7633 flush_cache_range(vma, start, end);
6dfeaff9
PX
7634 /*
7635 * No need to call adjust_range_if_pmd_sharing_possible(), because
7636 * we have already done the PUD_SIZE alignment.
7637 */
7d4a8be0 7638 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
6dfeaff9
PX
7639 start, end);
7640 mmu_notifier_invalidate_range_start(&range);
40549ba8 7641 hugetlb_vma_lock_write(vma);
6dfeaff9
PX
7642 i_mmap_lock_write(vma->vm_file->f_mapping);
7643 for (address = start; address < end; address += PUD_SIZE) {
9c67a207 7644 ptep = hugetlb_walk(vma, address, sz);
6dfeaff9
PX
7645 if (!ptep)
7646 continue;
7647 ptl = huge_pte_lock(h, mm, ptep);
4ddb4d91 7648 huge_pmd_unshare(mm, vma, address, ptep);
6dfeaff9
PX
7649 spin_unlock(ptl);
7650 }
7651 flush_hugetlb_tlb_range(vma, start, end);
7652 i_mmap_unlock_write(vma->vm_file->f_mapping);
40549ba8 7653 hugetlb_vma_unlock_write(vma);
6dfeaff9 7654 /*
1af5a810 7655 * No need to call mmu_notifier_arch_invalidate_secondary_tlbs(), see
ee65728e 7656 * Documentation/mm/mmu_notifier.rst.
6dfeaff9
PX
7657 */
7658 mmu_notifier_invalidate_range_end(&range);
7659}
7660
b30c14cd
JH
7661/*
7662 * This function will unconditionally remove all the shared pmd pgtable entries
7663 * within the specific vma for a hugetlbfs memory range.
7664 */
7665void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
7666{
7667 hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE),
7668 ALIGN_DOWN(vma->vm_end, PUD_SIZE));
7669}
7670
cf11e85f 7671#ifdef CONFIG_CMA
cf11e85f
RG
7672static bool cma_reserve_called __initdata;
7673
7674static int __init cmdline_parse_hugetlb_cma(char *p)
7675{
38e719ab
BW
7676 int nid, count = 0;
7677 unsigned long tmp;
7678 char *s = p;
7679
7680 while (*s) {
7681 if (sscanf(s, "%lu%n", &tmp, &count) != 1)
7682 break;
7683
7684 if (s[count] == ':') {
f9317f77 7685 if (tmp >= MAX_NUMNODES)
38e719ab 7686 break;
f9317f77 7687 nid = array_index_nospec(tmp, MAX_NUMNODES);
38e719ab
BW
7688
7689 s += count + 1;
7690 tmp = memparse(s, &s);
7691 hugetlb_cma_size_in_node[nid] = tmp;
7692 hugetlb_cma_size += tmp;
7693
7694 /*
 7695			 * Skip the separator if we have one, otherwise
 7696			 * stop parsing.
7697 */
7698 if (*s == ',')
7699 s++;
7700 else
7701 break;
7702 } else {
7703 hugetlb_cma_size = memparse(p, &p);
7704 break;
7705 }
7706 }
7707
cf11e85f
RG
7708 return 0;
7709}
7710
7711early_param("hugetlb_cma", cmdline_parse_hugetlb_cma);
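
/*
 * Examples of strings the parser above accepts (sizes go through memparse(),
 * so the usual K/M/G suffixes work):
 *
 *	hugetlb_cma=4G			one area, split across online nodes
 *	hugetlb_cma=0:2G,1:2G		explicit per-node sizes
 */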
7712
7713void __init hugetlb_cma_reserve(int order)
7714{
7715 unsigned long size, reserved, per_node;
38e719ab 7716 bool node_specific_cma_alloc = false;
cf11e85f
RG
7717 int nid;
7718
ce70cfb1
AK
7719 /*
7720 * HugeTLB CMA reservation is required for gigantic
 7721	 * huge pages which cannot be allocated via the
7722 * page allocator. Just warn if there is any change
7723 * breaking this assumption.
7724 */
7725 VM_WARN_ON(order <= MAX_PAGE_ORDER);
cf11e85f
RG
7726 cma_reserve_called = true;
7727
38e719ab
BW
7728 if (!hugetlb_cma_size)
7729 return;
7730
7731 for (nid = 0; nid < MAX_NUMNODES; nid++) {
7732 if (hugetlb_cma_size_in_node[nid] == 0)
7733 continue;
7734
30a51400 7735 if (!node_online(nid)) {
38e719ab
BW
7736 pr_warn("hugetlb_cma: invalid node %d specified\n", nid);
7737 hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
7738 hugetlb_cma_size_in_node[nid] = 0;
7739 continue;
7740 }
7741
7742 if (hugetlb_cma_size_in_node[nid] < (PAGE_SIZE << order)) {
7743 pr_warn("hugetlb_cma: cma area of node %d should be at least %lu MiB\n",
7744 nid, (PAGE_SIZE << order) / SZ_1M);
7745 hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
7746 hugetlb_cma_size_in_node[nid] = 0;
7747 } else {
7748 node_specific_cma_alloc = true;
7749 }
7750 }
7751
7752 /* Validate the CMA size again in case some invalid nodes specified. */
cf11e85f
RG
7753 if (!hugetlb_cma_size)
7754 return;
7755
7756 if (hugetlb_cma_size < (PAGE_SIZE << order)) {
7757 pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n",
7758 (PAGE_SIZE << order) / SZ_1M);
a01f4390 7759 hugetlb_cma_size = 0;
cf11e85f
RG
7760 return;
7761 }
7762
38e719ab
BW
7763 if (!node_specific_cma_alloc) {
7764 /*
7765 * If 3 GB area is requested on a machine with 4 numa nodes,
7766 * let's allocate 1 GB on first three nodes and ignore the last one.
7767 */
7768 per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes);
7769 pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n",
7770 hugetlb_cma_size / SZ_1M, per_node / SZ_1M);
7771 }
cf11e85f
RG
7772
7773 reserved = 0;
30a51400 7774 for_each_online_node(nid) {
cf11e85f 7775 int res;
2281f797 7776 char name[CMA_MAX_NAME];
cf11e85f 7777
38e719ab
BW
7778 if (node_specific_cma_alloc) {
7779 if (hugetlb_cma_size_in_node[nid] == 0)
7780 continue;
7781
7782 size = hugetlb_cma_size_in_node[nid];
7783 } else {
7784 size = min(per_node, hugetlb_cma_size - reserved);
7785 }
7786
cf11e85f
RG
7787 size = round_up(size, PAGE_SIZE << order);
7788
2281f797 7789 snprintf(name, sizeof(name), "hugetlb%d", nid);
a01f4390
MK
7790 /*
7791 * Note that 'order per bit' is based on smallest size that
7792 * may be returned to CMA allocator in the case of
7793 * huge page demotion.
7794 */
7795 res = cma_declare_contiguous_nid(0, size, 0,
55d134a7
FL
7796 PAGE_SIZE << HUGETLB_PAGE_ORDER,
7797 HUGETLB_PAGE_ORDER, false, name,
7798 &hugetlb_cma[nid], nid);
cf11e85f
RG
7799 if (res) {
7800 pr_warn("hugetlb_cma: reservation failed: err %d, node %d",
7801 res, nid);
7802 continue;
7803 }
7804
7805 reserved += size;
7806 pr_info("hugetlb_cma: reserved %lu MiB on node %d\n",
7807 size / SZ_1M, nid);
7808
7809 if (reserved >= hugetlb_cma_size)
7810 break;
7811 }
a01f4390
MK
7812
7813 if (!reserved)
7814 /*
7815 * hugetlb_cma_size is used to determine if allocations from
7816 * cma are possible. Set to zero if no cma regions are set up.
7817 */
7818 hugetlb_cma_size = 0;
cf11e85f
RG
7819}
7820
263b8998 7821static void __init hugetlb_cma_check(void)
cf11e85f
RG
7822{
7823 if (!hugetlb_cma_size || cma_reserve_called)
7824 return;
7825
7826 pr_warn("hugetlb_cma: the option isn't supported by current arch\n");
7827}
7828
7829#endif /* CONFIG_CMA */