// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic hugetlb support.
 * (C) Nadia Yvette Chambers, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/memblock.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/mmdebug.h>
#include <linux/sched/signal.h>
#include <linux/rmap.h>
#include <linux/string_helpers.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/jhash.h>
#include <linux/numa.h>
#include <linux/llist.h>
#include <linux/cma.h>
#include <linux/migrate.h>
#include <linux/nospec.h>
#include <linux/delayacct.h>
#include <linux/memory.h>
#include <linux/mm_inline.h>
#include <linux/padata.h>

#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include <linux/page_owner.h>
#include "internal.h"
#include "hugetlb_vmemmap.h"
int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

#ifdef CONFIG_CMA
static struct cma *hugetlb_cma[MAX_NUMNODES];
static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata;
static bool hugetlb_cma_folio(struct folio *folio, unsigned int order)
{
	return cma_pages_valid(hugetlb_cma[folio_nid(folio)], &folio->page,
				1 << order);
}
#else
static bool hugetlb_cma_folio(struct folio *folio, unsigned int order)
{
	return false;
}
#endif
static unsigned long hugetlb_cma_size __initdata;

__initdata struct list_head huge_boot_pages[MAX_NUMNODES];

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static bool __initdata parsed_valid_hugepagesz = true;
static bool __initdata parsed_default_hugepagesz;
static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata;

/*
 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
 * free_huge_pages, and surplus_huge_pages.
 */
DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Serializes faults on the same logical page.  This is used to
 * prevent spurious OOMs when the hugepage pool is fully utilized.
 */
static int num_fault_mutexes;
struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;

/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);
static void hugetlb_vma_lock_free(struct vm_area_struct *vma);
static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
		unsigned long start, unsigned long end);
static struct resv_map *vma_resv_map(struct vm_area_struct *vma);
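/*
 * Return true when the subpool has no remaining users and no longer needs
 * to hold any pages or minimum-size reservations, i.e. it can be freed.
 */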
static inline bool subpool_is_free(struct hugepage_subpool *spool)
{
	if (spool->count)
		return false;
	if (spool->max_hpages != -1)
		return spool->used_hpages == 0;
	if (spool->min_hpages != -1)
		return spool->rsv_hpages == spool->min_hpages;

	return true;
}
static inline void unlock_or_release_subpool(struct hugepage_subpool *spool,
						unsigned long irq_flags)
{
	spin_unlock_irqrestore(&spool->lock, irq_flags);

	/* If no pages are used, and no other handles to the subpool
	 * remain, give up any reservations based on minimum size and
	 * free the subpool */
	if (subpool_is_free(spool)) {
		if (spool->min_hpages != -1)
			hugetlb_acct_memory(spool->hstate,
						-spool->min_hpages);
		kfree(spool);
	}
}
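/*
 * Allocate and initialise a new subpool.  A max_hpages or min_hpages value
 * of -1 means no limit/no minimum.  Any minimum is charged against the
 * global pool up front; if that charge fails, the subpool is not created.
 */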
struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages)
{
	struct hugepage_subpool *spool;

	spool = kzalloc(sizeof(*spool), GFP_KERNEL);
	if (!spool)
		return NULL;

	spin_lock_init(&spool->lock);
	spool->count = 1;
	spool->max_hpages = max_hpages;
	spool->hstate = h;
	spool->min_hpages = min_hpages;

	if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
		kfree(spool);
		return NULL;
	}
	spool->rsv_hpages = min_hpages;

	return spool;
}

void hugepage_put_subpool(struct hugepage_subpool *spool)
{
	unsigned long flags;

	spin_lock_irqsave(&spool->lock, flags);
	BUG_ON(!spool->count);
	spool->count--;
	unlock_or_release_subpool(spool, flags);
}
/*
 * Subpool accounting for allocating and reserving pages.
 * Return -ENOMEM if there are not enough resources to satisfy the
 * request.  Otherwise, return the number of pages by which the
 * global pools must be adjusted (upward).  The returned value may
 * only be different than the passed value (delta) in the case where
 * a subpool minimum size must be maintained.
 */
static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
				       long delta)
{
	long ret = delta;

	if (!spool)
		return ret;

	spin_lock_irq(&spool->lock);

	if (spool->max_hpages != -1) {		/* maximum size accounting */
		if ((spool->used_hpages + delta) <= spool->max_hpages)
			spool->used_hpages += delta;
		else {
			ret = -ENOMEM;
			goto unlock_ret;
		}
	}

	/* minimum size accounting */
	if (spool->min_hpages != -1 && spool->rsv_hpages) {
		if (delta > spool->rsv_hpages) {
			/*
			 * Asking for more reserves than those already taken on
			 * behalf of subpool.  Return difference.
			 */
			ret = delta - spool->rsv_hpages;
			spool->rsv_hpages = 0;
		} else {
			ret = 0;	/* reserves already accounted for */
			spool->rsv_hpages -= delta;
		}
	}

unlock_ret:
	spin_unlock_irq(&spool->lock);
	return ret;
}
/*
 * Subpool accounting for freeing and unreserving pages.
 * Return the number of global page reservations that must be dropped.
 * The return value may only be different than the passed value (delta)
 * in the case where a subpool minimum size must be maintained.
 */
static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
				       long delta)
{
	long ret = delta;
	unsigned long flags;

	if (!spool)
		return delta;

	spin_lock_irqsave(&spool->lock, flags);

	if (spool->max_hpages != -1)		/* maximum size accounting */
		spool->used_hpages -= delta;

	/* minimum size accounting */
	if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
		if (spool->rsv_hpages + delta <= spool->min_hpages)
			ret = 0;
		else
			ret = spool->rsv_hpages + delta - spool->min_hpages;

		spool->rsv_hpages += delta;
		if (spool->rsv_hpages > spool->min_hpages)
			spool->rsv_hpages = spool->min_hpages;
	}

	/*
	 * If hugetlbfs_put_super couldn't free spool due to an outstanding
	 * quota reference, free it now.
	 */
	unlock_or_release_subpool(spool, flags);

	return ret;
}

static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
	return HUGETLBFS_SB(inode->i_sb)->spool;
}

static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
	return subpool_inode(file_inode(vma->vm_file));
}
/*
 * hugetlb vma_lock helper routines
 */
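/*
 * Shared (VM_MAYSHARE) mappings are serialized through the per-vma
 * hugetlb_vma_lock hanging off vm_private_data; private mappings that own
 * a reservation fall back to the rw_sema embedded in their reserve map,
 * and all other vmas take no lock at all.
 */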
void hugetlb_vma_lock_read(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		down_read(&vma_lock->rw_sema);
	} else if (__vma_private_lock(vma)) {
		struct resv_map *resv_map = vma_resv_map(vma);

		down_read(&resv_map->rw_sema);
	}
}

void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		up_read(&vma_lock->rw_sema);
	} else if (__vma_private_lock(vma)) {
		struct resv_map *resv_map = vma_resv_map(vma);

		up_read(&resv_map->rw_sema);
	}
}

void hugetlb_vma_lock_write(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		down_write(&vma_lock->rw_sema);
	} else if (__vma_private_lock(vma)) {
		struct resv_map *resv_map = vma_resv_map(vma);

		down_write(&resv_map->rw_sema);
	}
}

void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		up_write(&vma_lock->rw_sema);
	} else if (__vma_private_lock(vma)) {
		struct resv_map *resv_map = vma_resv_map(vma);

		up_write(&resv_map->rw_sema);
	}
}

int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
{

	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		return down_write_trylock(&vma_lock->rw_sema);
	} else if (__vma_private_lock(vma)) {
		struct resv_map *resv_map = vma_resv_map(vma);

		return down_write_trylock(&resv_map->rw_sema);
	}

	return 1;
}

void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		lockdep_assert_held(&vma_lock->rw_sema);
	} else if (__vma_private_lock(vma)) {
		struct resv_map *resv_map = vma_resv_map(vma);

		lockdep_assert_held(&resv_map->rw_sema);
	}
}

void hugetlb_vma_lock_release(struct kref *kref)
{
	struct hugetlb_vma_lock *vma_lock = container_of(kref,
			struct hugetlb_vma_lock, refs);

	kfree(vma_lock);
}

static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock)
{
	struct vm_area_struct *vma = vma_lock->vma;

	/*
	 * vma_lock structure may or may not be released as a result of put,
	 * it certainly will no longer be attached to vma so clear pointer.
	 * Semaphore synchronizes access to vma_lock->vma field.
	 */
	vma_lock->vma = NULL;
	vma->vm_private_data = NULL;
	up_write(&vma_lock->rw_sema);
	kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
}

static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
{
	if (__vma_shareable_lock(vma)) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		__hugetlb_vma_unlock_write_put(vma_lock);
	} else if (__vma_private_lock(vma)) {
		struct resv_map *resv_map = vma_resv_map(vma);

		/* no free for anon vmas, but still need to unlock */
		up_write(&resv_map->rw_sema);
	}
}

static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
{
	/*
	 * Only present in sharable vmas.
	 */
	if (!vma || !__vma_shareable_lock(vma))
		return;

	if (vma->vm_private_data) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		down_write(&vma_lock->rw_sema);
		__hugetlb_vma_unlock_write_put(vma_lock);
	}
}

static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
{
	struct hugetlb_vma_lock *vma_lock;

	/* Only establish in (flags) sharable vmas */
	if (!vma || !(vma->vm_flags & VM_MAYSHARE))
		return;

	/* Should never get here with non-NULL vm_private_data */
	if (vma->vm_private_data)
		return;

	vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL);
	if (!vma_lock) {
		/*
		 * If we cannot allocate the structure, then the vma cannot
		 * participate in pmd sharing.  This is only a possible
		 * performance enhancement and memory saving issue.
		 * However, the lock is also used to synchronize page
		 * faults with truncation.  If the lock is not present,
		 * unlikely races could leave pages in a file past i_size
		 * until the file is removed.  Warn in the unlikely case of
		 * allocation failure.
		 */
		pr_warn_once("HugeTLB: unable to allocate vma specific lock\n");
		return;
	}

	kref_init(&vma_lock->refs);
	init_rwsem(&vma_lock->rw_sema);
	vma_lock->vma = vma;
	vma->vm_private_data = vma_lock;
}
/* Helper that removes a struct file_region from the resv_map cache and returns
 * it for use.
 */
static struct file_region *
get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
{
	struct file_region *nrg;

	VM_BUG_ON(resv->region_cache_count <= 0);

	resv->region_cache_count--;
	nrg = list_first_entry(&resv->region_cache, struct file_region, link);
	list_del(&nrg->link);

	nrg->from = from;
	nrg->to = to;

	return nrg;
}

static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg,
					      struct file_region *rg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	nrg->reservation_counter = rg->reservation_counter;
	nrg->css = rg->css;
	if (rg->css)
		css_get(rg->css);
#endif
}

/* Helper that records hugetlb_cgroup uncharge info. */
static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
						struct hstate *h,
						struct resv_map *resv,
						struct file_region *nrg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (h_cg) {
		nrg->reservation_counter =
			&h_cg->rsvd_hugepage[hstate_index(h)];
		nrg->css = &h_cg->css;
		/*
		 * The caller will hold exactly one h_cg->css reference for the
		 * whole contiguous reservation region. But this area might be
		 * scattered when some file_regions already reside in it. As a
		 * result, many file_regions may share only one css reference.
		 * In order to ensure that one file_region must hold exactly
		 * one h_cg->css reference, we should do css_get for each
		 * file_region and leave the reference held by the caller
		 * untouched.
		 */
		css_get(&h_cg->css);
		if (!resv->pages_per_hpage)
			resv->pages_per_hpage = pages_per_huge_page(h);
		/* pages_per_hpage should be the same for all entries in
		 * a resv_map.
		 */
		VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));
	} else {
		nrg->reservation_counter = NULL;
		nrg->css = NULL;
	}
#endif
}

static void put_uncharge_info(struct file_region *rg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (rg->css)
		css_put(rg->css);
#endif
}

static bool has_same_uncharge_info(struct file_region *rg,
				   struct file_region *org)
{
#ifdef CONFIG_CGROUP_HUGETLB
	return rg->reservation_counter == org->reservation_counter &&
	       rg->css == org->css;

#else
	return true;
#endif
}

static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
{
	struct file_region *nrg, *prg;

	prg = list_prev_entry(rg, link);
	if (&prg->link != &resv->regions && prg->to == rg->from &&
	    has_same_uncharge_info(prg, rg)) {
		prg->to = rg->to;

		list_del(&rg->link);
		put_uncharge_info(rg);
		kfree(rg);

		rg = prg;
	}

	nrg = list_next_entry(rg, link);
	if (&nrg->link != &resv->regions && nrg->from == rg->to &&
	    has_same_uncharge_info(nrg, rg)) {
		nrg->from = rg->from;

		list_del(&rg->link);
		put_uncharge_info(rg);
		kfree(rg);
	}
}
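/*
 * Add a file_region covering [from, to) to the reserve map at position rg,
 * or, when only counting, bump *regions_needed instead.  Returns the length
 * of the range handled.
 */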
static inline long
hugetlb_resv_map_add(struct resv_map *map, struct list_head *rg, long from,
		     long to, struct hstate *h, struct hugetlb_cgroup *cg,
		     long *regions_needed)
{
	struct file_region *nrg;

	if (!regions_needed) {
		nrg = get_file_region_entry_from_cache(map, from, to);
		record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg);
		list_add(&nrg->link, rg);
		coalesce_file_region(map, nrg);
	} else
		*regions_needed += 1;

	return to - from;
}

/*
 * Must be called with resv->lock held.
 *
 * Calling this with regions_needed != NULL will count the number of pages
 * to be added but will not modify the linked list. regions_needed will
 * indicate the number of file_regions needed in the cache to carry out the
 * add for this range.
 */
static long add_reservation_in_range(struct resv_map *resv, long f, long t,
				     struct hugetlb_cgroup *h_cg,
				     struct hstate *h, long *regions_needed)
{
	long add = 0;
	struct list_head *head = &resv->regions;
	long last_accounted_offset = f;
	struct file_region *iter, *trg = NULL;
	struct list_head *rg = NULL;

	if (regions_needed)
		*regions_needed = 0;

	/* In this loop, we essentially handle an entry for the range
	 * [last_accounted_offset, iter->from), at every iteration, with some
	 * bounds checking.
	 */
	list_for_each_entry_safe(iter, trg, head, link) {
		/* Skip irrelevant regions that start before our range. */
		if (iter->from < f) {
			/* If this region ends after the last accounted offset,
			 * then we need to update last_accounted_offset.
			 */
			if (iter->to > last_accounted_offset)
				last_accounted_offset = iter->to;
			continue;
		}

		/* When we find a region that starts beyond our range, we've
		 * finished.
		 */
		if (iter->from >= t) {
			rg = iter->link.prev;
			break;
		}

		/* Add an entry for last_accounted_offset -> iter->from, and
		 * update last_accounted_offset.
		 */
		if (iter->from > last_accounted_offset)
			add += hugetlb_resv_map_add(resv, iter->link.prev,
						    last_accounted_offset,
						    iter->from, h, h_cg,
						    regions_needed);

		last_accounted_offset = iter->to;
	}

	/* Handle the case where our range extends beyond
	 * last_accounted_offset.
	 */
	if (!rg)
		rg = head->prev;
	if (last_accounted_offset < t)
		add += hugetlb_resv_map_add(resv, rg, last_accounted_offset,
					    t, h, h_cg, regions_needed);

	return add;
}
/* Must be called with resv->lock acquired. Will drop lock to allocate entries.
 */
static int allocate_file_region_entries(struct resv_map *resv,
					int regions_needed)
	__must_hold(&resv->lock)
{
	LIST_HEAD(allocated_regions);
	int to_allocate = 0, i = 0;
	struct file_region *trg = NULL, *rg = NULL;

	VM_BUG_ON(regions_needed < 0);

	/*
	 * Check for sufficient descriptors in the cache to accommodate
	 * the number of in progress add operations plus regions_needed.
	 *
	 * This is a while loop because when we drop the lock, some other call
	 * to region_add or region_del may have consumed some region_entries,
	 * so we keep looping here until we finally have enough entries for
	 * (adds_in_progress + regions_needed).
	 */
	while (resv->region_cache_count <
	       (resv->adds_in_progress + regions_needed)) {
		to_allocate = resv->adds_in_progress + regions_needed -
			      resv->region_cache_count;

		/* At this point, we should have enough entries in the cache
		 * for all the existing adds_in_progress. We should only be
		 * needing to allocate for regions_needed.
		 */
		VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress);

		spin_unlock(&resv->lock);
		for (i = 0; i < to_allocate; i++) {
			trg = kmalloc(sizeof(*trg), GFP_KERNEL);
			if (!trg)
				goto out_of_memory;
			list_add(&trg->link, &allocated_regions);
		}

		spin_lock(&resv->lock);

		list_splice(&allocated_regions, &resv->region_cache);
		resv->region_cache_count += to_allocate;
	}

	return 0;

out_of_memory:
	list_for_each_entry_safe(rg, trg, &allocated_regions, link) {
		list_del(&rg->link);
		kfree(rg);
	}
	return -ENOMEM;
}
/*
 * Add the huge page range represented by [f, t) to the reserve
 * map.  Regions will be taken from the cache to fill in this range.
 * Sufficient regions should exist in the cache due to the previous
 * call to region_chg with the same range, but in some cases the cache will not
 * have sufficient entries due to races with other code doing region_add or
 * region_del.  The extra needed entries will be allocated.
 *
 * regions_needed is the out value provided by a previous call to region_chg.
 *
 * Return the number of new huge pages added to the map.  This number is greater
 * than or equal to zero.  If file_region entries needed to be allocated for
 * this operation and we were not able to allocate, it returns -ENOMEM.
 * region_add of regions of length 1 never allocates file_regions and cannot
 * fail; region_chg will always allocate at least 1 entry and a region_add for
 * 1 page will only require at most 1 entry.
 */
static long region_add(struct resv_map *resv, long f, long t,
		       long in_regions_needed, struct hstate *h,
		       struct hugetlb_cgroup *h_cg)
{
	long add = 0, actual_regions_needed = 0;

	spin_lock(&resv->lock);
retry:

	/* Count how many regions are actually needed to execute this add. */
	add_reservation_in_range(resv, f, t, NULL, NULL,
				 &actual_regions_needed);

	/*
	 * Check for sufficient descriptors in the cache to accommodate
	 * this add operation. Note that actual_regions_needed may be greater
	 * than in_regions_needed, as the resv_map may have been modified since
	 * the region_chg call. In this case, we need to make sure that we
	 * allocate extra entries, such that we have enough for all the
	 * existing adds_in_progress, plus the excess needed for this
	 * operation.
	 */
	if (actual_regions_needed > in_regions_needed &&
	    resv->region_cache_count <
		    resv->adds_in_progress +
			    (actual_regions_needed - in_regions_needed)) {
		/* region_add operation of range 1 should never need to
		 * allocate file_region entries.
		 */
		VM_BUG_ON(t - f <= 1);

		if (allocate_file_region_entries(
			    resv, actual_regions_needed - in_regions_needed)) {
			return -ENOMEM;
		}

		goto retry;
	}

	add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);

	resv->adds_in_progress -= in_regions_needed;

	spin_unlock(&resv->lock);
	return add;
}
/*
 * Examine the existing reserve map and determine how many
 * huge pages in the specified range [f, t) are NOT currently
 * represented.  This routine is called before a subsequent
 * call to region_add that will actually modify the reserve
 * map to add the specified range [f, t).  region_chg does
 * not change the number of huge pages represented by the
 * map.  A number of new file_region structures are added to the cache as
 * placeholders, for the subsequent region_add call to use. At least 1
 * file_region structure is added.
 *
 * out_regions_needed is the number of regions added to the
 * resv->adds_in_progress.  This value needs to be provided to a follow up call
 * to region_add or region_abort for proper accounting.
 *
 * Returns the number of huge pages that need to be added to the existing
 * reservation map for the range [f, t).  This number is greater than or equal
 * to zero.  -ENOMEM is returned if a new file_region structure or cache entry
 * is needed and cannot be allocated.
 */
static long region_chg(struct resv_map *resv, long f, long t,
		       long *out_regions_needed)
{
	long chg = 0;

	spin_lock(&resv->lock);

	/* Count how many hugepages in this range are NOT represented. */
	chg = add_reservation_in_range(resv, f, t, NULL, NULL,
				       out_regions_needed);

	if (*out_regions_needed == 0)
		*out_regions_needed = 1;

	if (allocate_file_region_entries(resv, *out_regions_needed))
		return -ENOMEM;

	resv->adds_in_progress += *out_regions_needed;

	spin_unlock(&resv->lock);
	return chg;
}

/*
 * Abort the in progress add operation.  The adds_in_progress field
 * of the resv_map keeps track of the operations in progress between
 * calls to region_chg and region_add.  Operations are sometimes
 * aborted after the call to region_chg.  In such cases, region_abort
 * is called to decrement the adds_in_progress counter. regions_needed
 * is the value returned by the region_chg call; it is used to decrement
 * the adds_in_progress counter.
 *
 * NOTE: The range arguments [f, t) are not needed or used in this
 * routine.  They are kept to make reading the calling code easier as
 * arguments will match the associated region_chg call.
 */
static void region_abort(struct resv_map *resv, long f, long t,
			 long regions_needed)
{
	spin_lock(&resv->lock);
	VM_BUG_ON(!resv->region_cache_count);
	resv->adds_in_progress -= regions_needed;
	spin_unlock(&resv->lock);
}
/*
 * Delete the specified range [f, t) from the reserve map.  If the
 * t parameter is LONG_MAX, this indicates that ALL regions after f
 * should be deleted.  Locate the regions which intersect [f, t)
 * and either trim, delete or split the existing regions.
 *
 * Returns the number of huge pages deleted from the reserve map.
 * In the normal case, the return value is zero or more.  In the
 * case where a region must be split, a new region descriptor must
 * be allocated.  If the allocation fails, -ENOMEM will be returned.
 * NOTE: If the parameter t == LONG_MAX, then we will never split
 * a region and possibly return -ENOMEM.  Callers specifying
 * t == LONG_MAX do not need to check for -ENOMEM error.
 */
static long region_del(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *trg;
	struct file_region *nrg = NULL;
	long del = 0;

retry:
	spin_lock(&resv->lock);
	list_for_each_entry_safe(rg, trg, head, link) {
		/*
		 * Skip regions before the range to be deleted.  file_region
		 * ranges are normally of the form [from, to). However, there
		 * may be a "placeholder" entry in the map which is of the form
		 * (from, to) with from == to.  Check for placeholder entries
		 * at the beginning of the range to be deleted.
		 */
		if (rg->to <= f && (rg->to != rg->from || rg->to != f))
			continue;

		if (rg->from >= t)
			break;

		if (f > rg->from && t < rg->to) { /* Must split region */
			/*
			 * Check for an entry in the cache before dropping
			 * lock and attempting allocation.
			 */
			if (!nrg &&
			    resv->region_cache_count > resv->adds_in_progress) {
				nrg = list_first_entry(&resv->region_cache,
							struct file_region,
							link);
				list_del(&nrg->link);
				resv->region_cache_count--;
			}

			if (!nrg) {
				spin_unlock(&resv->lock);
				nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
				if (!nrg)
					return -ENOMEM;
				goto retry;
			}

			del += t - f;
			hugetlb_cgroup_uncharge_file_region(
				resv, rg, t - f, false);

			/* New entry for end of split region */
			nrg->from = t;
			nrg->to = rg->to;

			copy_hugetlb_cgroup_uncharge_info(nrg, rg);

			INIT_LIST_HEAD(&nrg->link);

			/* Original entry is trimmed */
			rg->to = f;

			list_add(&nrg->link, &rg->link);
			nrg = NULL;
			break;
		}

		if (f <= rg->from && t >= rg->to) { /* Remove entire region */
			del += rg->to - rg->from;
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    rg->to - rg->from, true);
			list_del(&rg->link);
			kfree(rg);
			continue;
		}

		if (f <= rg->from) {		/* Trim beginning of region */
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    t - rg->from, false);

			del += t - rg->from;
			rg->from = t;
		} else {		/* Trim end of region */
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    rg->to - f, false);

			del += rg->to - f;
			rg->to = f;
		}
	}

	spin_unlock(&resv->lock);
	kfree(nrg);
	return del;
}
/*
 * A rare out of memory error was encountered which prevented removal of
 * the reserve map region for a page.  The huge page itself was freed
 * and removed from the page cache.  This routine will adjust the subpool
 * usage count, and the global reserve count if needed.  By incrementing
 * these counts, the reserve map entry which could not be deleted will
 * appear as a "reserved" entry instead of simply dangling with incorrect
 * counts.
 */
void hugetlb_fix_reserve_counts(struct inode *inode)
{
	struct hugepage_subpool *spool = subpool_inode(inode);
	long rsv_adjust;
	bool reserved = false;

	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
	if (rsv_adjust > 0) {
		struct hstate *h = hstate_inode(inode);

		if (!hugetlb_acct_memory(h, 1))
			reserved = true;
	} else if (!rsv_adjust) {
		reserved = true;
	}

	if (!reserved)
		pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
}
/*
 * Count and return the number of huge pages in the reserve map
 * that intersect with the range [f, t).
 */
static long region_count(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg;
	long chg = 0;

	spin_lock(&resv->lock);
	/* Locate each segment we overlap with, and count that overlap. */
	list_for_each_entry(rg, head, link) {
		long seg_from;
		long seg_to;

		if (rg->to <= f)
			continue;
		if (rg->from >= t)
			break;

		seg_from = max(rg->from, f);
		seg_to = min(rg->to, t);

		chg += seg_to - seg_from;
	}
	spin_unlock(&resv->lock);

	return chg;
}
/*
 * Convert the address within this vma to the page offset within
 * the mapping, in huge page units.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	return ((address - vma->vm_start) >> huge_page_shift(h)) +
			(vma->vm_pgoff >> huge_page_order(h));
}

/**
 * vma_kernel_pagesize - Page size granularity for this VMA.
 * @vma: The user mapping.
 *
 * Folios in this VMA will be aligned to, and at least the size of, the
 * number of bytes returned by this function.
 *
 * Return: The default size of the folios allocated when backing a VMA.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	if (vma->vm_ops && vma->vm_ops->pagesize)
		return vma->vm_ops->pagesize(vma);
	return PAGE_SIZE;
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);

/*
 * Return the page size being used by the MMU to back a VMA. In the majority
 * of cases, the page size used by the kernel matches the MMU size. On
 * architectures where it differs, an architecture-specific 'strong'
 * version of this symbol is required.
 */
__weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return vma_kernel_pagesize(vma);
}
/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have their future faults succeed.
 *
 * With the exception of hugetlb_dup_vma_private() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held. It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping.  A shared mapping has a region map associated
 * with the underlying file; this region map represents the backing file
 * pages which have ever had a reservation assigned, and it persists even
 * after the page is instantiated.  A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it; this region map represents those offsets which have consumed
 * a reservation, i.e. where pages have been instantiated.
 */
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
	return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
					unsigned long value)
{
	vma->vm_private_data = (void *)value;
}
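/*
 * Associate a reserve map with the hugetlb cgroup counters that its
 * reservations should be uncharged against, or clear that association
 * when no cgroup/hstate is supplied.
 */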
static void
resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map,
					  struct hugetlb_cgroup *h_cg,
					  struct hstate *h)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (!h_cg || !h) {
		resv_map->reservation_counter = NULL;
		resv_map->pages_per_hpage = 0;
		resv_map->css = NULL;
	} else {
		resv_map->reservation_counter =
			&h_cg->rsvd_hugepage[hstate_index(h)];
		resv_map->pages_per_hpage = pages_per_huge_page(h);
		resv_map->css = &h_cg->css;
	}
#endif
}
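/*
 * Allocate and initialise a reserve map, pre-seeding its region cache with
 * a single file_region entry.
 */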
struct resv_map *resv_map_alloc(void)
{
	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
	struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);

	if (!resv_map || !rg) {
		kfree(resv_map);
		kfree(rg);
		return NULL;
	}

	kref_init(&resv_map->refs);
	spin_lock_init(&resv_map->lock);
	INIT_LIST_HEAD(&resv_map->regions);
	init_rwsem(&resv_map->rw_sema);

	resv_map->adds_in_progress = 0;
	/*
	 * Initialize these to 0. On shared mappings, 0's here indicate these
	 * fields don't do cgroup accounting. On private mappings, these will be
	 * re-initialized to the proper values, to indicate that hugetlb cgroup
	 * reservations are to be un-charged from here.
	 */
	resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, NULL, NULL);

	INIT_LIST_HEAD(&resv_map->region_cache);
	list_add(&rg->link, &resv_map->region_cache);
	resv_map->region_cache_count = 1;

	return resv_map;
}

void resv_map_release(struct kref *ref)
{
	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
	struct list_head *head = &resv_map->region_cache;
	struct file_region *rg, *trg;

	/* Clear out any active regions before we release the map. */
	region_del(resv_map, 0, LONG_MAX);

	/* ... and any entries left in the cache */
	list_for_each_entry_safe(rg, trg, head, link) {
		list_del(&rg->link);
		kfree(rg);
	}

	VM_BUG_ON(resv_map->adds_in_progress);

	kfree(resv_map);
}
static inline struct resv_map *inode_resv_map(struct inode *inode)
{
	/*
	 * At inode evict time, i_mapping may not point to the original
	 * address space within the inode.  This original address space
	 * contains the pointer to the resv_map.  So, always use the
	 * address space embedded within the inode.
	 * The VERY common case is inode->mapping == &inode->i_data, but
	 * this may not be true for device special inodes.
	 */
	return (struct resv_map *)(&inode->i_data)->i_private_data;
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	if (vma->vm_flags & VM_MAYSHARE) {
		struct address_space *mapping = vma->vm_file->f_mapping;
		struct inode *inode = mapping->host;

		return inode_resv_map(inode);

	} else {
		return (struct resv_map *)(get_vma_private_data(vma) &
							~HPAGE_RESV_MASK);
	}
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);

	return (get_vma_private_data(vma) & flag) != 0;
}
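/*
 * A private mapping uses its reserve map's rw_sema only when the vma
 * actually has a reserve map attached and owns it (HPAGE_RESV_OWNER).
 */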
bool __vma_private_lock(struct vm_area_struct *vma)
{
	return !(vma->vm_flags & VM_MAYSHARE) &&
		get_vma_private_data(vma) & ~HPAGE_RESV_MASK &&
		is_vma_resv_set(vma, HPAGE_RESV_OWNER);
}

void hugetlb_dup_vma_private(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	/*
	 * Clear vm_private_data
	 * - For shared mappings this is a per-vma semaphore that may be
	 *   allocated in a subsequent call to hugetlb_vm_op_open.
	 *   Before clearing, make sure pointer is not associated with vma
	 *   as this will leak the structure.  This is the case when called
	 *   via clear_vma_resv_huge_pages() and hugetlb_vm_op_open has already
	 *   been called to allocate a new structure.
	 * - For MAP_PRIVATE mappings, this is the reserve map which does
	 *   not apply to children.  Faults generated by the children are
	 *   not guaranteed to succeed, even if read-only.
	 */
	if (vma->vm_flags & VM_MAYSHARE) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		if (vma_lock && vma_lock->vma != vma)
			vma->vm_private_data = NULL;
	} else
		vma->vm_private_data = NULL;
}

/*
 * Reset and decrement one ref on hugepage private reservation.
 * Called with mm->mmap_lock writer semaphore held.
 * This function should only be used by move_vma() and operates on a
 * same-sized vma. It should never be called holding the last ref on the
 * reservation.
 */
void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	/*
	 * Clear the old hugetlb private page reservation.
	 * It has already been transferred to new_vma.
	 *
	 * During a mremap() operation of a hugetlb vma we call move_vma()
	 * which copies vma into new_vma and unmaps vma. After the copy
	 * operation both new_vma and vma share a reference to the resv_map
	 * struct, and at that point vma is about to be unmapped. We don't
	 * want to return the reservation to the pool at unmap of vma because
	 * the reservation still lives on in new_vma, so simply decrement the
	 * ref here and remove the resv_map reference from this vma.
	 */
	struct resv_map *reservations = vma_resv_map(vma);

	if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		resv_map_put_hugetlb_cgroup_uncharge_info(reservations);
		kref_put(&reservations->refs, resv_map_release);
	}

	hugetlb_dup_vma_private(vma);
}
/* Returns true if the VMA has associated reserve pages */
static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
{
	if (vma->vm_flags & VM_NORESERVE) {
		/*
		 * This address is already reserved by another process
		 * (chg == 0), so we should decrement the reserved count.
		 * Without decrementing, the reserve count remains after
		 * releasing the inode, because this allocated page will go
		 * into the page cache and is regarded as coming from the
		 * reserved pool in the releasing step.  Currently, we
		 * don't have any other solution to deal with this situation
		 * properly, so add a work-around here.
		 */
		if (vma->vm_flags & VM_MAYSHARE && chg == 0)
			return true;
		else
			return false;
	}

	/* Shared mappings always use reserves */
	if (vma->vm_flags & VM_MAYSHARE) {
		/*
		 * We know VM_NORESERVE is not set.  Therefore, there SHOULD
		 * be a region map for all pages.  The only situation where
		 * there is no region map is if a hole was punched via
		 * fallocate.  In this case, there really are no reserves to
		 * use.  This situation is indicated if chg != 0.
		 */
		if (chg)
			return false;
		else
			return true;
	}

	/*
	 * Only the process that called mmap() has reserves for
	 * private mappings.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		/*
		 * Like the shared case above, a hole punch or truncate
		 * could have been performed on the private mapping.
		 * Examine the value of chg to determine if reserves
		 * actually exist or were previously consumed.
		 * Very Subtle - The value of chg comes from a previous
		 * call to vma_needs_reserves().  The reserve map for
		 * private mappings has different (opposite) semantics
		 * than that of shared mappings.  vma_needs_reserves()
		 * has already taken this difference in semantics into
		 * account.  Therefore, the meaning of chg is the same
		 * as in the shared case above.  Code could easily be
		 * combined, but keeping it separate draws attention to
		 * subtle differences.
		 */
		if (chg)
			return false;
		else
			return true;
	}

	return false;
}
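/*
 * Place a free hugetlb folio on the free list of its hstate for its node
 * and mark it freed.  Caller must hold hugetlb_lock.
 */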
static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio)
{
	int nid = folio_nid(folio);

	lockdep_assert_held(&hugetlb_lock);
	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);

	list_move(&folio->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[nid]++;
	folio_set_hugetlb_freed(folio);
}

static struct folio *dequeue_hugetlb_folio_node_exact(struct hstate *h,
								int nid)
{
	struct folio *folio;
	bool pin = !!(current->flags & PF_MEMALLOC_PIN);

	lockdep_assert_held(&hugetlb_lock);
	list_for_each_entry(folio, &h->hugepage_freelists[nid], lru) {
		if (pin && !folio_is_longterm_pinnable(folio))
			continue;

		if (folio_test_hwpoison(folio))
			continue;

		list_move(&folio->lru, &h->hugepage_activelist);
		folio_ref_unfreeze(folio, 1);
		folio_clear_hugetlb_freed(folio);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
		return folio;
	}

	return NULL;
}
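/*
 * Walk the zonelist built for @nid and dequeue the first free hugetlb
 * folio found on a node allowed by @nmask and the caller's cpusets.
 */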
static struct folio *dequeue_hugetlb_folio_nodemask(struct hstate *h, gfp_t gfp_mask,
							int nid, nodemask_t *nmask)
{
	unsigned int cpuset_mems_cookie;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;
	int node = NUMA_NO_NODE;

	zonelist = node_zonelist(nid, gfp_mask);

retry_cpuset:
	cpuset_mems_cookie = read_mems_allowed_begin();
	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
		struct folio *folio;

		if (!cpuset_zone_allowed(zone, gfp_mask))
			continue;
		/*
		 * no need to ask again on the same node. Pool is node rather than
		 * zone aware
		 */
		if (zone_to_nid(zone) == node)
			continue;
		node = zone_to_nid(zone);

		folio = dequeue_hugetlb_folio_node_exact(h, node);
		if (folio)
			return folio;
	}
	if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
		goto retry_cpuset;

	return NULL;
}

static unsigned long available_huge_pages(struct hstate *h)
{
	return h->free_huge_pages - h->resv_huge_pages;
}
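/*
 * Dequeue a free hugetlb folio for a fault at @address in @vma, honouring
 * the vma's memory policy.  A reservation is consumed only when the vma
 * has one and avoid_reserve is not set.
 */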
static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, int avoid_reserve,
				long chg)
{
	struct folio *folio = NULL;
	struct mempolicy *mpol;
	gfp_t gfp_mask;
	nodemask_t *nodemask;
	int nid;

	/*
	 * A child process with MAP_PRIVATE mappings created by its parent
	 * has no page reserves. This check ensures that reservations are
	 * not "stolen". The child may still get SIGKILLed.
	 */
	if (!vma_has_reserves(vma, chg) && !available_huge_pages(h))
		goto err;

	/* If reserves cannot be used, ensure enough pages are in the pool */
	if (avoid_reserve && !available_huge_pages(h))
		goto err;

	gfp_mask = htlb_alloc_mask(h);
	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);

	if (mpol_is_preferred_many(mpol)) {
		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
							nid, nodemask);

		/* Fallback to all nodes if page==NULL */
		nodemask = NULL;
	}

	if (!folio)
		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
							nid, nodemask);

	if (folio && !avoid_reserve && vma_has_reserves(vma, chg)) {
		folio_set_hugetlb_restore_reserve(folio);
		h->resv_huge_pages--;
	}

	mpol_cond_put(mpol);
	return folio;

err:
	return NULL;
}
/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed.  Ensure that we use an allowed
 * node for alloc or free.
 */
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	nid = next_node_in(nid, *nodes_allowed);
	VM_BUG_ON(nid >= MAX_NUMNODES);

	return nid;
}

static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	if (!node_isset(nid, *nodes_allowed))
		nid = next_node_allowed(nid, nodes_allowed);
	return nid;
}

/*
 * returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advance the
 * next node from which to allocate, handling wrap at end of node
 * mask.
 */
static int hstate_next_node_to_alloc(int *next_node,
					nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(*next_node, nodes_allowed);
	*next_node = next_node_allowed(nid, nodes_allowed);

	return nid;
}

/*
 * helper for remove_pool_hugetlb_folio() - return the previously saved
 * node ["this node"] from which to free a huge page.  Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

	return nid;
}

#define for_each_node_mask_to_alloc(next_node, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);					\
		nr_nodes > 0 &&							\
		((node = hstate_next_node_to_alloc(next_node, mask)) || 1);	\
		nr_nodes--)

#define for_each_node_mask_to_free(hs, nr_nodes, node, mask)			\
	for (nr_nodes = nodes_weight(*mask);					\
		nr_nodes > 0 &&							\
		((node = hstate_next_node_to_free(hs, mask)) || 1);		\
		nr_nodes--)
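/*
 * Tear a compound gigantic folio back down into individual pages: reset
 * the folio's mapcount/pincount state and clear compound_head on every
 * tail page, restoring per-page refcounts unless the folio is being
 * demoted to a smaller order.
 */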
/* used to demote non-gigantic_huge pages as well */
static void __destroy_compound_gigantic_folio(struct folio *folio,
					unsigned int order, bool demote)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p;

	atomic_set(&folio->_entire_mapcount, 0);
	atomic_set(&folio->_nr_pages_mapped, 0);
	atomic_set(&folio->_pincount, 0);

	for (i = 1; i < nr_pages; i++) {
		p = folio_page(folio, i);
		p->flags &= ~PAGE_FLAGS_CHECK_AT_FREE;
		p->mapping = NULL;
		clear_compound_head(p);
		if (!demote)
			set_page_refcounted(p);
	}

	__folio_clear_head(folio);
}

static void destroy_compound_hugetlb_folio_for_demote(struct folio *folio,
					unsigned int order)
{
	__destroy_compound_gigantic_folio(folio, order, true);
}

#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
static void destroy_compound_gigantic_folio(struct folio *folio,
					unsigned int order)
{
	__destroy_compound_gigantic_folio(folio, order, false);
}

static void free_gigantic_folio(struct folio *folio, unsigned int order)
{
	/*
	 * If the page isn't allocated using the cma allocator,
	 * cma_release() returns false.
	 */
#ifdef CONFIG_CMA
	int nid = folio_nid(folio);

	if (cma_release(hugetlb_cma[nid], &folio->page, 1 << order))
		return;
#endif

	free_contig_range(folio_pfn(folio), 1 << order);
}

#ifdef CONFIG_CONTIG_ALLOC
static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
		int nid, nodemask_t *nodemask)
{
	struct page *page;
	unsigned long nr_pages = pages_per_huge_page(h);
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

#ifdef CONFIG_CMA
	{
		int node;

		if (hugetlb_cma[nid]) {
			page = cma_alloc(hugetlb_cma[nid], nr_pages,
					huge_page_order(h), true);
			if (page)
				return page_folio(page);
		}

		if (!(gfp_mask & __GFP_THISNODE)) {
			for_each_node_mask(node, *nodemask) {
				if (node == nid || !hugetlb_cma[node])
					continue;

				page = cma_alloc(hugetlb_cma[node], nr_pages,
						huge_page_order(h), true);
				if (page)
					return page_folio(page);
			}
		}
	}
#endif

	page = alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
	return page ? page_folio(page) : NULL;
}

#else /* !CONFIG_CONTIG_ALLOC */
static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
					int nid, nodemask_t *nodemask)
{
	return NULL;
}
#endif /* CONFIG_CONTIG_ALLOC */

#else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
					int nid, nodemask_t *nodemask)
{
	return NULL;
}
static inline void free_gigantic_folio(struct folio *folio,
						unsigned int order) { }
static inline void destroy_compound_gigantic_folio(struct folio *folio,
						unsigned int order) { }
#endif
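/*
 * Clear the hugetlb-specific page flag so the folio is treated as an
 * ordinary compound page from here on.  Caller must hold hugetlb_lock.
 */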
static inline void __clear_hugetlb_destructor(struct hstate *h,
						struct folio *folio)
{
	lockdep_assert_held(&hugetlb_lock);

	folio_clear_hugetlb(folio);
}

/*
 * Remove hugetlb folio from lists.
 * If vmemmap exists for the folio, update dtor so that the folio appears
 * as just a compound page.  Otherwise, wait until after allocating vmemmap
 * to update dtor.
 *
 * A reference is held on the folio, except in the case of demote.
 *
 * Must be called with hugetlb lock held.
 */
static void __remove_hugetlb_folio(struct hstate *h, struct folio *folio,
							bool adjust_surplus,
							bool demote)
{
	int nid = folio_nid(folio);

	VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio(folio), folio);
	VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio_rsvd(folio), folio);

	lockdep_assert_held(&hugetlb_lock);
	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
		return;

	list_del(&folio->lru);

	if (folio_test_hugetlb_freed(folio)) {
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
	}
	if (adjust_surplus) {
		h->surplus_huge_pages--;
		h->surplus_huge_pages_node[nid]--;
	}

	/*
	 * We can only clear the hugetlb destructor after allocating vmemmap
	 * pages.  Otherwise, someone (memory error handling) may try to write
	 * to tail struct pages.
	 */
	if (!folio_test_hugetlb_vmemmap_optimized(folio))
		__clear_hugetlb_destructor(h, folio);

	/*
	 * In the case of demote we do not ref count the page as it will soon
	 * be turned into a page of smaller size.
	 */
	if (!demote)
		folio_ref_unfreeze(folio, 1);

	h->nr_huge_pages--;
	h->nr_huge_pages_node[nid]--;
}

static void remove_hugetlb_folio(struct hstate *h, struct folio *folio,
							bool adjust_surplus)
{
	__remove_hugetlb_folio(h, folio, adjust_surplus, false);
}

static void remove_hugetlb_folio_for_demote(struct hstate *h, struct folio *folio,
							bool adjust_surplus)
{
	__remove_hugetlb_folio(h, folio, adjust_surplus, true);
}
2f6c57d6 1695static void add_hugetlb_folio(struct hstate *h, struct folio *folio,
ad2fa371
MS
1696 bool adjust_surplus)
1697{
1698 int zeroed;
2f6c57d6 1699 int nid = folio_nid(folio);
ad2fa371 1700
2f6c57d6 1701 VM_BUG_ON_FOLIO(!folio_test_hugetlb_vmemmap_optimized(folio), folio);
ad2fa371
MS
1702
1703 lockdep_assert_held(&hugetlb_lock);
1704
2f6c57d6 1705 INIT_LIST_HEAD(&folio->lru);
ad2fa371
MS
1706 h->nr_huge_pages++;
1707 h->nr_huge_pages_node[nid]++;
1708
1709 if (adjust_surplus) {
1710 h->surplus_huge_pages++;
1711 h->surplus_huge_pages_node[nid]++;
1712 }
1713
9c5ccf2d 1714 folio_set_hugetlb(folio);
2f6c57d6 1715 folio_change_private(folio, NULL);
a9e1eab2 1716 /*
2f6c57d6
SK
1717 * We have to set hugetlb_vmemmap_optimized again, as the
1718 * folio_change_private(folio, NULL) call above cleared it.
a9e1eab2 1719 */
2f6c57d6 1720 folio_set_hugetlb_vmemmap_optimized(folio);
ad2fa371
MS
1721
1722 /*
2f6c57d6 1723 * This folio is about to be managed by the hugetlb allocator and
b65a4eda
MK
1724 * should have no users. Drop our reference, and check for others
1725 * just in case.
ad2fa371 1726 */
2f6c57d6
SK
1727 zeroed = folio_put_testzero(folio);
1728 if (unlikely(!zeroed))
b65a4eda 1729 /*
454a00c4
MWO
1730 * It is VERY unlikely someone else has taken a ref
1731 * on the folio. In this case, we simply return as
1732 * free_huge_folio() will be called when this other ref
1733 * is dropped.
b65a4eda
MK
1734 */
1735 return;
1736
2f6c57d6 1737 arch_clear_hugepage_flags(&folio->page);
240d67a8 1738 enqueue_hugetlb_folio(h, folio);
ad2fa371
MS
1739}
1740
6f6956cf
SK
1741static void __update_and_free_hugetlb_folio(struct hstate *h,
1742 struct folio *folio)
6af2acb6 1743{
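	/*
	 * If the folio is vmemmap optimized, the hugetlb flag is still set
	 * and may only be cleared after the vmemmap pages are restored
	 * below; track that with clear_dtor.
	 */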
32c87719 1744 bool clear_dtor = folio_test_hugetlb_vmemmap_optimized(folio);
a5516438 1745
4eb0716e 1746 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
944d9fec 1747 return;
18229df5 1748
161df60e
NH
1749 /*
1750 * If we don't know which subpages are hwpoisoned, we can't free
1751 * the hugepage, so it's leaked intentionally.
1752 */
7f325a8d 1753 if (folio_test_hugetlb_raw_hwp_unreliable(folio))
161df60e
NH
1754 return;
1755
d8f5f7e4
MK
1756 /*
1757 * If folio is not vmemmap optimized (!clear_dtor), then the folio
c5ad3233 1758 * is no longer identified as a hugetlb page. hugetlb_vmemmap_restore_folio
d8f5f7e4
MK
1759 * can only be passed hugetlb pages and will BUG otherwise.
1760 */
c5ad3233 1761 if (clear_dtor && hugetlb_vmemmap_restore_folio(h, folio)) {
ad2fa371
MS
1762 spin_lock_irq(&hugetlb_lock);
1763 /*
1764 * If we cannot allocate vmemmap pages, just refuse to free the
1765 * page and put the page back on the hugetlb free list and treat
1766 * as a surplus page.
1767 */
7f325a8d 1768 add_hugetlb_folio(h, folio, true);
ad2fa371
MS
1769 spin_unlock_irq(&hugetlb_lock);
1770 return;
1771 }
1772
161df60e
NH
1773 /*
1774 * Move PageHWPoison flag from head page to the raw error pages,
1775 * which makes any healthy subpages reusable.
1776 */
911565b8 1777 if (unlikely(folio_test_hwpoison(folio)))
2ff6cece 1778 folio_clear_hugetlb_hwpoison(folio);
161df60e 1779
32c87719
MK
1780 /*
1781 * If vmemmap pages were allocated above, then we need to clear the
1782 * hugetlb destructor under the hugetlb lock.
1783 */
1784 if (clear_dtor) {
1785 spin_lock_irq(&hugetlb_lock);
1786 __clear_hugetlb_destructor(h, folio);
1787 spin_unlock_irq(&hugetlb_lock);
1788 }
1789
a01f4390
MK
1790 /*
1791 * Non-gigantic pages demoted from CMA allocated gigantic pages
7f325a8d 1792 * need to be given back to CMA in free_gigantic_folio.
a01f4390
MK
1793 */
1794 if (hstate_is_gigantic(h) ||
2f6c57d6 1795 hugetlb_cma_folio(folio, huge_page_order(h))) {
911565b8 1796 destroy_compound_gigantic_folio(folio, huge_page_order(h));
7f325a8d 1797 free_gigantic_folio(folio, huge_page_order(h));
944d9fec 1798 } else {
6f6956cf 1799 __free_pages(&folio->page, huge_page_order(h));
944d9fec 1800 }
6af2acb6
AL
1801}
1802
b65d4adb 1803/*
d6ef19e2 1804 * Since update_and_free_hugetlb_folio() can be called from any context, we cannot
b65d4adb
MS
1805 * use GFP_KERNEL to allocate vmemmap pages. However, we can defer the
1806 * actual freeing to a workqueue to avoid using GFP_ATOMIC to allocate
1807 * the vmemmap pages.
1808 *
1809 * free_hpage_workfn() locklessly retrieves the linked list of pages to be
1810 * freed and frees them one-by-one. As the page->mapping pointer is going
1811 * to be cleared in free_hpage_workfn() anyway, it is reused as the llist_node
1812 * structure of a lockless linked list of huge pages to be freed.
1813 */
1814static LLIST_HEAD(hpage_freelist);
1815
1816static void free_hpage_workfn(struct work_struct *work)
1817{
1818 struct llist_node *node;
1819
1820 node = llist_del_all(&hpage_freelist);
1821
1822 while (node) {
3ec145f9 1823 struct folio *folio;
b65d4adb
MS
1824 struct hstate *h;
1825
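		/*
		 * The llist_node was stashed in folio->mapping by
		 * update_and_free_hugetlb_folio(); recover the folio from it.
		 */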
3ec145f9
MWO
1826 folio = container_of((struct address_space **)node,
1827 struct folio, mapping);
b65d4adb 1828 node = node->next;
3ec145f9 1829 folio->mapping = NULL;
b65d4adb 1830 /*
affd26b1
SK
1831 * The VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio) in
1832 * folio_hstate() is going to trigger because a previous call to
9c5ccf2d
MWO
1833 * remove_hugetlb_folio() will clear the hugetlb bit, so do
1834 * not use folio_hstate() directly.
b65d4adb 1835 */
3ec145f9 1836 h = size_to_hstate(folio_size(folio));
b65d4adb 1837
3ec145f9 1838 __update_and_free_hugetlb_folio(h, folio);
b65d4adb
MS
1839
1840 cond_resched();
1841 }
1842}
1843static DECLARE_WORK(free_hpage_work, free_hpage_workfn);
1844
1845static inline void flush_free_hpage_work(struct hstate *h)
1846{
6213834c 1847 if (hugetlb_vmemmap_optimizable(h))
b65d4adb
MS
1848 flush_work(&free_hpage_work);
1849}
1850
d6ef19e2 1851static void update_and_free_hugetlb_folio(struct hstate *h, struct folio *folio,
b65d4adb
MS
1852 bool atomic)
1853{
d6ef19e2 1854 if (!folio_test_hugetlb_vmemmap_optimized(folio) || !atomic) {
6f6956cf 1855 __update_and_free_hugetlb_folio(h, folio);
b65d4adb
MS
1856 return;
1857 }
1858
1859 /*
1860 * Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap pages.
1861 *
1862 * Only call schedule_work() if hpage_freelist is previously
1863 * empty. Otherwise, schedule_work() had been called but the workfn
1864 * hasn't retrieved the list yet.
1865 */
d6ef19e2 1866 if (llist_add((struct llist_node *)&folio->mapping, &hpage_freelist))
b65d4adb
MS
1867 schedule_work(&free_hpage_work);
1868}
1869
cfb8c750
MK
1870static void bulk_vmemmap_restore_error(struct hstate *h,
1871 struct list_head *folio_list,
1872 struct list_head *non_hvo_folios)
10c6ec49 1873{
04bbfd84 1874 struct folio *folio, *t_folio;
10c6ec49 1875
cfb8c750
MK
1876 if (!list_empty(non_hvo_folios)) {
1877 /*
1878 * Free any restored hugetlb pages so that restore of the
1879 * entire list can be retried.
1880 * The idea is that in the common case of ENOMEM errors freeing
1881 * hugetlb pages with vmemmap we will free up memory so that we
1882 * can allocate vmemmap for more hugetlb pages.
1883 */
1884 list_for_each_entry_safe(folio, t_folio, non_hvo_folios, lru) {
1885 list_del(&folio->lru);
1886 spin_lock_irq(&hugetlb_lock);
1887 __clear_hugetlb_destructor(h, folio);
1888 spin_unlock_irq(&hugetlb_lock);
1889 update_and_free_hugetlb_folio(h, folio, false);
1890 cond_resched();
1891 }
1892 } else {
1893 /*
1894 * In the case where there are no folios which can be
1895 * immediately freed, we loop through the list trying to restore
1896 * vmemmap individually in the hope that someone elsewhere may
1897 * have done something to cause success (such as freeing some
1898 * memory). If unable to restore a hugetlb page, the hugetlb
1899 * page is made a surplus page and removed from the list.
1900 * If we are able to restore vmemmap and free one hugetlb page, we
1901 * quit processing the list to retry the bulk operation.
1902 */
1903 list_for_each_entry_safe(folio, t_folio, folio_list, lru)
c5ad3233 1904 if (hugetlb_vmemmap_restore_folio(h, folio)) {
cfb8c750 1905 list_del(&folio->lru);
d2cf88c2
MK
1906 spin_lock_irq(&hugetlb_lock);
1907 add_hugetlb_folio(h, folio, true);
1908 spin_unlock_irq(&hugetlb_lock);
cfb8c750
MK
1909 } else {
1910 list_del(&folio->lru);
1911 spin_lock_irq(&hugetlb_lock);
1912 __clear_hugetlb_destructor(h, folio);
1913 spin_unlock_irq(&hugetlb_lock);
1914 update_and_free_hugetlb_folio(h, folio, false);
1915 cond_resched();
1916 break;
1917 }
d2cf88c2 1918 }
cfb8c750
MK
1919}
1920
1921static void update_and_free_pages_bulk(struct hstate *h,
1922 struct list_head *folio_list)
1923{
1924 long ret;
1925 struct folio *folio, *t_folio;
1926 LIST_HEAD(non_hvo_folios);
d2cf88c2
MK
1927
1928 /*
cfb8c750
MK
1929 * First allocate required vmemmap (if necessary) for all folios.
1930 * Carefully handle errors and free up any available hugetlb pages
1931 * in an effort to make forward progress.
d2cf88c2 1932 */
cfb8c750
MK
1933retry:
1934 ret = hugetlb_vmemmap_restore_folios(h, folio_list, &non_hvo_folios);
1935 if (ret < 0) {
1936 bulk_vmemmap_restore_error(h, folio_list, &non_hvo_folios);
1937 goto retry;
1938 }
1939
1940 /*
1941 * At this point, folio_list should be empty, ret should be >= 0 and there
1942 * should only be pages on the non_hvo_folios list.
1943 * Do note that the non_hvo_folios list could be empty.
1944 * Without HVO enabled, ret will be 0 and there is no need to call
1945 * __clear_hugetlb_destructor as this was done previously.
1946 */
1947 VM_WARN_ON(!list_empty(folio_list));
1948 VM_WARN_ON(ret < 0);
1949 if (!list_empty(&non_hvo_folios) && ret) {
d2cf88c2 1950 spin_lock_irq(&hugetlb_lock);
cfb8c750 1951 list_for_each_entry(folio, &non_hvo_folios, lru)
d2cf88c2
MK
1952 __clear_hugetlb_destructor(h, folio);
1953 spin_unlock_irq(&hugetlb_lock);
1954 }
1955
cfb8c750 1956 list_for_each_entry_safe(folio, t_folio, &non_hvo_folios, lru) {
d6ef19e2 1957 update_and_free_hugetlb_folio(h, folio, false);
10c6ec49
MK
1958 cond_resched();
1959 }
1960}
1961
e5ff2159
AK
1962struct hstate *size_to_hstate(unsigned long size)
1963{
1964 struct hstate *h;
1965
1966 for_each_hstate(h) {
1967 if (huge_page_size(h) == size)
1968 return h;
1969 }
1970 return NULL;
1971}
1972
454a00c4 1973void free_huge_folio(struct folio *folio)
27a85ef1 1974{
a5516438
AK
1975 /*
1976 * Can't pass hstate in here because it is called from the
1977 * compound page destructor.
1978 */
0356c4b9
SK
1979 struct hstate *h = folio_hstate(folio);
1980 int nid = folio_nid(folio);
1981 struct hugepage_subpool *spool = hugetlb_folio_subpool(folio);
07443a85 1982 bool restore_reserve;
db71ef79 1983 unsigned long flags;
27a85ef1 1984
0356c4b9
SK
1985 VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
1986 VM_BUG_ON_FOLIO(folio_mapcount(folio), folio);
8ace22bc 1987
0356c4b9
SK
1988 hugetlb_set_folio_subpool(folio, NULL);
1989 if (folio_test_anon(folio))
1990 __ClearPageAnonExclusive(&folio->page);
1991 folio->mapping = NULL;
1992 restore_reserve = folio_test_hugetlb_restore_reserve(folio);
1993 folio_clear_hugetlb_restore_reserve(folio);
27a85ef1 1994
1c5ecae3 1995 /*
d6995da3 1996 * If HPageRestoreReserve was set on page, page allocation consumed a
0919e1b6
MK
1997 * reservation. If the page was associated with a subpool, there
1998 * would have been a page reserved in the subpool before allocation
1999 * via hugepage_subpool_get_pages(). Since we are 'restoring' the
6c26d310 2000 * reservation, do not call hugepage_subpool_put_pages() as this will
0919e1b6 2001 * remove the reserved page from the subpool.
1c5ecae3 2002 */
0919e1b6
MK
2003 if (!restore_reserve) {
2004 /*
2005 * A return code of zero implies that the subpool will be
2006 * under its minimum size if the reservation is not restored
2007 * after page is free. Therefore, force restore_reserve
2008 * operation.
2009 */
2010 if (hugepage_subpool_put_pages(spool, 1) == 0)
2011 restore_reserve = true;
2012 }
1c5ecae3 2013
db71ef79 2014 spin_lock_irqsave(&hugetlb_lock, flags);
0356c4b9 2015 folio_clear_hugetlb_migratable(folio);
d4ab0316
SK
2016 hugetlb_cgroup_uncharge_folio(hstate_index(h),
2017 pages_per_huge_page(h), folio);
2018 hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
2019 pages_per_huge_page(h), folio);
8cba9576 2020 mem_cgroup_uncharge(folio);
07443a85
JK
2021 if (restore_reserve)
2022 h->resv_huge_pages++;
2023
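	/*
	 * Temporary folios are freed immediately; if the node holds surplus
	 * pages, shrink the pool by freeing this folio; otherwise return it
	 * to the free lists.
	 */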
0356c4b9 2024 if (folio_test_hugetlb_temporary(folio)) {
cfd5082b 2025 remove_hugetlb_folio(h, folio, false);
db71ef79 2026 spin_unlock_irqrestore(&hugetlb_lock, flags);
d6ef19e2 2027 update_and_free_hugetlb_folio(h, folio, true);
ab5ac90a 2028 } else if (h->surplus_huge_pages_node[nid]) {
0edaecfa 2029 /* remove the page from active list */
cfd5082b 2030 remove_hugetlb_folio(h, folio, true);
db71ef79 2031 spin_unlock_irqrestore(&hugetlb_lock, flags);
d6ef19e2 2032 update_and_free_hugetlb_folio(h, folio, true);
7893d1d5 2033 } else {
454a00c4 2034 arch_clear_hugepage_flags(&folio->page);
240d67a8 2035 enqueue_hugetlb_folio(h, folio);
db71ef79 2036 spin_unlock_irqrestore(&hugetlb_lock, flags);
c77c0a8a 2037 }
c77c0a8a
WL
2038}
2039
d3d99fcc
OS
2040/*
2041 * Must be called with the hugetlb lock held
2042 */
2043static void __prep_account_new_huge_page(struct hstate *h, int nid)
2044{
2045 lockdep_assert_held(&hugetlb_lock);
2046 h->nr_huge_pages++;
2047 h->nr_huge_pages_node[nid]++;
2048}
2049
d67e32f2 2050static void init_new_hugetlb_folio(struct hstate *h, struct folio *folio)
b7ba30c6 2051{
d8f5f7e4 2052 folio_set_hugetlb(folio);
de656ed3 2053 INIT_LIST_HEAD(&folio->lru);
de656ed3
SK
2054 hugetlb_set_folio_subpool(folio, NULL);
2055 set_hugetlb_cgroup(folio, NULL);
2056 set_hugetlb_cgroup_rsvd(folio, NULL);
d3d99fcc
OS
2057}
2058
d67e32f2
MK
2059static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio)
2060{
2061 init_new_hugetlb_folio(h, folio);
c5ad3233 2062 hugetlb_vmemmap_optimize_folio(h, folio);
d67e32f2
MK
2063}
2064
d1c60955 2065static void prep_new_hugetlb_folio(struct hstate *h, struct folio *folio, int nid)
d3d99fcc 2066{
de656ed3 2067 __prep_new_hugetlb_folio(h, folio);
db71ef79 2068 spin_lock_irq(&hugetlb_lock);
d3d99fcc 2069 __prep_account_new_huge_page(h, nid);
db71ef79 2070 spin_unlock_irq(&hugetlb_lock);
b7ba30c6
AK
2071}
2072
d1c60955
SK
2073static bool __prep_compound_gigantic_folio(struct folio *folio,
2074 unsigned int order, bool demote)
20a0307c 2075{
7118fc29 2076 int i, j;
20a0307c 2077 int nr_pages = 1 << order;
14455eab 2078 struct page *p;
20a0307c 2079
d1c60955 2080 __folio_clear_reserved(folio);
2b21624f 2081 for (i = 0; i < nr_pages; i++) {
d1c60955 2082 p = folio_page(folio, i);
14455eab 2083
ef5a22be
AA
2084 /*
2085 * For gigantic hugepages allocated through bootmem at
2086 * boot, it's safer to be consistent with the not-gigantic
2087 * hugepages and clear the PG_reserved bit from all tail pages
7c8de358 2088 * too. Otherwise drivers using get_user_pages() to access tail
ef5a22be
AA
2089 * pages may get the reference counting wrong if they see
2090 * PG_reserved set on a tail page (despite the head page not
2091 * having PG_reserved set). Enforcing this consistency between
2092 * head and tail pages allows drivers to optimize away a check
2093 * on the head page when they need to know if put_page() is needed
2094 * after get_user_pages().
2095 */
7fb0728a
MK
2096 if (i != 0) /* head page cleared above */
2097 __ClearPageReserved(p);
7118fc29
MK
2098 /*
2099 * Subtle and very unlikely
2100 *
2101 * Gigantic 'page allocators' such as memblock or cma will
2102 * return a set of pages with each page ref counted. We need
2103 * to turn this set of pages into a compound page with tail
2104 * page ref counts set to zero. Code such as speculative page
2105 * cache adding could take a ref on a 'to be' tail page.
2106 * We need to respect any increased ref count, and only set
2107 * the ref count to zero if count is currently 1. If count
416d85ed
MK
2108 * is not 1, we return an error. An error return indicates
2109 * the set of pages cannot be converted to a gigantic page.
2110 * The caller who allocated the pages should then discard the
2111 * pages using the appropriate free interface.
34d9e35b
MK
2112 *
2113 * In the case of demote, the ref count will be zero.
7118fc29 2114 */
34d9e35b
MK
2115 if (!demote) {
2116 if (!page_ref_freeze(p, 1)) {
2117 pr_warn("HugeTLB page can not be used due to unexpected inflated ref count\n");
2118 goto out_error;
2119 }
2120 } else {
2121 VM_BUG_ON_PAGE(page_count(p), p);
7118fc29 2122 }
2b21624f 2123 if (i != 0)
d1c60955 2124 set_compound_head(p, &folio->page);
20a0307c 2125 }
e3b7bf97
TS
2126 __folio_set_head(folio);
2127 /* we rely on prep_new_hugetlb_folio to set the destructor */
2128 folio_set_order(folio, order);
46f27228 2129 atomic_set(&folio->_entire_mapcount, -1);
eec20426 2130 atomic_set(&folio->_nr_pages_mapped, 0);
94688e8e 2131 atomic_set(&folio->_pincount, 0);
7118fc29
MK
2132 return true;
2133
2134out_error:
2b21624f
MK
2135 /* undo page modifications made above */
2136 for (j = 0; j < i; j++) {
d1c60955 2137 p = folio_page(folio, j);
2b21624f
MK
2138 if (j != 0)
2139 clear_compound_head(p);
7118fc29
MK
2140 set_page_refcounted(p);
2141 }
2142 /* need to clear PG_reserved on remaining tail pages */
14455eab 2143 for (; j < nr_pages; j++) {
d1c60955 2144 p = folio_page(folio, j);
7118fc29 2145 __ClearPageReserved(p);
14455eab 2146 }
7118fc29 2147 return false;
20a0307c
WF
2148}
2149
d1c60955
SK
2150static bool prep_compound_gigantic_folio(struct folio *folio,
2151 unsigned int order)
34d9e35b 2152{
d1c60955 2153 return __prep_compound_gigantic_folio(folio, order, false);
34d9e35b
MK
2154}
2155
d1c60955 2156static bool prep_compound_gigantic_folio_for_demote(struct folio *folio,
8531fc6f
MK
2157 unsigned int order)
2158{
d1c60955 2159 return __prep_compound_gigantic_folio(folio, order, true);
8531fc6f
MK
2160}
2161
7795912c
AM
2162/*
2163 * PageHuge() only returns true for hugetlbfs pages, but not for normal or
2164 * transparent huge pages. See the PageTransHuge() documentation for more
2165 * details.
2166 */
29cfe755 2167int PageHuge(const struct page *page)
20a0307c 2168{
29cfe755 2169 const struct folio *folio;
2d678c64 2170
20a0307c
WF
2171 if (!PageCompound(page))
2172 return 0;
2d678c64 2173 folio = page_folio(page);
9c5ccf2d 2174 return folio_test_hugetlb(folio);
20a0307c 2175}
43131e14
NH
2176EXPORT_SYMBOL_GPL(PageHuge);
2177
c0d0381a
MK
2178/*
2179 * Find and lock address space (mapping) in write mode.
2180 *
336bf30e
MK
2181 * Upon entry, the page is locked which means that page_mapping() is
2182 * stable. Due to locking order, we can only trylock_write. If we
2183 * cannot get the lock, simply return NULL to the caller.
c0d0381a
MK
2184 */
2185struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)
2186{
336bf30e 2187 struct address_space *mapping = page_mapping(hpage);
c0d0381a 2188
c0d0381a
MK
2189 if (!mapping)
2190 return mapping;
2191
c0d0381a
MK
2192 if (i_mmap_trylock_write(mapping))
2193 return mapping;
2194
336bf30e 2195 return NULL;
c0d0381a
MK
2196}
2197
19fc1a7e 2198static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h,
f60858f9
MK
2199 gfp_t gfp_mask, int nid, nodemask_t *nmask,
2200 nodemask_t *node_alloc_noretry)
1da177e4 2201{
af0fb9df 2202 int order = huge_page_order(h);
1da177e4 2203 struct page *page;
f60858f9 2204 bool alloc_try_hard = true;
2b21624f 2205 bool retry = true;
f96efd58 2206
f60858f9
MK
2207 /*
2208 * By default we always try hard to allocate the page with
2209 * __GFP_RETRY_MAYFAIL flag. However, if we are allocating pages in
2210 * a loop (to adjust global huge page counts) and previous allocation
2211 * failed, do not continue to try hard on the same node. Use the
2212 * node_alloc_noretry bitmap to manage this state information.
2213 */
2214 if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry))
2215 alloc_try_hard = false;
2216 gfp_mask |= __GFP_COMP|__GFP_NOWARN;
2217 if (alloc_try_hard)
2218 gfp_mask |= __GFP_RETRY_MAYFAIL;
af0fb9df
MH
2219 if (nid == NUMA_NO_NODE)
2220 nid = numa_mem_id();
2b21624f 2221retry:
84172f4b 2222 page = __alloc_pages(gfp_mask, order, nid, nmask);
2b21624f
MK
2223
2224 /* Freeze head page */
2225 if (page && !page_ref_freeze(page, 1)) {
2226 __free_pages(page, order);
2227 if (retry) { /* retry once */
2228 retry = false;
2229 goto retry;
2230 }
2231 /* WOW! twice in a row. */
2232 pr_warn("HugeTLB head page unexpected inflated ref count\n");
2233 page = NULL;
2234 }
2235
f60858f9
MK
2236 /*
2237 * If we did not specify __GFP_RETRY_MAYFAIL, but still got a page, this
2238 * indicates an overall state change. Clear the bit so that we resume
2239 * normal 'try hard' allocations.
2240 */
2241 if (node_alloc_noretry && page && !alloc_try_hard)
2242 node_clear(nid, *node_alloc_noretry);
2243
2244 /*
2245 * If we tried hard to get a page but failed, set the bit so that
2246 * subsequent attempts will not try as hard until there is an
2247 * overall state change.
2248 */
2249 if (node_alloc_noretry && !page && alloc_try_hard)
2250 node_set(nid, *node_alloc_noretry);
2251
19fc1a7e
SK
2252 if (!page) {
2253 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
2254 return NULL;
2255 }
2256
2257 __count_vm_event(HTLB_BUDDY_PGALLOC);
2258 return page_folio(page);
63b4613c
NA
2259}
2260
d67e32f2
MK
2261static struct folio *__alloc_fresh_hugetlb_folio(struct hstate *h,
2262 gfp_t gfp_mask, int nid, nodemask_t *nmask,
2263 nodemask_t *node_alloc_noretry)
0c397dae 2264{
7f325a8d 2265 struct folio *folio;
7118fc29 2266 bool retry = false;
0c397dae 2267
7118fc29 2268retry:
0c397dae 2269 if (hstate_is_gigantic(h))
19fc1a7e 2270 folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask);
0c397dae 2271 else
19fc1a7e 2272 folio = alloc_buddy_hugetlb_folio(h, gfp_mask,
f60858f9 2273 nid, nmask, node_alloc_noretry);
19fc1a7e 2274 if (!folio)
0c397dae 2275 return NULL;
d67e32f2 2276
7118fc29 2277 if (hstate_is_gigantic(h)) {
d1c60955 2278 if (!prep_compound_gigantic_folio(folio, huge_page_order(h))) {
7118fc29
MK
2279 /*
2280 * Rare failure to convert pages to compound page.
2281 * Free pages and try again - ONCE!
2282 */
7f325a8d 2283 free_gigantic_folio(folio, huge_page_order(h));
7118fc29
MK
2284 if (!retry) {
2285 retry = true;
2286 goto retry;
2287 }
7118fc29
MK
2288 return NULL;
2289 }
2290 }
0c397dae 2291
19fc1a7e 2292 return folio;
0c397dae
MH
2293}
2294
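/*
 * Like alloc_fresh_hugetlb_folio() below, but only initializes the new
 * folio. vmemmap optimization, accounting and enqueueing are left to the
 * caller (see prep_and_add_allocated_folios()).
 */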
d67e32f2
MK
2295static struct folio *only_alloc_fresh_hugetlb_folio(struct hstate *h,
2296 gfp_t gfp_mask, int nid, nodemask_t *nmask,
2297 nodemask_t *node_alloc_noretry)
2298{
2299 struct folio *folio;
2300
2301 folio = __alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask,
2302 node_alloc_noretry);
2303 if (folio)
2304 init_new_hugetlb_folio(h, folio);
2305 return folio;
2306}
2307
af0fb9df 2308/*
d67e32f2
MK
2309 * Common helper to allocate a fresh hugetlb page. All specific allocators
2310 * should use this function to get new hugetlb pages
2311 *
2312 * Note that returned page is 'frozen': ref count of head page and all tail
2313 * pages is zero.
af0fb9df 2314 */
d67e32f2
MK
2315static struct folio *alloc_fresh_hugetlb_folio(struct hstate *h,
2316 gfp_t gfp_mask, int nid, nodemask_t *nmask,
2317 nodemask_t *node_alloc_noretry)
b2261026 2318{
19fc1a7e 2319 struct folio *folio;
d67e32f2
MK
2320
2321 folio = __alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask,
2322 node_alloc_noretry);
2323 if (!folio)
2324 return NULL;
2325
2326 prep_new_hugetlb_folio(h, folio, folio_nid(folio));
2327 return folio;
2328}
2329
2330static void prep_and_add_allocated_folios(struct hstate *h,
2331 struct list_head *folio_list)
2332{
2333 unsigned long flags;
2334 struct folio *folio, *tmp_f;
2335
79359d6d
MK
2336 /* Send list for bulk vmemmap optimization processing */
2337 hugetlb_vmemmap_optimize_folios(h, folio_list);
2338
d67e32f2
MK
2339 /* Add all new pool pages to free lists in one lock cycle */
2340 spin_lock_irqsave(&hugetlb_lock, flags);
2341 list_for_each_entry_safe(folio, tmp_f, folio_list, lru) {
2342 __prep_account_new_huge_page(h, folio_nid(folio));
2343 enqueue_hugetlb_folio(h, folio);
2344 }
2345 spin_unlock_irqrestore(&hugetlb_lock, flags);
2346}
2347
2348/*
2349 * Allocates a fresh hugetlb page in a node interleaved manner. The page
2350 * will later be added to the appropriate hugetlb pool.
2351 */
2352static struct folio *alloc_pool_huge_folio(struct hstate *h,
2353 nodemask_t *nodes_allowed,
2e73ff23
GL
2354 nodemask_t *node_alloc_noretry,
2355 int *next_node)
d67e32f2 2356{
af0fb9df 2357 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
d67e32f2 2358 int nr_nodes, node;
b2261026 2359
2e73ff23 2360 for_each_node_mask_to_alloc(next_node, nr_nodes, node, nodes_allowed) {
d67e32f2
MK
2361 struct folio *folio;
2362
2363 folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, node,
19fc1a7e 2364 nodes_allowed, node_alloc_noretry);
d67e32f2
MK
2365 if (folio)
2366 return folio;
b2261026
JK
2367 }
2368
d67e32f2 2369 return NULL;
b2261026
JK
2370}
2371
e8c5c824 2372/*
10c6ec49
MK
2373 * Remove huge page from pool from next node to free. Attempt to keep
2374 * persistent huge pages more or less balanced over allowed nodes.
2375 * This routine only 'removes' the hugetlb page. The caller must make
2376 * an additional call to free the page to low level allocators.
e8c5c824
LS
2377 * Called with hugetlb_lock locked.
2378 */
d5b43e96
MWO
2379static struct folio *remove_pool_hugetlb_folio(struct hstate *h,
2380 nodemask_t *nodes_allowed, bool acct_surplus)
e8c5c824 2381{
b2261026 2382 int nr_nodes, node;
04bbfd84 2383 struct folio *folio = NULL;
e8c5c824 2384
9487ca60 2385 lockdep_assert_held(&hugetlb_lock);
b2261026 2386 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
685f3457
LS
2387 /*
2388 * If we're returning unused surplus pages, only examine
2389 * nodes with surplus pages.
2390 */
b2261026
JK
2391 if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
2392 !list_empty(&h->hugepage_freelists[node])) {
04bbfd84
MWO
2393 folio = list_entry(h->hugepage_freelists[node].next,
2394 struct folio, lru);
cfd5082b 2395 remove_hugetlb_folio(h, folio, acct_surplus);
9a76db09 2396 break;
e8c5c824 2397 }
b2261026 2398 }
e8c5c824 2399
d5b43e96 2400 return folio;
e8c5c824
LS
2401}
2402
c8721bbb
NH
2403/*
2404 * Dissolve a given free hugepage into free buddy pages. This function does
faf53def
NH
2405 * nothing for in-use hugepages and non-hugepages.
2406 * This function returns values like below:
2407 *
ad2fa371
MS
2408 * -ENOMEM: failed to allocate vmemmap pages to free the freed hugepages
2409 * when the system is under memory pressure and the feature of
2410 * freeing unused vmemmap pages associated with each hugetlb page
2411 * is enabled.
2412 * -EBUSY: failed to dissolve free hugepages or the hugepage is in-use
2413 * (allocated or reserved.)
2414 * 0: successfully dissolved free hugepages or the page is not a
2415 * hugepage (considered as already dissolved)
c8721bbb 2416 */
c3114a84 2417int dissolve_free_huge_page(struct page *page)
c8721bbb 2418{
6bc9b564 2419 int rc = -EBUSY;
1a7cdab5 2420 struct folio *folio = page_folio(page);
082d5b6b 2421
7ffddd49 2422retry:
faf53def 2423 /* Not to disrupt normal path by vainly holding hugetlb_lock */
1a7cdab5 2424 if (!folio_test_hugetlb(folio))
faf53def
NH
2425 return 0;
2426
db71ef79 2427 spin_lock_irq(&hugetlb_lock);
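	/*
	 * Re-check under the lock: the folio may have been freed or
	 * dissolved while the lock was being acquired.
	 */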
1a7cdab5 2428 if (!folio_test_hugetlb(folio)) {
faf53def
NH
2429 rc = 0;
2430 goto out;
2431 }
2432
1a7cdab5
SK
2433 if (!folio_ref_count(folio)) {
2434 struct hstate *h = folio_hstate(folio);
8346d69d 2435 if (!available_huge_pages(h))
082d5b6b 2436 goto out;
7ffddd49
MS
2437
2438 /*
2439 * We should make sure that the page is already on the free list
2440 * when it is dissolved.
2441 */
1a7cdab5 2442 if (unlikely(!folio_test_hugetlb_freed(folio))) {
db71ef79 2443 spin_unlock_irq(&hugetlb_lock);
7ffddd49
MS
2444 cond_resched();
2445
2446 /*
2447 * Theoretically, we should return -EBUSY when we
2448 * encounter this race. In fact, we have a chance
2449 * to successfully dissolve the page if we do a
2450 * retry. Because the race window is quite small.
2451 * If we seize this opportunity, it is an optimization
2452 * for increasing the success rate of dissolving page.
2453 */
2454 goto retry;
2455 }
2456
cfd5082b 2457 remove_hugetlb_folio(h, folio, false);
c1470b33 2458 h->max_huge_pages--;
db71ef79 2459 spin_unlock_irq(&hugetlb_lock);
ad2fa371
MS
2460
2461 /*
d6ef19e2
SK
2462 * Normally update_and_free_hugetlb_folio will allocate required vmemmap
2463 * before freeing the page. update_and_free_hugetlb_folio will fail to
ad2fa371
MS
2464 * free the page if it cannot allocate required vmemmap. We
2465 * need to adjust max_huge_pages if the page is not freed.
2466 * Attempt to allocate vmemmap here so that we can take
2467 * appropriate action on failure.
30a89adf
MK
2468 *
2469 * The folio_test_hugetlb check here is because
2470 * remove_hugetlb_folio will clear hugetlb folio flag for
2471 * non-vmemmap optimized hugetlb folios.
ad2fa371 2472 */
30a89adf 2473 if (folio_test_hugetlb(folio)) {
c5ad3233 2474 rc = hugetlb_vmemmap_restore_folio(h, folio);
30a89adf
MK
2475 if (rc) {
2476 spin_lock_irq(&hugetlb_lock);
2477 add_hugetlb_folio(h, folio, false);
2478 h->max_huge_pages++;
2479 goto out;
2480 }
2481 } else
2482 rc = 0;
ad2fa371 2483
30a89adf 2484 update_and_free_hugetlb_folio(h, folio, false);
ad2fa371 2485 return rc;
c8721bbb 2486 }
082d5b6b 2487out:
db71ef79 2488 spin_unlock_irq(&hugetlb_lock);
082d5b6b 2489 return rc;
c8721bbb
NH
2490}
2491
2492/*
2493 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
2494 * make specified memory blocks removable from the system.
2247bb33
GS
2495 * Note that this will dissolve a free gigantic hugepage completely, if any
2496 * part of it lies within the given range.
082d5b6b
GS
2497 * Also note that if dissolve_free_huge_page() returns with an error, all
2498 * free hugepages that were dissolved before that error are lost.
c8721bbb 2499 */
082d5b6b 2500int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
c8721bbb 2501{
c8721bbb 2502 unsigned long pfn;
eb03aa00 2503 struct page *page;
082d5b6b 2504 int rc = 0;
dc2628f3
MS
2505 unsigned int order;
2506 struct hstate *h;
c8721bbb 2507
d0177639 2508 if (!hugepages_supported())
082d5b6b 2509 return rc;
d0177639 2510
dc2628f3
MS
2511 order = huge_page_order(&default_hstate);
2512 for_each_hstate(h)
2513 order = min(order, huge_page_order(h));
2514
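	/*
	 * Step through the range at the smallest supported huge page order
	 * so that no free huge page of any size is skipped.
	 */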
2515 for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order) {
eb03aa00 2516 page = pfn_to_page(pfn);
faf53def
NH
2517 rc = dissolve_free_huge_page(page);
2518 if (rc)
2519 break;
eb03aa00 2520 }
082d5b6b
GS
2521
2522 return rc;
c8721bbb
NH
2523}
2524
ab5ac90a
MH
2525/*
2526 * Allocates a fresh surplus page from the page allocator.
2527 */
3a740e8b
SK
2528static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
2529 gfp_t gfp_mask, int nid, nodemask_t *nmask)
7893d1d5 2530{
19fc1a7e 2531 struct folio *folio = NULL;
7893d1d5 2532
bae7f4ae 2533 if (hstate_is_gigantic(h))
aa888a74
AK
2534 return NULL;
2535
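	/* Respect the overcommit limit before allocating a surplus page. */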
db71ef79 2536 spin_lock_irq(&hugetlb_lock);
9980d744
MH
2537 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
2538 goto out_unlock;
db71ef79 2539 spin_unlock_irq(&hugetlb_lock);
d1c3fb1f 2540
19fc1a7e
SK
2541 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
2542 if (!folio)
0c397dae 2543 return NULL;
d1c3fb1f 2544
db71ef79 2545 spin_lock_irq(&hugetlb_lock);
9980d744
MH
2546 /*
2547 * We could have raced with the pool size change.
2548 * Double check that and simply deallocate the new page
2549 * if we would end up overcommitting the surpluses. Abuse
454a00c4 2550 * a temporary page to work around the nasty free_huge_folio
9980d744
MH
2551 * code flow.
2552 */
2553 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
19fc1a7e 2554 folio_set_hugetlb_temporary(folio);
db71ef79 2555 spin_unlock_irq(&hugetlb_lock);
454a00c4 2556 free_huge_folio(folio);
2bf753e6 2557 return NULL;
7893d1d5 2558 }
9980d744 2559
b65a4eda 2560 h->surplus_huge_pages++;
19fc1a7e 2561 h->surplus_huge_pages_node[folio_nid(folio)]++;
b65a4eda 2562
9980d744 2563out_unlock:
db71ef79 2564 spin_unlock_irq(&hugetlb_lock);
7893d1d5 2565
3a740e8b 2566 return folio;
7893d1d5
AL
2567}
2568
e37d3e83 2569static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mask,
9a4e9f3b 2570 int nid, nodemask_t *nmask)
ab5ac90a 2571{
19fc1a7e 2572 struct folio *folio;
ab5ac90a
MH
2573
2574 if (hstate_is_gigantic(h))
2575 return NULL;
2576
19fc1a7e
SK
2577 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
2578 if (!folio)
ab5ac90a
MH
2579 return NULL;
2580
2b21624f 2581 /* fresh huge pages are frozen */
19fc1a7e 2582 folio_ref_unfreeze(folio, 1);
ab5ac90a
MH
2583 /*
2584 * We do not account these pages as surplus because they are only
2585 * temporary and will be released properly on the last reference
2586 */
19fc1a7e 2587 folio_set_hugetlb_temporary(folio);
ab5ac90a 2588
e37d3e83 2589 return folio;
ab5ac90a
MH
2590}
2591
099730d6
DH
2592/*
2593 * Use the VMA's mpolicy to allocate a huge page from the buddy.
2594 */
e0ec90ee 2595static
ff7d853b 2596struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h,
099730d6
DH
2597 struct vm_area_struct *vma, unsigned long addr)
2598{
3a740e8b 2599 struct folio *folio = NULL;
aaf14e40
MH
2600 struct mempolicy *mpol;
2601 gfp_t gfp_mask = htlb_alloc_mask(h);
2602 int nid;
2603 nodemask_t *nodemask;
2604
2605 nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
cfcaa66f
BW
2606 if (mpol_is_preferred_many(mpol)) {
2607 gfp_t gfp = gfp_mask | __GFP_NOWARN;
2608
2609 gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
3a740e8b 2610 folio = alloc_surplus_hugetlb_folio(h, gfp, nid, nodemask);
aaf14e40 2611
cfcaa66f
BW
2612 /* Fall back to all nodes if folio == NULL */
2613 nodemask = NULL;
2614 }
2615
3a740e8b
SK
2616 if (!folio)
2617 folio = alloc_surplus_hugetlb_folio(h, gfp_mask, nid, nodemask);
cfcaa66f 2618 mpol_cond_put(mpol);
ff7d853b 2619 return folio;
099730d6
DH
2620}
2621
e37d3e83
SK
2622/* folio migration callback function */
2623struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
d92bbc27 2624 nodemask_t *nmask, gfp_t gfp_mask)
4db9b2ef 2625{
db71ef79 2626 spin_lock_irq(&hugetlb_lock);
8346d69d 2627 if (available_huge_pages(h)) {
a36f1e90 2628 struct folio *folio;
3e59fcb0 2629
a36f1e90
SK
2630 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
2631 preferred_nid, nmask);
2632 if (folio) {
db71ef79 2633 spin_unlock_irq(&hugetlb_lock);
e37d3e83 2634 return folio;
4db9b2ef
MH
2635 }
2636 }
db71ef79 2637 spin_unlock_irq(&hugetlb_lock);
4db9b2ef 2638
e37d3e83 2639 return alloc_migrate_hugetlb_folio(h, gfp_mask, preferred_nid, nmask);
4db9b2ef
MH
2640}
2641
e4e574b7 2642/*
25985edc 2643 * Increase the hugetlb pool such that it can accommodate a reservation
e4e574b7
AL
2644 * of size 'delta'.
2645 */
0a4f3d1b 2646static int gather_surplus_pages(struct hstate *h, long delta)
1b2a1e7b 2647 __must_hold(&hugetlb_lock)
e4e574b7 2648{
34665341 2649 LIST_HEAD(surplus_list);
454a00c4 2650 struct folio *folio, *tmp;
0a4f3d1b
LX
2651 int ret;
2652 long i;
2653 long needed, allocated;
28073b02 2654 bool alloc_ok = true;
e4e574b7 2655
9487ca60 2656 lockdep_assert_held(&hugetlb_lock);
a5516438 2657 needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
ac09b3a1 2658 if (needed <= 0) {
a5516438 2659 h->resv_huge_pages += delta;
e4e574b7 2660 return 0;
ac09b3a1 2661 }
e4e574b7
AL
2662
2663 allocated = 0;
e4e574b7
AL
2664
2665 ret = -ENOMEM;
2666retry:
db71ef79 2667 spin_unlock_irq(&hugetlb_lock);
e4e574b7 2668 for (i = 0; i < needed; i++) {
3a740e8b 2669 folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h),
2b21624f 2670 NUMA_NO_NODE, NULL);
3a740e8b 2671 if (!folio) {
28073b02
HD
2672 alloc_ok = false;
2673 break;
2674 }
3a740e8b 2675 list_add(&folio->lru, &surplus_list);
69ed779a 2676 cond_resched();
e4e574b7 2677 }
28073b02 2678 allocated += i;
e4e574b7
AL
2679
2680 /*
2681 * After retaking hugetlb_lock, we need to recalculate 'needed'
2682 * because either resv_huge_pages or free_huge_pages may have changed.
2683 */
db71ef79 2684 spin_lock_irq(&hugetlb_lock);
a5516438
AK
2685 needed = (h->resv_huge_pages + delta) -
2686 (h->free_huge_pages + allocated);
28073b02
HD
2687 if (needed > 0) {
2688 if (alloc_ok)
2689 goto retry;
2690 /*
2691 * We were not able to allocate enough pages to
2692 * satisfy the entire reservation so we free what
2693 * we've allocated so far.
2694 */
2695 goto free;
2696 }
e4e574b7
AL
2697 /*
2698 * The surplus_list now contains _at_least_ the number of extra pages
25985edc 2699 * needed to accommodate the reservation. Add the appropriate number
e4e574b7 2700 * of pages to the hugetlb pool and free the extras back to the buddy
ac09b3a1
AL
2701 * allocator. Commit the entire reservation here to prevent another
2702 * process from stealing the pages as they are added to the pool but
2703 * before they are reserved.
e4e574b7
AL
2704 */
2705 needed += allocated;
a5516438 2706 h->resv_huge_pages += delta;
e4e574b7 2707 ret = 0;
a9869b83 2708
19fc3f0a 2709 /* Free the needed pages to the hugetlb pool */
454a00c4 2710 list_for_each_entry_safe(folio, tmp, &surplus_list, lru) {
19fc3f0a
AL
2711 if ((--needed) < 0)
2712 break;
b65a4eda 2713 /* Add the page to the hugetlb allocator */
454a00c4 2714 enqueue_hugetlb_folio(h, folio);
19fc3f0a 2715 }
28073b02 2716free:
db71ef79 2717 spin_unlock_irq(&hugetlb_lock);
19fc3f0a 2718
b65a4eda
MK
2719 /*
2720 * Free unnecessary surplus pages to the buddy allocator.
454a00c4 2721 * Pages have no ref count, call free_huge_folio directly.
b65a4eda 2722 */
454a00c4
MWO
2723 list_for_each_entry_safe(folio, tmp, &surplus_list, lru)
2724 free_huge_folio(folio);
db71ef79 2725 spin_lock_irq(&hugetlb_lock);
e4e574b7
AL
2726
2727 return ret;
2728}
2729
2730/*
e5bbc8a6
MK
2731 * This routine has two main purposes:
2732 * 1) Decrement the reservation count (resv_huge_pages) by the value passed
2733 * in unused_resv_pages. This corresponds to the prior adjustments made
2734 * to the associated reservation map.
2735 * 2) Free any unused surplus pages that may have been allocated to satisfy
2736 * the reservation. As many as unused_resv_pages may be freed.
e4e574b7 2737 */
a5516438
AK
2738static void return_unused_surplus_pages(struct hstate *h,
2739 unsigned long unused_resv_pages)
e4e574b7 2740{
e4e574b7 2741 unsigned long nr_pages;
10c6ec49
MK
2742 LIST_HEAD(page_list);
2743
9487ca60 2744 lockdep_assert_held(&hugetlb_lock);
10c6ec49
MK
2745 /* Uncommit the reservation */
2746 h->resv_huge_pages -= unused_resv_pages;
e4e574b7 2747
c0531714 2748 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
e5bbc8a6 2749 goto out;
aa888a74 2750
e5bbc8a6
MK
2751 /*
2752 * Part (or even all) of the reservation could have been backed
2753 * by pre-allocated pages. Only free surplus pages.
2754 */
a5516438 2755 nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
e4e574b7 2756
685f3457
LS
2757 /*
2758 * We want to release as many surplus pages as possible, spread
9b5e5d0f
LS
2759 * evenly across all nodes with memory. Iterate across these nodes
2760 * until we can no longer free unreserved surplus pages. This occurs
2761 * when the nodes with surplus pages have no free pages.
d5b43e96 2762 * remove_pool_hugetlb_folio() will balance the freed pages across the
9b5e5d0f 2763 * on-line nodes with memory and will handle the hstate accounting.
685f3457
LS
2764 */
2765 while (nr_pages--) {
d5b43e96
MWO
2766 struct folio *folio;
2767
2768 folio = remove_pool_hugetlb_folio(h, &node_states[N_MEMORY], 1);
2769 if (!folio)
e5bbc8a6 2770 goto out;
10c6ec49 2771
d5b43e96 2772 list_add(&folio->lru, &page_list);
e4e574b7 2773 }
e5bbc8a6
MK
2774
2775out:
db71ef79 2776 spin_unlock_irq(&hugetlb_lock);
10c6ec49 2777 update_and_free_pages_bulk(h, &page_list);
db71ef79 2778 spin_lock_irq(&hugetlb_lock);
e4e574b7
AL
2779}
2780
5e911373 2781
c37f9fb1 2782/*
feba16e2 2783 * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
5e911373 2784 * are used by the huge page allocation routines to manage reservations.
cf3ad20b
MK
2785 *
2786 * vma_needs_reservation is called to determine if the huge page at addr
2787 * within the vma has an associated reservation. If a reservation is
2788 * needed, the value 1 is returned. The caller is then responsible for
2789 * managing the global reservation and subpool usage counts. After
2790 * the huge page has been allocated, vma_commit_reservation is called
feba16e2
MK
2791 * to add the page to the reservation map. If the page allocation fails,
2792 * the reservation must be ended instead of committed. vma_end_reservation
2793 * is called in such cases.
cf3ad20b
MK
2794 *
2795 * In the normal case, vma_commit_reservation returns the same value
2796 * as the preceding vma_needs_reservation call. The only time this
2797 * is not the case is if a reserve map was changed between calls. It
2798 * is the responsibility of the caller to notice the difference and
2799 * take appropriate action.
96b96a96
MK
2800 *
2801 * vma_add_reservation is used in error paths where a reservation must
2802 * be restored when a newly allocated huge page must be freed. It is
2803 * to be called after calling vma_needs_reservation to determine if a
2804 * reservation exists.
846be085
MK
2805 *
2806 * vma_del_reservation is used in error paths where an entry in the reserve
2807 * map was created during huge page allocation and must be removed. It is to
2808 * be called after calling vma_needs_reservation to determine if a reservation
2809 * exists.
c37f9fb1 2810 */
5e911373
MK
2811enum vma_resv_mode {
2812 VMA_NEEDS_RESV,
2813 VMA_COMMIT_RESV,
feba16e2 2814 VMA_END_RESV,
96b96a96 2815 VMA_ADD_RESV,
846be085 2816 VMA_DEL_RESV,
5e911373 2817};
cf3ad20b
MK
2818static long __vma_reservation_common(struct hstate *h,
2819 struct vm_area_struct *vma, unsigned long addr,
5e911373 2820 enum vma_resv_mode mode)
c37f9fb1 2821{
4e35f483
JK
2822 struct resv_map *resv;
2823 pgoff_t idx;
cf3ad20b 2824 long ret;
0db9d74e 2825 long dummy_out_regions_needed;
c37f9fb1 2826
4e35f483
JK
2827 resv = vma_resv_map(vma);
2828 if (!resv)
84afd99b 2829 return 1;
c37f9fb1 2830
4e35f483 2831 idx = vma_hugecache_offset(h, vma, addr);
5e911373
MK
2832 switch (mode) {
2833 case VMA_NEEDS_RESV:
0db9d74e
MA
2834 ret = region_chg(resv, idx, idx + 1, &dummy_out_regions_needed);
2835 /* We assume that vma_reservation_* routines always operate on
2836 * 1 page, and that adding a 1 page entry to the resv map can only
2837 * ever require 1 region.
2838 */
2839 VM_BUG_ON(dummy_out_regions_needed != 1);
5e911373
MK
2840 break;
2841 case VMA_COMMIT_RESV:
075a61d0 2842 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
0db9d74e
MA
2843 /* region_add calls of range 1 should never fail. */
2844 VM_BUG_ON(ret < 0);
5e911373 2845 break;
feba16e2 2846 case VMA_END_RESV:
0db9d74e 2847 region_abort(resv, idx, idx + 1, 1);
5e911373
MK
2848 ret = 0;
2849 break;
96b96a96 2850 case VMA_ADD_RESV:
0db9d74e 2851 if (vma->vm_flags & VM_MAYSHARE) {
075a61d0 2852 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
0db9d74e
MA
2853 /* region_add calls of range 1 should never fail. */
2854 VM_BUG_ON(ret < 0);
2855 } else {
2856 region_abort(resv, idx, idx + 1, 1);
96b96a96
MK
2857 ret = region_del(resv, idx, idx + 1);
2858 }
2859 break;
846be085
MK
2860 case VMA_DEL_RESV:
2861 if (vma->vm_flags & VM_MAYSHARE) {
2862 region_abort(resv, idx, idx + 1, 1);
2863 ret = region_del(resv, idx, idx + 1);
2864 } else {
2865 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2866 /* region_add calls of range 1 should never fail. */
2867 VM_BUG_ON(ret < 0);
2868 }
2869 break;
5e911373
MK
2870 default:
2871 BUG();
2872 }
84afd99b 2873
846be085 2874 if (vma->vm_flags & VM_MAYSHARE || mode == VMA_DEL_RESV)
cf3ad20b 2875 return ret;
bf3d12b9
ML
2876 /*
2877 * We know private mapping must have HPAGE_RESV_OWNER set.
2878 *
2879 * In most cases, reserves always exist for private mappings.
2880 * However, a file associated with mapping could have been
2881 * hole punched or truncated after reserves were consumed.
2882 * As subsequent fault on such a range will not use reserves.
2883 * Subtle - The reserve map for private mappings has the
2884 * opposite meaning than that of shared mappings. If NO
2885 * entry is in the reserve map, it means a reservation exists.
2886 * If an entry exists in the reserve map, it means the
2887 * reservation has already been consumed. As a result, the
2888 * return value of this routine is the opposite of the
2889 * value returned from reserve map manipulation routines above.
2890 */
2891 if (ret > 0)
2892 return 0;
2893 if (ret == 0)
2894 return 1;
2895 return ret;
c37f9fb1 2896}
cf3ad20b
MK
2897
2898static long vma_needs_reservation(struct hstate *h,
a5516438 2899 struct vm_area_struct *vma, unsigned long addr)
c37f9fb1 2900{
5e911373 2901 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
cf3ad20b 2902}
84afd99b 2903
cf3ad20b
MK
2904static long vma_commit_reservation(struct hstate *h,
2905 struct vm_area_struct *vma, unsigned long addr)
2906{
5e911373
MK
2907 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
2908}
2909
feba16e2 2910static void vma_end_reservation(struct hstate *h,
5e911373
MK
2911 struct vm_area_struct *vma, unsigned long addr)
2912{
feba16e2 2913 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
c37f9fb1
AW
2914}
2915
96b96a96
MK
2916static long vma_add_reservation(struct hstate *h,
2917 struct vm_area_struct *vma, unsigned long addr)
2918{
2919 return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
2920}
2921
846be085
MK
2922static long vma_del_reservation(struct hstate *h,
2923 struct vm_area_struct *vma, unsigned long addr)
2924{
2925 return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV);
2926}
2927
96b96a96 2928/*
846be085 2929 * This routine is called to restore reservation information on error paths.
d0ce0e47
SK
2930 * It should ONLY be called for folios allocated via alloc_hugetlb_folio(),
2931 * and the hugetlb mutex should remain held when calling this routine.
846be085
MK
2932 *
2933 * It handles two specific cases:
d2d7bb44
SK
2934 * 1) A reservation was in place and the folio consumed the reservation.
2935 * hugetlb_restore_reserve is set in the folio.
2936 * 2) No reservation was in place for the page, so hugetlb_restore_reserve is
d0ce0e47 2937 * not set. However, alloc_hugetlb_folio always updates the reserve map.
846be085 2938 *
454a00c4
MWO
2939 * In case 1, free_huge_folio later in the error path will increment the
2940 * global reserve count. But, free_huge_folio does not have enough context
846be085
MK
2941 * to adjust the reservation map. This case deals primarily with private
2942 * mappings. Adjust the reserve map here to be consistent with global
454a00c4 2943 * reserve count adjustments to be made by free_huge_folio. Make sure the
846be085
MK
2944 * reserve map indicates there is a reservation present.
2945 *
d0ce0e47 2946 * In case 2, simply undo reserve map modifications done by alloc_hugetlb_folio.
96b96a96 2947 */
846be085 2948void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
d2d7bb44 2949 unsigned long address, struct folio *folio)
96b96a96 2950{
846be085 2951 long rc = vma_needs_reservation(h, vma, address);
96b96a96 2952
0ffdc38e 2953 if (folio_test_hugetlb_restore_reserve(folio)) {
846be085 2954 if (unlikely(rc < 0))
96b96a96
MK
2955 /*
2956 * Rare out of memory condition in reserve map
0ffdc38e
SK
2957 * manipulation. Clear hugetlb_restore_reserve so
2958 * that global reserve count will not be incremented
454a00c4 2959 * by free_huge_folio. This will make it appear
0ffdc38e 2960 * as though the reservation for this folio was
96b96a96 2961 * consumed. This may prevent the task from
0ffdc38e 2962 * faulting in the folio at a later time. This
96b96a96
MK
2963 * is better than inconsistent global huge page
2964 * accounting of reserve counts.
2965 */
0ffdc38e 2966 folio_clear_hugetlb_restore_reserve(folio);
846be085
MK
2967 else if (rc)
2968 (void)vma_add_reservation(h, vma, address);
2969 else
2970 vma_end_reservation(h, vma, address);
2971 } else {
2972 if (!rc) {
2973 /*
2974 * This indicates there is an entry in the reserve map
d0ce0e47
SK
2975 * not added by alloc_hugetlb_folio. We know it was added
2976 * before the alloc_hugetlb_folio call, otherwise
0ffdc38e 2977 * hugetlb_restore_reserve would be set on the folio.
846be085
MK
2978 * Remove the entry so that a subsequent allocation
2979 * does not consume a reservation.
2980 */
2981 rc = vma_del_reservation(h, vma, address);
2982 if (rc < 0)
96b96a96 2983 /*
846be085
MK
2984 * VERY rare out of memory condition. Since
2985 * we can not delete the entry, set
0ffdc38e
SK
2986 * hugetlb_restore_reserve so that the reserve
2987 * count will be incremented when the folio
846be085
MK
2988 * is freed. This reserve will be consumed
2989 * on a subsequent allocation.
96b96a96 2990 */
0ffdc38e 2991 folio_set_hugetlb_restore_reserve(folio);
846be085
MK
2992 } else if (rc < 0) {
2993 /*
2994 * Rare out of memory condition from
2995 * vma_needs_reservation call. Memory allocation is
2996 * only attempted if a new entry is needed. Therefore,
2997 * this implies there is not an entry in the
2998 * reserve map.
2999 *
3000 * For shared mappings, no entry in the map indicates
3001 * no reservation. We are done.
3002 */
3003 if (!(vma->vm_flags & VM_MAYSHARE))
3004 /*
3005 * For private mappings, no entry indicates
3006 * a reservation is present. Since we can
0ffdc38e
SK
3007 * not add an entry, set hugetlb_restore_reserve
3008 * on the folio so reserve count will be
846be085
MK
3009 * incremented when freed. This reserve will
3010 * be consumed on a subsequent allocation.
3011 */
0ffdc38e 3012 folio_set_hugetlb_restore_reserve(folio);
96b96a96 3013 } else
846be085
MK
3014 /*
3015 * No reservation present, do nothing
3016 */
3017 vma_end_reservation(h, vma, address);
96b96a96
MK
3018 }
3019}
3020
369fa227 3021/*
19fc1a7e
SK
3022 * alloc_and_dissolve_hugetlb_folio - Allocate a new folio and dissolve
3023 * the old one
369fa227 3024 * @h: struct hstate old page belongs to
19fc1a7e 3025 * @old_folio: Old folio to dissolve
ae37c7ff 3026 * @list: List to isolate the page in case we need to
369fa227
OS
3027 * Returns 0 on success, otherwise negated error.
3028 */
19fc1a7e
SK
3029static int alloc_and_dissolve_hugetlb_folio(struct hstate *h,
3030 struct folio *old_folio, struct list_head *list)
369fa227
OS
3031{
3032 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
de656ed3 3033 int nid = folio_nid(old_folio);
831bc31a 3034 struct folio *new_folio = NULL;
369fa227
OS
3035 int ret = 0;
3036
369fa227
OS
3037retry:
3038 spin_lock_irq(&hugetlb_lock);
de656ed3 3039 if (!folio_test_hugetlb(old_folio)) {
369fa227 3040 /*
19fc1a7e 3041 * Freed from under us. Drop new_folio too.
369fa227
OS
3042 */
3043 goto free_new;
de656ed3 3044 } else if (folio_ref_count(old_folio)) {
9747b9e9
BW
3045 bool isolated;
3046
369fa227 3047 /*
19fc1a7e 3048 * Someone has grabbed the folio, try to isolate it here.
ae37c7ff 3049 * Fail with -EBUSY if not possible.
369fa227 3050 */
ae37c7ff 3051 spin_unlock_irq(&hugetlb_lock);
9747b9e9
BW
3052 isolated = isolate_hugetlb(old_folio, list);
3053 ret = isolated ? 0 : -EBUSY;
ae37c7ff 3054 spin_lock_irq(&hugetlb_lock);
369fa227 3055 goto free_new;
de656ed3 3056 } else if (!folio_test_hugetlb_freed(old_folio)) {
369fa227 3057 /*
19fc1a7e 3058 * Folio's refcount is 0 but it has not been enqueued in the
369fa227
OS
3059 * freelist yet. Race window is small, so we can succeed here if
3060 * we retry.
3061 */
3062 spin_unlock_irq(&hugetlb_lock);
3063 cond_resched();
3064 goto retry;
3065 } else {
831bc31a
BW
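		/*
		 * Allocate and prep the replacement folio outside the lock on
		 * the first pass, then retry the checks above with the lock
		 * re-acquired.
		 */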
3066 if (!new_folio) {
3067 spin_unlock_irq(&hugetlb_lock);
3068 new_folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid,
3069 NULL, NULL);
3070 if (!new_folio)
3071 return -ENOMEM;
3072 __prep_new_hugetlb_folio(h, new_folio);
3073 goto retry;
3074 }
3075
369fa227 3076 /*
19fc1a7e 3077 * Ok, old_folio is still a genuine free hugepage. Remove it from
369fa227
OS
3078 * the freelist and decrease the counters. These will be
3079 * incremented again when calling __prep_account_new_huge_page()
240d67a8
SK
3080 * and enqueue_hugetlb_folio() for new_folio. The counters will
3081 * remain stable since this happens under the lock.
369fa227 3082 */
cfd5082b 3083 remove_hugetlb_folio(h, old_folio, false);
369fa227
OS
3084
3085 /*
19fc1a7e 3086 * Ref count on new_folio is already zero as it was dropped
b65a4eda 3087 * earlier. It can be directly added to the pool free list.
369fa227 3088 */
369fa227 3089 __prep_account_new_huge_page(h, nid);
240d67a8 3090 enqueue_hugetlb_folio(h, new_folio);
369fa227
OS
3091
3092 /*
19fc1a7e 3093 * Folio has been replaced, we can safely free the old one.
369fa227
OS
3094 */
3095 spin_unlock_irq(&hugetlb_lock);
d6ef19e2 3096 update_and_free_hugetlb_folio(h, old_folio, false);
369fa227
OS
3097 }
3098
3099 return ret;
3100
3101free_new:
3102 spin_unlock_irq(&hugetlb_lock);
831bc31a
BW
3103 if (new_folio) {
3104 /* Folio has a zero ref count, but needs a ref to be freed */
3105 folio_ref_unfreeze(new_folio, 1);
3106 update_and_free_hugetlb_folio(h, new_folio, false);
3107 }
369fa227
OS
3108
3109 return ret;
3110}
3111
ae37c7ff 3112int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)
369fa227
OS
3113{
3114 struct hstate *h;
d5e33bd8 3115 struct folio *folio = page_folio(page);
ae37c7ff 3116 int ret = -EBUSY;
369fa227
OS
3117
3118 /*
3119 * The page might have been dissolved from under our feet, so make sure
3120 * to carefully check the state under the lock.
3121 * Return success when racing as if we dissolved the page ourselves.
3122 */
3123 spin_lock_irq(&hugetlb_lock);
d5e33bd8
SK
3124 if (folio_test_hugetlb(folio)) {
3125 h = folio_hstate(folio);
369fa227
OS
3126 } else {
3127 spin_unlock_irq(&hugetlb_lock);
3128 return 0;
3129 }
3130 spin_unlock_irq(&hugetlb_lock);
3131
3132 /*
3133 * Fence off gigantic pages as there is a cyclic dependency between
3134 * alloc_contig_range and them. Return -ENOMEM as this has the effect
3135 * of bailing out right away without further retrying.
3136 */
3137 if (hstate_is_gigantic(h))
3138 return -ENOMEM;
3139
9747b9e9 3140 if (folio_ref_count(folio) && isolate_hugetlb(folio, list))
ae37c7ff 3141 ret = 0;
d5e33bd8 3142 else if (!folio_ref_count(folio))
19fc1a7e 3143 ret = alloc_and_dissolve_hugetlb_folio(h, folio, list);
ae37c7ff
OS
3144
3145 return ret;
369fa227
OS
3146}
3147
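A short, hedged sketch of how a caller might use isolate_or_dissolve_huge_page(): the comment above ties it to range allocators such as alloc_contig_range, with 0 meaning the page was isolated onto `list` (or already dissolved), -EBUSY meaning isolation failed and the whole range may be retried, and -ENOMEM flagging a gigantic page that cannot be handled this way. The helper name and loop below are illustrative only, not code from this file:

static int example_isolate_hugetlb_range(unsigned long start_pfn,
					 unsigned long end_pfn,
					 struct list_head *movable)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		struct page *page = pfn_to_page(pfn);
		int ret;

		if (!PageHuge(page))
			continue;

		ret = isolate_or_dissolve_huge_page(page, movable);
		if (ret == -ENOMEM)
			return ret;	/* gigantic page: bail out, do not retry */
		if (ret)
			return ret;	/* -EBUSY: caller may retry the range */
		/* A real caller would also skip past the rest of this huge page. */
	}
	return 0;
}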
d0ce0e47 3148struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
04f2cbe3 3149 unsigned long addr, int avoid_reserve)
1da177e4 3150{
90481622 3151 struct hugepage_subpool *spool = subpool_vma(vma);
a5516438 3152 struct hstate *h = hstate_vma(vma);
d4ab0316 3153 struct folio *folio;
8cba9576 3154 long map_chg, map_commit, nr_pages = pages_per_huge_page(h);
d85f69b0 3155 long gbl_chg;
8cba9576 3156 int memcg_charge_ret, ret, idx;
d0ce0e47 3157 struct hugetlb_cgroup *h_cg = NULL;
8cba9576 3158 struct mem_cgroup *memcg;
08cf9faf 3159 bool deferred_reserve;
8cba9576
NP
3160 gfp_t gfp = htlb_alloc_mask(h) | __GFP_RETRY_MAYFAIL;
3161
3162 memcg = get_mem_cgroup_from_current();
3163 memcg_charge_ret = mem_cgroup_hugetlb_try_charge(memcg, gfp, nr_pages);
3164 if (memcg_charge_ret == -ENOMEM) {
3165 mem_cgroup_put(memcg);
3166 return ERR_PTR(-ENOMEM);
3167 }
a1e78772 3168
6d76dcf4 3169 idx = hstate_index(h);
a1e78772 3170 /*
d85f69b0
MK
3171 * Examine the region/reserve map to determine if the process
3172 * has a reservation for the page to be allocated. A return
3173 * code of zero indicates a reservation exists (no change).
a1e78772 3174 */
d85f69b0 3175 map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
8cba9576
NP
3176 if (map_chg < 0) {
3177 if (!memcg_charge_ret)
3178 mem_cgroup_cancel_charge(memcg, nr_pages);
3179 mem_cgroup_put(memcg);
76dcee75 3180 return ERR_PTR(-ENOMEM);
8cba9576 3181 }
d85f69b0
MK
3182
3183 /*
3184 * Processes that did not create the mapping will have no
3185 * reserves as indicated by the region/reserve map. Check
3186 * that the allocation will not exceed the subpool limit.
3187 * Allocations for MAP_NORESERVE mappings also need to be
3188 * checked against any subpool limit.
3189 */
3190 if (map_chg || avoid_reserve) {
3191 gbl_chg = hugepage_subpool_get_pages(spool, 1);
8cba9576
NP
3192 if (gbl_chg < 0)
3193 goto out_end_reservation;
1da177e4 3194
d85f69b0
MK
3195 /*
3196 * Even though there was no reservation in the region/reserve
3197 * map, there could be reservations associated with the
3198 * subpool that can be used. This would be indicated if the
3199 * return value of hugepage_subpool_get_pages() is zero.
3200 * However, if avoid_reserve is specified we still avoid even
3201 * the subpool reservations.
3202 */
3203 if (avoid_reserve)
3204 gbl_chg = 1;
3205 }
3206
08cf9faf
MA
3207 /* If this allocation is not consuming a reservation, charge it now.
3208 */
6501fe5f 3209 deferred_reserve = map_chg || avoid_reserve;
08cf9faf
MA
3210 if (deferred_reserve) {
3211 ret = hugetlb_cgroup_charge_cgroup_rsvd(
3212 idx, pages_per_huge_page(h), &h_cg);
3213 if (ret)
3214 goto out_subpool_put;
3215 }
3216
6d76dcf4 3217 ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
8f34af6f 3218 if (ret)
08cf9faf 3219 goto out_uncharge_cgroup_reservation;
8f34af6f 3220
db71ef79 3221 spin_lock_irq(&hugetlb_lock);
d85f69b0
MK
3222 /*
3223	 * gbl_chg is passed to indicate whether or not a page must be taken
3224 * from the global free pool (global change). gbl_chg == 0 indicates
3225 * a reservation exists for the allocation.
3226 */
ff7d853b
SK
3227 folio = dequeue_hugetlb_folio_vma(h, vma, addr, avoid_reserve, gbl_chg);
3228 if (!folio) {
db71ef79 3229 spin_unlock_irq(&hugetlb_lock);
ff7d853b
SK
3230 folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr);
3231 if (!folio)
8f34af6f 3232 goto out_uncharge_cgroup;
12df140f 3233 spin_lock_irq(&hugetlb_lock);
a88c7695 3234 if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
ff7d853b 3235 folio_set_hugetlb_restore_reserve(folio);
a88c7695
NH
3236 h->resv_huge_pages--;
3237 }
ff7d853b
SK
3238 list_add(&folio->lru, &h->hugepage_activelist);
3239 folio_ref_unfreeze(folio, 1);
81a6fcae 3240 /* Fall through */
68842c9b 3241 }
ff7d853b
SK
3242
3243 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, folio);
08cf9faf
MA
3244 /* If allocation is not consuming a reservation, also store the
3245 * hugetlb_cgroup pointer on the page.
3246 */
3247 if (deferred_reserve) {
3248 hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h),
ff7d853b 3249 h_cg, folio);
08cf9faf
MA
3250 }
3251
db71ef79 3252 spin_unlock_irq(&hugetlb_lock);
348ea204 3253
ff7d853b 3254 hugetlb_set_folio_subpool(folio, spool);
90d8b7e6 3255
d85f69b0
MK
3256 map_commit = vma_commit_reservation(h, vma, addr);
3257 if (unlikely(map_chg > map_commit)) {
33039678
MK
3258 /*
3259 * The page was added to the reservation map between
3260 * vma_needs_reservation and vma_commit_reservation.
3261 * This indicates a race with hugetlb_reserve_pages.
3262 * Adjust for the subpool count incremented above AND
3263 * in hugetlb_reserve_pages for the same page. Also,
3264 * the reservation count added in hugetlb_reserve_pages
3265 * no longer applies.
3266 */
3267 long rsv_adjust;
3268
3269 rsv_adjust = hugepage_subpool_put_pages(spool, 1);
3270 hugetlb_acct_memory(h, -rsv_adjust);
79aa925b 3271 if (deferred_reserve)
d4ab0316
SK
3272 hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
3273 pages_per_huge_page(h), folio);
33039678 3274 }
8cba9576
NP
3275
3276 if (!memcg_charge_ret)
3277 mem_cgroup_commit_charge(folio, memcg);
3278 mem_cgroup_put(memcg);
3279
d0ce0e47 3280 return folio;
8f34af6f
JZ
3281
3282out_uncharge_cgroup:
3283 hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
08cf9faf
MA
3284out_uncharge_cgroup_reservation:
3285 if (deferred_reserve)
3286 hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h),
3287 h_cg);
8f34af6f 3288out_subpool_put:
d85f69b0 3289 if (map_chg || avoid_reserve)
8f34af6f 3290 hugepage_subpool_put_pages(spool, 1);
8cba9576 3291out_end_reservation:
feba16e2 3292 vma_end_reservation(h, vma, addr);
8cba9576
NP
3293 if (!memcg_charge_ret)
3294 mem_cgroup_cancel_charge(memcg, nr_pages);
3295 mem_cgroup_put(memcg);
8f34af6f 3296 return ERR_PTR(-ENOSPC);
b45b5bd6
DG
3297}
3298
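alloc_hugetlb_folio() hands back the folio with one reference held and the folio already on hugepage_activelist, or an ERR_PTR: -ENOMEM when memcg charging or the reserve-map lookup fails, -ENOSPC when no page can be dequeued or allocated. A minimal, hedged sketch of a hypothetical fault-path caller (the function name and the vmf_error() mapping are illustrative, not this file's actual fault handler):

static vm_fault_t example_hugetlb_fault_alloc(struct vm_area_struct *vma,
					      unsigned long addr)
{
	struct folio *folio;

	folio = alloc_hugetlb_folio(vma, addr, 0);
	if (IS_ERR(folio))
		return vmf_error(PTR_ERR(folio));

	/*
	 * Success: the folio carries one reference; the caller keeps it
	 * while mapping the folio and calls folio_put() only on its own
	 * error paths, which returns the page to the pool.
	 */
	return 0;
}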
b5389086 3299int alloc_bootmem_huge_page(struct hstate *h, int nid)
e24a1307 3300 __attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
b5389086 3301int __alloc_bootmem_huge_page(struct hstate *h, int nid)
aa888a74 3302{
b5389086 3303 struct huge_bootmem_page *m = NULL; /* initialize for clang */
b78b27d0 3304 int nr_nodes, node = nid;
aa888a74 3305
b5389086
ZY
3306 /* do node specific alloc */
3307 if (nid != NUMA_NO_NODE) {
3308 m = memblock_alloc_try_nid_raw(huge_page_size(h), huge_page_size(h),
3309 0, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
3310 if (!m)
3311 return 0;
3312 goto found;
3313 }
3314 /* allocate from next node when distributing huge pages */
2e73ff23 3315 for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node, &node_states[N_MEMORY]) {
b5389086 3316 m = memblock_alloc_try_nid_raw(
8b89a116 3317 huge_page_size(h), huge_page_size(h),
97ad1087 3318 0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
b5389086
ZY
3319 /*
3320 * Use the beginning of the huge page to store the
3321 * huge_bootmem_page struct (until gather_bootmem
3322 * puts them into the mem_map).
3323 */
3324 if (!m)
3325 return 0;
3326 goto found;
aa888a74 3327 }
aa888a74
AK
3328
3329found:
fde1c4ec
UA
3330
3331 /*
3332 * Only initialize the head struct page in memmap_init_reserved_pages,
3333	 * the rest of the struct pages will be initialized by the HugeTLB
3334 * subsystem itself.
3335 * The head struct page is used to get folio information by the HugeTLB
3336 * subsystem like zone id and node id.
3337 */
3338 memblock_reserved_mark_noinit(virt_to_phys((void *)m + PAGE_SIZE),
3339 huge_page_size(h) - PAGE_SIZE);
aa888a74 3340 /* Put them into a private list first because mem_map is not up yet */
330d6e48 3341 INIT_LIST_HEAD(&m->list);
b78b27d0 3342 list_add(&m->list, &huge_boot_pages[node]);
aa888a74
AK
3343 m->hstate = h;
3344 return 1;
3345}
3346
fde1c4ec
UA
3347/* Initialize [start_page:end_page_number] tail struct pages of a hugepage */
3348static void __init hugetlb_folio_init_tail_vmemmap(struct folio *folio,
3349 unsigned long start_page_number,
3350 unsigned long end_page_number)
3351{
3352 enum zone_type zone = zone_idx(folio_zone(folio));
3353 int nid = folio_nid(folio);
3354 unsigned long head_pfn = folio_pfn(folio);
3355 unsigned long pfn, end_pfn = head_pfn + end_page_number;
3356 int ret;
3357
3358 for (pfn = head_pfn + start_page_number; pfn < end_pfn; pfn++) {
3359 struct page *page = pfn_to_page(pfn);
3360
3361 __init_single_page(page, pfn, zone, nid);
3362 prep_compound_tail((struct page *)folio, pfn - head_pfn);
3363 ret = page_ref_freeze(page, 1);
3364 VM_BUG_ON(!ret);
3365 }
3366}
3367
3368static void __init hugetlb_folio_init_vmemmap(struct folio *folio,
3369 struct hstate *h,
3370 unsigned long nr_pages)
3371{
3372 int ret;
3373
3374 /* Prepare folio head */
3375 __folio_clear_reserved(folio);
3376 __folio_set_head(folio);
a48bf7b4 3377 ret = folio_ref_freeze(folio, 1);
fde1c4ec
UA
3378 VM_BUG_ON(!ret);
3379 /* Initialize the necessary tail struct pages */
3380 hugetlb_folio_init_tail_vmemmap(folio, 1, nr_pages);
3381 prep_compound_head((struct page *)folio, huge_page_order(h));
3382}
3383
79359d6d
MK
3384static void __init prep_and_add_bootmem_folios(struct hstate *h,
3385 struct list_head *folio_list)
3386{
3387 unsigned long flags;
3388 struct folio *folio, *tmp_f;
3389
3390 /* Send list for bulk vmemmap optimization processing */
3391 hugetlb_vmemmap_optimize_folios(h, folio_list);
3392
79359d6d
MK
3393 list_for_each_entry_safe(folio, tmp_f, folio_list, lru) {
3394 if (!folio_test_hugetlb_vmemmap_optimized(folio)) {
3395 /*
3396			 * If HVO fails, initialize all tail struct pages.
3397 * We do not worry about potential long lock hold
3398 * time as this is early in boot and there should
3399 * be no contention.
3400 */
3401 hugetlb_folio_init_tail_vmemmap(folio,
3402 HUGETLB_VMEMMAP_RESERVE_PAGES,
3403 pages_per_huge_page(h));
3404 }
b78b27d0
GL
3405 /* Subdivide locks to achieve better parallel performance */
3406 spin_lock_irqsave(&hugetlb_lock, flags);
79359d6d
MK
3407 __prep_account_new_huge_page(h, folio_nid(folio));
3408 enqueue_hugetlb_folio(h, folio);
b78b27d0 3409 spin_unlock_irqrestore(&hugetlb_lock, flags);
79359d6d 3410 }
79359d6d
MK
3411}
3412
48b8d744
MK
3413/*
3414 * Put bootmem huge pages into the standard lists after mem_map is up.
5e0a760b 3415 * Note: This only applies to gigantic (order > MAX_PAGE_ORDER) pages.
48b8d744 3416 */
b78b27d0 3417static void __init gather_bootmem_prealloc_node(unsigned long nid)
aa888a74 3418{
d67e32f2 3419 LIST_HEAD(folio_list);
aa888a74 3420 struct huge_bootmem_page *m;
d67e32f2 3421 struct hstate *h = NULL, *prev_h = NULL;
aa888a74 3422
b78b27d0 3423 list_for_each_entry(m, &huge_boot_pages[nid], list) {
40d18ebf 3424 struct page *page = virt_to_page(m);
fde1c4ec 3425 struct folio *folio = (void *)page;
d67e32f2
MK
3426
3427 h = m->hstate;
3428 /*
3429 * It is possible to have multiple huge page sizes (hstates)
3430 * in this list. If so, process each size separately.
3431 */
3432 if (h != prev_h && prev_h != NULL)
79359d6d 3433 prep_and_add_bootmem_folios(prev_h, &folio_list);
d67e32f2 3434 prev_h = h;
ee8f248d 3435
48b8d744 3436 VM_BUG_ON(!hstate_is_gigantic(h));
d1c60955 3437 WARN_ON(folio_ref_count(folio) != 1);
fde1c4ec
UA
3438
3439 hugetlb_folio_init_vmemmap(folio, h,
3440 HUGETLB_VMEMMAP_RESERVE_PAGES);
79359d6d 3441 init_new_hugetlb_folio(h, folio);
d67e32f2 3442 list_add(&folio->lru, &folio_list);
af0fb9df 3443
b0320c7b 3444 /*
48b8d744
MK
3445 * We need to restore the 'stolen' pages to totalram_pages
3446 * in order to fix confusing memory reports from free(1) and
3447 * other side-effects, like CommitLimit going negative.
b0320c7b 3448 */
48b8d744 3449 adjust_managed_page_count(page, pages_per_huge_page(h));
520495fe 3450 cond_resched();
aa888a74 3451 }
d67e32f2 3452
79359d6d 3453 prep_and_add_bootmem_folios(h, &folio_list);
aa888a74 3454}
fde1c4ec 3455
b78b27d0
GL
3456static void __init gather_bootmem_prealloc_parallel(unsigned long start,
3457 unsigned long end, void *arg)
3458{
3459 int nid;
3460
3461 for (nid = start; nid < end; nid++)
3462 gather_bootmem_prealloc_node(nid);
3463}
3464
3465static void __init gather_bootmem_prealloc(void)
3466{
3467 struct padata_mt_job job = {
3468 .thread_fn = gather_bootmem_prealloc_parallel,
3469 .fn_arg = NULL,
3470 .start = 0,
3471 .size = num_node_state(N_MEMORY),
3472 .align = 1,
3473 .min_chunk = 1,
3474 .max_threads = num_node_state(N_MEMORY),
3475 .numa_aware = true,
3476 };
3477
3478 padata_do_multithreaded(&job);
3479}
3480
b5389086
ZY
3481static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
3482{
3483 unsigned long i;
3484 char buf[32];
3485
3486 for (i = 0; i < h->max_huge_pages_node[nid]; ++i) {
3487 if (hstate_is_gigantic(h)) {
3488 if (!alloc_bootmem_huge_page(h, nid))
3489 break;
3490 } else {
19fc1a7e 3491 struct folio *folio;
b5389086
ZY
3492 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
3493
19fc1a7e 3494 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid,
b5389086 3495 &node_states[N_MEMORY], NULL);
19fc1a7e 3496 if (!folio)
b5389086 3497 break;
454a00c4 3498 free_huge_folio(folio); /* free it into the hugepage allocator */
b5389086
ZY
3499 }
3500 cond_resched();
3501 }
3502 if (i == h->max_huge_pages_node[nid])
3503 return;
3504
3505 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3506 pr_warn("HugeTLB: allocating %u of page size %s failed node%d. Only allocated %lu hugepages.\n",
3507 h->max_huge_pages_node[nid], buf, nid, i);
3508 h->max_huge_pages -= (h->max_huge_pages_node[nid] - i);
3509 h->max_huge_pages_node[nid] = i;
3510}
aa888a74 3511
fc37bbb3
GL
3512static bool __init hugetlb_hstate_alloc_pages_specific_nodes(struct hstate *h)
3513{
3514 int i;
3515 bool node_specific_alloc = false;
3516
3517 for_each_online_node(i) {
3518 if (h->max_huge_pages_node[i] > 0) {
3519 hugetlb_hstate_alloc_pages_onenode(h, i);
3520 node_specific_alloc = true;
3521 }
3522 }
3523
3524 return node_specific_alloc;
3525}
3526
3527static void __init hugetlb_hstate_alloc_pages_errcheck(unsigned long allocated, struct hstate *h)
3528{
3529 if (allocated < h->max_huge_pages) {
3530 char buf[32];
3531
3532 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3533 pr_warn("HugeTLB: allocating %lu of page size %s failed. Only allocated %lu hugepages.\n",
3534 h->max_huge_pages, buf, allocated);
3535 h->max_huge_pages = allocated;
3536 }
3537}
3538
c6c21c31
GL
3539static void __init hugetlb_pages_alloc_boot_node(unsigned long start, unsigned long end, void *arg)
3540{
3541 struct hstate *h = (struct hstate *)arg;
3542 int i, num = end - start;
3543 nodemask_t node_alloc_noretry;
3544 LIST_HEAD(folio_list);
3545 int next_node = first_online_node;
3546
3547	/* Bit mask controlling how hard we retry per-node allocations. */
3548 nodes_clear(node_alloc_noretry);
3549
3550 for (i = 0; i < num; ++i) {
3551 struct folio *folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],
3552 &node_alloc_noretry, &next_node);
3553 if (!folio)
3554 break;
3555
3556 list_move(&folio->lru, &folio_list);
3557 cond_resched();
3558 }
3559
3560 prep_and_add_allocated_folios(h, &folio_list);
3561}
3562
d5c3eb3f
GL
3563static unsigned long __init hugetlb_gigantic_pages_alloc_boot(struct hstate *h)
3564{
3565 unsigned long i;
3566
3567 for (i = 0; i < h->max_huge_pages; ++i) {
3568 if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE))
3569 break;
3570 cond_resched();
3571 }
3572
3573 return i;
3574}
3575
3576static unsigned long __init hugetlb_pages_alloc_boot(struct hstate *h)
3577{
c6c21c31
GL
3578 struct padata_mt_job job = {
3579 .fn_arg = h,
3580 .align = 1,
3581 .numa_aware = true
3582 };
d5c3eb3f 3583
c6c21c31
GL
3584 job.thread_fn = hugetlb_pages_alloc_boot_node;
3585 job.start = 0;
3586 job.size = h->max_huge_pages;
d5c3eb3f 3587
c6c21c31
GL
3588 /*
3589	 * job.max_threads is set to twice num_node_state(N_MEMORY).
3590 *
3591 * Tests below indicate that a multiplier of 2 significantly improves
3592 * performance, and although larger values also provide improvements,
3593 * the gains are marginal.
3594 *
3595 * Therefore, choosing 2 as the multiplier strikes a good balance between
3596 * enhancing parallel processing capabilities and maintaining efficient
3597 * resource management.
3598 *
3599 * +------------+-------+-------+-------+-------+-------+
3600 * | multiplier | 1 | 2 | 3 | 4 | 5 |
3601 * +------------+-------+-------+-------+-------+-------+
3602 * | 256G 2node | 358ms | 215ms | 157ms | 134ms | 126ms |
3603 * | 2T 4node | 979ms | 679ms | 543ms | 489ms | 481ms |
3604 * | 50G 2node | 71ms | 44ms | 37ms | 30ms | 31ms |
3605 * +------------+-------+-------+-------+-------+-------+
3606 */
3607 job.max_threads = num_node_state(N_MEMORY) * 2;
3608 job.min_chunk = h->max_huge_pages / num_node_state(N_MEMORY) / 2;
3609 padata_do_multithreaded(&job);
d5c3eb3f 3610
c6c21c31 3611 return h->nr_huge_pages;
d5c3eb3f
GL
3612}
3613
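As a worked example of the sizing above (numbers illustrative only): on a machine with two memory nodes booting with 1024 huge pages of this size, job.max_threads becomes 2 * 2 = 4 and job.min_chunk becomes 1024 / 2 / 2 = 256, so padata spreads the allocation over at most four worker threads in chunks of at least 256 pages each.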
d67e32f2
MK
3614/*
3615 * NOTE: this routine is called in different contexts for gigantic and
3616 * non-gigantic pages.
3617 * - For gigantic pages, this is called early in the boot process and
3618 *   pages are allocated from the memblock allocator or something similar.
3619 * Gigantic pages are actually added to pools later with the routine
3620 * gather_bootmem_prealloc.
3621 * - For non-gigantic pages, this is called later in the boot process after
3622 * all of mm is up and functional. Pages are allocated from buddy and
3623 * then added to hugetlb pools.
3624 */
8faa8b07 3625static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
1da177e4 3626{
d5c3eb3f 3627 unsigned long allocated;
b78b27d0 3628 static bool initialized __initdata;
b5389086
ZY
3629
3630 /* skip gigantic hugepages allocation if hugetlb_cma enabled */
3631 if (hstate_is_gigantic(h) && hugetlb_cma_size) {
3632 pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
3633 return;
3634 }
3635
b78b27d0
GL
3636 /* hugetlb_hstate_alloc_pages will be called many times, initialize huge_boot_pages once */
3637 if (!initialized) {
3638 int i = 0;
3639
3640 for (i = 0; i < MAX_NUMNODES; i++)
3641 INIT_LIST_HEAD(&huge_boot_pages[i]);
3642 initialized = true;
3643 }
3644
b5389086 3645 /* do node specific alloc */
fc37bbb3 3646 if (hugetlb_hstate_alloc_pages_specific_nodes(h))
b5389086
ZY
3647 return;
3648
3649 /* below will do all node balanced alloc */
d5c3eb3f
GL
3650 if (hstate_is_gigantic(h))
3651 allocated = hugetlb_gigantic_pages_alloc_boot(h);
3652 else
3653 allocated = hugetlb_pages_alloc_boot(h);
d67e32f2 3654
d5c3eb3f 3655 hugetlb_hstate_alloc_pages_errcheck(allocated, h);
e5ff2159
AK
3656}
3657
3658static void __init hugetlb_init_hstates(void)
3659{
79dfc695 3660 struct hstate *h, *h2;
e5ff2159
AK
3661
3662 for_each_hstate(h) {
8faa8b07 3663 /* oversize hugepages were init'ed in early boot */
bae7f4ae 3664 if (!hstate_is_gigantic(h))
8faa8b07 3665 hugetlb_hstate_alloc_pages(h);
79dfc695
MK
3666
3667 /*
3668 * Set demote order for each hstate. Note that
3669 * h->demote_order is initially 0.
3670 * - We can not demote gigantic pages if runtime freeing
3671 * is not supported, so skip this.
a01f4390
MK
3672 * - If CMA allocation is possible, we can not demote
3673 * HUGETLB_PAGE_ORDER or smaller size pages.
79dfc695
MK
3674 */
3675 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
3676 continue;
a01f4390
MK
3677 if (hugetlb_cma_size && h->order <= HUGETLB_PAGE_ORDER)
3678 continue;
79dfc695
MK
3679 for_each_hstate(h2) {
3680 if (h2 == h)
3681 continue;
3682 if (h2->order < h->order &&
3683 h2->order > h->demote_order)
3684 h->demote_order = h2->order;
3685 }
e5ff2159
AK
3686 }
3687}
3688
3689static void __init report_hugepages(void)
3690{
3691 struct hstate *h;
3692
3693 for_each_hstate(h) {
4abd32db 3694 char buf[32];
c6247f72
MW
3695
3696 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
6213834c 3697 pr_info("HugeTLB: registered %s page size, pre-allocated %ld pages\n",
c6247f72 3698 buf, h->free_huge_pages);
6213834c
MS
3699 pr_info("HugeTLB: %d KiB vmemmap can be freed for a %s page\n",
3700 hugetlb_vmemmap_optimizable_size(h) / SZ_1K, buf);
e5ff2159
AK
3701 }
3702}
3703
1da177e4 3704#ifdef CONFIG_HIGHMEM
6ae11b27
LS
3705static void try_to_free_low(struct hstate *h, unsigned long count,
3706 nodemask_t *nodes_allowed)
1da177e4 3707{
4415cc8d 3708 int i;
1121828a 3709 LIST_HEAD(page_list);
4415cc8d 3710
9487ca60 3711 lockdep_assert_held(&hugetlb_lock);
bae7f4ae 3712 if (hstate_is_gigantic(h))
aa888a74
AK
3713 return;
3714
1121828a
MK
3715 /*
3716 * Collect pages to be freed on a list, and free after dropping lock
3717 */
6ae11b27 3718 for_each_node_mask(i, *nodes_allowed) {
04bbfd84 3719 struct folio *folio, *next;
a5516438 3720 struct list_head *freel = &h->hugepage_freelists[i];
04bbfd84 3721 list_for_each_entry_safe(folio, next, freel, lru) {
a5516438 3722 if (count >= h->nr_huge_pages)
1121828a 3723 goto out;
04bbfd84 3724 if (folio_test_highmem(folio))
1da177e4 3725 continue;
04bbfd84
MWO
3726 remove_hugetlb_folio(h, folio, false);
3727 list_add(&folio->lru, &page_list);
1da177e4
LT
3728 }
3729 }
1121828a
MK
3730
3731out:
db71ef79 3732 spin_unlock_irq(&hugetlb_lock);
10c6ec49 3733 update_and_free_pages_bulk(h, &page_list);
db71ef79 3734 spin_lock_irq(&hugetlb_lock);
1da177e4
LT
3735}
3736#else
6ae11b27
LS
3737static inline void try_to_free_low(struct hstate *h, unsigned long count,
3738 nodemask_t *nodes_allowed)
1da177e4
LT
3739{
3740}
3741#endif
3742
20a0307c
WF
3743/*
3744 * Increment or decrement surplus_huge_pages. Keep node-specific counters
3745 * balanced by operating on them in a round-robin fashion.
3746 * Returns 1 if an adjustment was made.
3747 */
6ae11b27
LS
3748static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
3749 int delta)
20a0307c 3750{
b2261026 3751 int nr_nodes, node;
20a0307c 3752
9487ca60 3753 lockdep_assert_held(&hugetlb_lock);
20a0307c 3754 VM_BUG_ON(delta != -1 && delta != 1);
20a0307c 3755
b2261026 3756 if (delta < 0) {
2e73ff23 3757 for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node, nodes_allowed) {
b2261026
JK
3758 if (h->surplus_huge_pages_node[node])
3759 goto found;
e8c5c824 3760 }
b2261026
JK
3761 } else {
3762 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
3763 if (h->surplus_huge_pages_node[node] <
3764 h->nr_huge_pages_node[node])
3765 goto found;
e8c5c824 3766 }
b2261026
JK
3767 }
3768 return 0;
20a0307c 3769
b2261026
JK
3770found:
3771 h->surplus_huge_pages += delta;
3772 h->surplus_huge_pages_node[node] += delta;
3773 return 1;
20a0307c
WF
3774}
3775
a5516438 3776#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
fd875dca 3777static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
4eb0716e 3778 nodemask_t *nodes_allowed)
1da177e4 3779{
d67e32f2
MK
3780 unsigned long min_count;
3781 unsigned long allocated;
3782 struct folio *folio;
10c6ec49 3783 LIST_HEAD(page_list);
f60858f9
MK
3784 NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL);
3785
3786 /*
3787 * Bit mask controlling how hard we retry per-node allocations.
3788 * If we can not allocate the bit mask, do not attempt to allocate
3789 * the requested huge pages.
3790 */
3791 if (node_alloc_noretry)
3792 nodes_clear(*node_alloc_noretry);
3793 else
3794 return -ENOMEM;
1da177e4 3795
29383967
MK
3796 /*
3797 * resize_lock mutex prevents concurrent adjustments to number of
3798 * pages in hstate via the proc/sysfs interfaces.
3799 */
3800 mutex_lock(&h->resize_lock);
b65d4adb 3801 flush_free_hpage_work(h);
db71ef79 3802 spin_lock_irq(&hugetlb_lock);
4eb0716e 3803
fd875dca
MK
3804 /*
3805 * Check for a node specific request.
3806 * Changing node specific huge page count may require a corresponding
3807 * change to the global count. In any case, the passed node mask
3808 * (nodes_allowed) will restrict alloc/free to the specified node.
3809 */
3810 if (nid != NUMA_NO_NODE) {
3811 unsigned long old_count = count;
3812
b72b3c9c
XH
3813 count += persistent_huge_pages(h) -
3814 (h->nr_huge_pages_node[nid] -
3815 h->surplus_huge_pages_node[nid]);
fd875dca
MK
3816 /*
3817 * User may have specified a large count value which caused the
3818 * above calculation to overflow. In this case, they wanted
3819 * to allocate as many huge pages as possible. Set count to
3820 * largest possible value to align with their intention.
3821 */
3822 if (count < old_count)
3823 count = ULONG_MAX;
3824 }
3825
4eb0716e
AG
3826 /*
3827 * Gigantic pages runtime allocation depend on the capability for large
3828 * page range allocation.
3829 * If the system does not provide this feature, return an error when
3830 * the user tries to allocate gigantic pages but let the user free the
3831 * boottime allocated gigantic pages.
3832 */
3833 if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) {
3834 if (count > persistent_huge_pages(h)) {
db71ef79 3835 spin_unlock_irq(&hugetlb_lock);
29383967 3836 mutex_unlock(&h->resize_lock);
f60858f9 3837 NODEMASK_FREE(node_alloc_noretry);
4eb0716e
AG
3838 return -EINVAL;
3839 }
3840 /* Fall through to decrease pool */
3841 }
aa888a74 3842
7893d1d5
AL
3843 /*
3844 * Increase the pool size
3845 * First take pages out of surplus state. Then make up the
3846 * remaining difference by allocating fresh huge pages.
d1c3fb1f 3847 *
3a740e8b 3848 * We might race with alloc_surplus_hugetlb_folio() here and be unable
d1c3fb1f
NA
3849 * to convert a surplus huge page to a normal huge page. That is
3850 * not critical, though, it just means the overall size of the
3851 * pool might be one hugepage larger than it needs to be, but
3852 * within all the constraints specified by the sysctls.
7893d1d5 3853 */
a5516438 3854 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
6ae11b27 3855 if (!adjust_pool_surplus(h, nodes_allowed, -1))
7893d1d5
AL
3856 break;
3857 }
3858
d67e32f2
MK
3859 allocated = 0;
3860 while (count > (persistent_huge_pages(h) + allocated)) {
7893d1d5
AL
3861 /*
3862 * If this allocation races such that we no longer need the
454a00c4 3863 * page, free_huge_folio will handle it by freeing the page
7893d1d5
AL
3864 * and reducing the surplus.
3865 */
db71ef79 3866 spin_unlock_irq(&hugetlb_lock);
649920c6
JH
3867
3868 /* yield cpu to avoid soft lockup */
3869 cond_resched();
3870
d67e32f2 3871 folio = alloc_pool_huge_folio(h, nodes_allowed,
2e73ff23
GL
3872 node_alloc_noretry,
3873 &h->next_nid_to_alloc);
d67e32f2
MK
3874 if (!folio) {
3875 prep_and_add_allocated_folios(h, &page_list);
3876 spin_lock_irq(&hugetlb_lock);
7893d1d5 3877 goto out;
d67e32f2
MK
3878 }
3879
3880 list_add(&folio->lru, &page_list);
3881 allocated++;
7893d1d5 3882
536240f2 3883 /* Bail for signals. Probably ctrl-c from user */
d67e32f2
MK
3884 if (signal_pending(current)) {
3885 prep_and_add_allocated_folios(h, &page_list);
3886 spin_lock_irq(&hugetlb_lock);
536240f2 3887 goto out;
d67e32f2
MK
3888 }
3889
3890 spin_lock_irq(&hugetlb_lock);
3891 }
3892
3893 /* Add allocated pages to the pool */
3894 if (!list_empty(&page_list)) {
3895 spin_unlock_irq(&hugetlb_lock);
3896 prep_and_add_allocated_folios(h, &page_list);
3897 spin_lock_irq(&hugetlb_lock);
7893d1d5 3898 }
7893d1d5
AL
3899
3900 /*
3901 * Decrease the pool size
3902 * First return free pages to the buddy allocator (being careful
3903 * to keep enough around to satisfy reservations). Then place
3904 * pages into surplus state as needed so the pool will shrink
3905 * to the desired size as pages become free.
d1c3fb1f
NA
3906 *
3907 * By placing pages into the surplus state independent of the
3908 * overcommit value, we are allowing the surplus pool size to
3909 * exceed overcommit. There are few sane options here. Since
3a740e8b 3910 * alloc_surplus_hugetlb_folio() is checking the global counter,
d1c3fb1f
NA
3911 * though, we'll note that we're not allowed to exceed surplus
3912 * and won't grow the pool anywhere else. Not until one of the
3913 * sysctls are changed, or the surplus pages go out of use.
7893d1d5 3914 */
a5516438 3915 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
6b0c880d 3916 min_count = max(count, min_count);
6ae11b27 3917 try_to_free_low(h, min_count, nodes_allowed);
10c6ec49
MK
3918
3919 /*
3920 * Collect pages to be removed on list without dropping lock
3921 */
a5516438 3922 while (min_count < persistent_huge_pages(h)) {
d5b43e96
MWO
3923 folio = remove_pool_hugetlb_folio(h, nodes_allowed, 0);
3924 if (!folio)
1da177e4 3925 break;
10c6ec49 3926
d5b43e96 3927 list_add(&folio->lru, &page_list);
1da177e4 3928 }
10c6ec49 3929 /* free the pages after dropping lock */
db71ef79 3930 spin_unlock_irq(&hugetlb_lock);
10c6ec49 3931 update_and_free_pages_bulk(h, &page_list);
b65d4adb 3932 flush_free_hpage_work(h);
db71ef79 3933 spin_lock_irq(&hugetlb_lock);
10c6ec49 3934
a5516438 3935 while (count < persistent_huge_pages(h)) {
6ae11b27 3936 if (!adjust_pool_surplus(h, nodes_allowed, 1))
7893d1d5
AL
3937 break;
3938 }
3939out:
4eb0716e 3940 h->max_huge_pages = persistent_huge_pages(h);
db71ef79 3941 spin_unlock_irq(&hugetlb_lock);
29383967 3942 mutex_unlock(&h->resize_lock);
4eb0716e 3943
f60858f9
MK
3944 NODEMASK_FREE(node_alloc_noretry);
3945
4eb0716e 3946 return 0;
1da177e4
LT
3947}
3948
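For the node-specific path in set_max_huge_pages() above, a worked example (numbers illustrative): if the pool currently holds 1000 persistent pages globally, of which 100 sit on node 2, then a request of 250 pages for node 2 becomes a global target of 250 + (1000 - 100) = 1150, with nodes_allowed restricted to node 2 so that all growth or shrinkage happens on that node.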
bdd7be07 3949static int demote_free_hugetlb_folio(struct hstate *h, struct folio *folio)
8531fc6f 3950{
bdd7be07 3951 int i, nid = folio_nid(folio);
8531fc6f 3952 struct hstate *target_hstate;
31731452 3953 struct page *subpage;
bdd7be07 3954 struct folio *inner_folio;
8531fc6f
MK
3955 int rc = 0;
3956
3957 target_hstate = size_to_hstate(PAGE_SIZE << h->demote_order);
3958
cfd5082b 3959 remove_hugetlb_folio_for_demote(h, folio, false);
8531fc6f
MK
3960 spin_unlock_irq(&hugetlb_lock);
3961
d8f5f7e4
MK
3962 /*
3963 * If vmemmap already existed for folio, the remove routine above would
3964 * have cleared the hugetlb folio flag. Hence the folio is technically
c5ad3233 3965 * no longer a hugetlb folio. hugetlb_vmemmap_restore_folio can only be
d8f5f7e4
MK
3966 * passed hugetlb folios and will BUG otherwise.
3967 */
3968 if (folio_test_hugetlb(folio)) {
c5ad3233 3969 rc = hugetlb_vmemmap_restore_folio(h, folio);
d8f5f7e4
MK
3970 if (rc) {
3971			/* Allocation of vmemmap failed, we cannot demote the folio */
3972 spin_lock_irq(&hugetlb_lock);
3973 folio_ref_unfreeze(folio, 1);
3974 add_hugetlb_folio(h, folio, false);
3975 return rc;
3976 }
8531fc6f
MK
3977 }
3978
3979 /*
911565b8 3980 * Use destroy_compound_hugetlb_folio_for_demote for all huge page
bdd7be07 3981 * sizes as it will not ref count folios.
8531fc6f 3982 */
911565b8 3983 destroy_compound_hugetlb_folio_for_demote(folio, huge_page_order(h));
8531fc6f
MK
3984
3985 /*
3986 * Taking target hstate mutex synchronizes with set_max_huge_pages.
3987 * Without the mutex, pages added to target hstate could be marked
3988 * as surplus.
3989 *
3990 * Note that we already hold h->resize_lock. To prevent deadlock,
3991 * use the convention of always taking larger size hstate mutex first.
3992 */
3993 mutex_lock(&target_hstate->resize_lock);
3994 for (i = 0; i < pages_per_huge_page(h);
3995 i += pages_per_huge_page(target_hstate)) {
bdd7be07
SK
3996 subpage = folio_page(folio, i);
3997 inner_folio = page_folio(subpage);
8531fc6f 3998 if (hstate_is_gigantic(target_hstate))
bdd7be07 3999 prep_compound_gigantic_folio_for_demote(inner_folio,
8531fc6f
MK
4000 target_hstate->order);
4001 else
31731452 4002 prep_compound_page(subpage, target_hstate->order);
bdd7be07
SK
4003 folio_change_private(inner_folio, NULL);
4004 prep_new_hugetlb_folio(target_hstate, inner_folio, nid);
454a00c4 4005 free_huge_folio(inner_folio);
8531fc6f
MK
4006 }
4007 mutex_unlock(&target_hstate->resize_lock);
4008
4009 spin_lock_irq(&hugetlb_lock);
4010
4011 /*
4012 * Not absolutely necessary, but for consistency update max_huge_pages
4013 * based on pool changes for the demoted page.
4014 */
4015 h->max_huge_pages--;
a43a83c7
ML
4016 target_hstate->max_huge_pages +=
4017 pages_per_huge_page(h) / pages_per_huge_page(target_hstate);
8531fc6f
MK
4018
4019 return rc;
4020}
4021
79dfc695
MK
4022static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
4023 __must_hold(&hugetlb_lock)
4024{
8531fc6f 4025 int nr_nodes, node;
bdd7be07 4026 struct folio *folio;
79dfc695
MK
4027
4028 lockdep_assert_held(&hugetlb_lock);
4029
4030 /* We should never get here if no demote order */
4031 if (!h->demote_order) {
4032 pr_warn("HugeTLB: NULL demote order passed to demote_pool_huge_page.\n");
4033 return -EINVAL; /* internal error */
4034 }
4035
8531fc6f 4036 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
bdd7be07
SK
4037 list_for_each_entry(folio, &h->hugepage_freelists[node], lru) {
4038 if (folio_test_hwpoison(folio))
5a317412 4039 continue;
bdd7be07 4040 return demote_free_hugetlb_folio(h, folio);
8531fc6f
MK
4041 }
4042 }
4043
5a317412
MK
4044 /*
4045 * Only way to get here is if all pages on free lists are poisoned.
4046 * Return -EBUSY so that caller will not retry.
4047 */
4048 return -EBUSY;
79dfc695
MK
4049}
4050
a3437870
NA
4051#define HSTATE_ATTR_RO(_name) \
4052 static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
4053
79dfc695
MK
4054#define HSTATE_ATTR_WO(_name) \
4055 static struct kobj_attribute _name##_attr = __ATTR_WO(_name)
4056
a3437870 4057#define HSTATE_ATTR(_name) \
98bc26ac 4058 static struct kobj_attribute _name##_attr = __ATTR_RW(_name)
a3437870
NA
4059
4060static struct kobject *hugepages_kobj;
4061static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
4062
9a305230
LS
4063static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
4064
4065static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
a3437870
NA
4066{
4067 int i;
9a305230 4068
a3437870 4069 for (i = 0; i < HUGE_MAX_HSTATE; i++)
9a305230
LS
4070 if (hstate_kobjs[i] == kobj) {
4071 if (nidp)
4072 *nidp = NUMA_NO_NODE;
a3437870 4073 return &hstates[i];
9a305230
LS
4074 }
4075
4076 return kobj_to_node_hstate(kobj, nidp);
a3437870
NA
4077}
4078
06808b08 4079static ssize_t nr_hugepages_show_common(struct kobject *kobj,
a3437870
NA
4080 struct kobj_attribute *attr, char *buf)
4081{
9a305230
LS
4082 struct hstate *h;
4083 unsigned long nr_huge_pages;
4084 int nid;
4085
4086 h = kobj_to_hstate(kobj, &nid);
4087 if (nid == NUMA_NO_NODE)
4088 nr_huge_pages = h->nr_huge_pages;
4089 else
4090 nr_huge_pages = h->nr_huge_pages_node[nid];
4091
ae7a927d 4092 return sysfs_emit(buf, "%lu\n", nr_huge_pages);
a3437870 4093}
adbe8726 4094
238d3c13
DR
4095static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
4096 struct hstate *h, int nid,
4097 unsigned long count, size_t len)
a3437870
NA
4098{
4099 int err;
2d0adf7e 4100 nodemask_t nodes_allowed, *n_mask;
a3437870 4101
2d0adf7e
OS
4102 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
4103 return -EINVAL;
adbe8726 4104
9a305230
LS
4105 if (nid == NUMA_NO_NODE) {
4106 /*
4107 * global hstate attribute
4108 */
4109 if (!(obey_mempolicy &&
2d0adf7e
OS
4110 init_nodemask_of_mempolicy(&nodes_allowed)))
4111 n_mask = &node_states[N_MEMORY];
4112 else
4113 n_mask = &nodes_allowed;
4114 } else {
9a305230 4115 /*
fd875dca
MK
4116 * Node specific request. count adjustment happens in
4117 * set_max_huge_pages() after acquiring hugetlb_lock.
9a305230 4118 */
2d0adf7e
OS
4119 init_nodemask_of_node(&nodes_allowed, nid);
4120 n_mask = &nodes_allowed;
fd875dca 4121 }
9a305230 4122
2d0adf7e 4123 err = set_max_huge_pages(h, count, nid, n_mask);
06808b08 4124
4eb0716e 4125 return err ? err : len;
06808b08
LS
4126}
4127
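These attributes surface as files under /sys/kernel/mm/hugepages/hugepages-<size>kB/ for the global hstate and, with CONFIG_NUMA, under /sys/devices/system/node/node<N>/hugepages/hugepages-<size>kB/ for per-node counts. A minimal userspace sketch, assuming a 2 MiB huge page size, an online node 0, and root privileges (paths and values are illustrative):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, val, strlen(val)) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	/* Global 2 MiB pool size: routed through nr_hugepages_store() above. */
	write_str("/sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages", "1024");

	/* Node-specific request: nid != NUMA_NO_NODE in the handlers above. */
	write_str("/sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages", "512");

	return 0;
}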
238d3c13
DR
4128static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
4129 struct kobject *kobj, const char *buf,
4130 size_t len)
4131{
4132 struct hstate *h;
4133 unsigned long count;
4134 int nid;
4135 int err;
4136
4137 err = kstrtoul(buf, 10, &count);
4138 if (err)
4139 return err;
4140
4141 h = kobj_to_hstate(kobj, &nid);
4142 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
4143}
4144
06808b08
LS
4145static ssize_t nr_hugepages_show(struct kobject *kobj,
4146 struct kobj_attribute *attr, char *buf)
4147{
4148 return nr_hugepages_show_common(kobj, attr, buf);
4149}
4150
4151static ssize_t nr_hugepages_store(struct kobject *kobj,
4152 struct kobj_attribute *attr, const char *buf, size_t len)
4153{
238d3c13 4154 return nr_hugepages_store_common(false, kobj, buf, len);
a3437870
NA
4155}
4156HSTATE_ATTR(nr_hugepages);
4157
06808b08
LS
4158#ifdef CONFIG_NUMA
4159
4160/*
4161 * hstate attribute for optionally mempolicy-based constraint on persistent
4162 * huge page alloc/free.
4163 */
4164static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
ae7a927d
JP
4165 struct kobj_attribute *attr,
4166 char *buf)
06808b08
LS
4167{
4168 return nr_hugepages_show_common(kobj, attr, buf);
4169}
4170
4171static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
4172 struct kobj_attribute *attr, const char *buf, size_t len)
4173{
238d3c13 4174 return nr_hugepages_store_common(true, kobj, buf, len);
06808b08
LS
4175}
4176HSTATE_ATTR(nr_hugepages_mempolicy);
4177#endif
4178
4179
a3437870
NA
4180static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
4181 struct kobj_attribute *attr, char *buf)
4182{
9a305230 4183 struct hstate *h = kobj_to_hstate(kobj, NULL);
ae7a927d 4184 return sysfs_emit(buf, "%lu\n", h->nr_overcommit_huge_pages);
a3437870 4185}
adbe8726 4186
a3437870
NA
4187static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
4188 struct kobj_attribute *attr, const char *buf, size_t count)
4189{
4190 int err;
4191 unsigned long input;
9a305230 4192 struct hstate *h = kobj_to_hstate(kobj, NULL);
a3437870 4193
bae7f4ae 4194 if (hstate_is_gigantic(h))
adbe8726
EM
4195 return -EINVAL;
4196
3dbb95f7 4197 err = kstrtoul(buf, 10, &input);
a3437870 4198 if (err)
73ae31e5 4199 return err;
a3437870 4200
db71ef79 4201 spin_lock_irq(&hugetlb_lock);
a3437870 4202 h->nr_overcommit_huge_pages = input;
db71ef79 4203 spin_unlock_irq(&hugetlb_lock);
a3437870
NA
4204
4205 return count;
4206}
4207HSTATE_ATTR(nr_overcommit_hugepages);
4208
4209static ssize_t free_hugepages_show(struct kobject *kobj,
4210 struct kobj_attribute *attr, char *buf)
4211{
9a305230
LS
4212 struct hstate *h;
4213 unsigned long free_huge_pages;
4214 int nid;
4215
4216 h = kobj_to_hstate(kobj, &nid);
4217 if (nid == NUMA_NO_NODE)
4218 free_huge_pages = h->free_huge_pages;
4219 else
4220 free_huge_pages = h->free_huge_pages_node[nid];
4221
ae7a927d 4222 return sysfs_emit(buf, "%lu\n", free_huge_pages);
a3437870
NA
4223}
4224HSTATE_ATTR_RO(free_hugepages);
4225
4226static ssize_t resv_hugepages_show(struct kobject *kobj,
4227 struct kobj_attribute *attr, char *buf)
4228{
9a305230 4229 struct hstate *h = kobj_to_hstate(kobj, NULL);
ae7a927d 4230 return sysfs_emit(buf, "%lu\n", h->resv_huge_pages);
a3437870
NA
4231}
4232HSTATE_ATTR_RO(resv_hugepages);
4233
4234static ssize_t surplus_hugepages_show(struct kobject *kobj,
4235 struct kobj_attribute *attr, char *buf)
4236{
9a305230
LS
4237 struct hstate *h;
4238 unsigned long surplus_huge_pages;
4239 int nid;
4240
4241 h = kobj_to_hstate(kobj, &nid);
4242 if (nid == NUMA_NO_NODE)
4243 surplus_huge_pages = h->surplus_huge_pages;
4244 else
4245 surplus_huge_pages = h->surplus_huge_pages_node[nid];
4246
ae7a927d 4247 return sysfs_emit(buf, "%lu\n", surplus_huge_pages);
a3437870
NA
4248}
4249HSTATE_ATTR_RO(surplus_hugepages);
4250
79dfc695
MK
4251static ssize_t demote_store(struct kobject *kobj,
4252 struct kobj_attribute *attr, const char *buf, size_t len)
4253{
4254 unsigned long nr_demote;
4255 unsigned long nr_available;
4256 nodemask_t nodes_allowed, *n_mask;
4257 struct hstate *h;
8eeda55f 4258 int err;
79dfc695
MK
4259 int nid;
4260
4261 err = kstrtoul(buf, 10, &nr_demote);
4262 if (err)
4263 return err;
4264 h = kobj_to_hstate(kobj, &nid);
4265
4266 if (nid != NUMA_NO_NODE) {
4267 init_nodemask_of_node(&nodes_allowed, nid);
4268 n_mask = &nodes_allowed;
4269 } else {
4270 n_mask = &node_states[N_MEMORY];
4271 }
4272
4273 /* Synchronize with other sysfs operations modifying huge pages */
4274 mutex_lock(&h->resize_lock);
4275 spin_lock_irq(&hugetlb_lock);
4276
4277 while (nr_demote) {
4278 /*
4279		 * Check for available pages to demote each time through the
4280 * loop as demote_pool_huge_page will drop hugetlb_lock.
79dfc695
MK
4281 */
4282 if (nid != NUMA_NO_NODE)
4283 nr_available = h->free_huge_pages_node[nid];
4284 else
4285 nr_available = h->free_huge_pages;
4286 nr_available -= h->resv_huge_pages;
4287 if (!nr_available)
4288 break;
4289
4290 err = demote_pool_huge_page(h, n_mask);
4291 if (err)
4292 break;
4293
4294 nr_demote--;
4295 }
4296
4297 spin_unlock_irq(&hugetlb_lock);
4298 mutex_unlock(&h->resize_lock);
4299
4300 if (err)
4301 return err;
4302 return len;
4303}
4304HSTATE_ATTR_WO(demote);
4305
4306static ssize_t demote_size_show(struct kobject *kobj,
4307 struct kobj_attribute *attr, char *buf)
4308{
12658abf 4309 struct hstate *h = kobj_to_hstate(kobj, NULL);
79dfc695
MK
4310 unsigned long demote_size = (PAGE_SIZE << h->demote_order) / SZ_1K;
4311
4312 return sysfs_emit(buf, "%lukB\n", demote_size);
4313}
4314
4315static ssize_t demote_size_store(struct kobject *kobj,
4316 struct kobj_attribute *attr,
4317 const char *buf, size_t count)
4318{
4319 struct hstate *h, *demote_hstate;
4320 unsigned long demote_size;
4321 unsigned int demote_order;
79dfc695
MK
4322
4323 demote_size = (unsigned long)memparse(buf, NULL);
4324
4325 demote_hstate = size_to_hstate(demote_size);
4326 if (!demote_hstate)
4327 return -EINVAL;
4328 demote_order = demote_hstate->order;
a01f4390
MK
4329 if (demote_order < HUGETLB_PAGE_ORDER)
4330 return -EINVAL;
79dfc695
MK
4331
4332 /* demote order must be smaller than hstate order */
12658abf 4333 h = kobj_to_hstate(kobj, NULL);
79dfc695
MK
4334 if (demote_order >= h->order)
4335 return -EINVAL;
4336
4337 /* resize_lock synchronizes access to demote size and writes */
4338 mutex_lock(&h->resize_lock);
4339 h->demote_order = demote_order;
4340 mutex_unlock(&h->resize_lock);
4341
4342 return count;
4343}
4344HSTATE_ATTR(demote_size);
4345
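Taken together, the two attributes above appear per hstate as demote_size and demote under /sys/kernel/mm/hugepages/hugepages-<size>kB/. An illustrative sequence on a system with both 1 GiB and 2 MiB hstates is to leave demote_size at its default (the next smaller supported size) or write a size such as 2M to it, then write a count to demote; the loop above then splits up to that many free, unreserved 1 GiB pages into 2 MiB pages, stopping early if no demotable pages remain or an error occurs.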
a3437870
NA
4346static struct attribute *hstate_attrs[] = {
4347 &nr_hugepages_attr.attr,
4348 &nr_overcommit_hugepages_attr.attr,
4349 &free_hugepages_attr.attr,
4350 &resv_hugepages_attr.attr,
4351 &surplus_hugepages_attr.attr,
06808b08
LS
4352#ifdef CONFIG_NUMA
4353 &nr_hugepages_mempolicy_attr.attr,
4354#endif
a3437870
NA
4355 NULL,
4356};
4357
67e5ed96 4358static const struct attribute_group hstate_attr_group = {
a3437870
NA
4359 .attrs = hstate_attrs,
4360};
4361
79dfc695
MK
4362static struct attribute *hstate_demote_attrs[] = {
4363 &demote_size_attr.attr,
4364 &demote_attr.attr,
4365 NULL,
4366};
4367
4368static const struct attribute_group hstate_demote_attr_group = {
4369 .attrs = hstate_demote_attrs,
4370};
4371
094e9539
JM
4372static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
4373 struct kobject **hstate_kobjs,
67e5ed96 4374 const struct attribute_group *hstate_attr_group)
a3437870
NA
4375{
4376 int retval;
972dc4de 4377 int hi = hstate_index(h);
a3437870 4378
9a305230
LS
4379 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
4380 if (!hstate_kobjs[hi])
a3437870
NA
4381 return -ENOMEM;
4382
9a305230 4383 retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
cc2205a6 4384 if (retval) {
9a305230 4385 kobject_put(hstate_kobjs[hi]);
cc2205a6 4386 hstate_kobjs[hi] = NULL;
3a6bdda0 4387 return retval;
cc2205a6 4388 }
a3437870 4389
79dfc695 4390 if (h->demote_order) {
01088a60
ML
4391 retval = sysfs_create_group(hstate_kobjs[hi],
4392 &hstate_demote_attr_group);
4393 if (retval) {
79dfc695 4394 pr_warn("HugeTLB unable to create demote interfaces for %s\n", h->name);
01088a60
ML
4395 sysfs_remove_group(hstate_kobjs[hi], hstate_attr_group);
4396 kobject_put(hstate_kobjs[hi]);
4397 hstate_kobjs[hi] = NULL;
4398 return retval;
4399 }
79dfc695
MK
4400 }
4401
01088a60 4402 return 0;
a3437870
NA
4403}
4404
9a305230 4405#ifdef CONFIG_NUMA
a4a00b45 4406static bool hugetlb_sysfs_initialized __ro_after_init;
9a305230
LS
4407
4408/*
4409 * node_hstate/s - associate per node hstate attributes, via their kobjects,
10fbcf4c
KS
4410 * with node devices in node_devices[] using a parallel array. The array
4411 * index of a node device or _hstate == node id.
4412 * This is here to avoid any static dependency of the node device driver, in
9a305230
LS
4413 * the base kernel, on the hugetlb module.
4414 */
4415struct node_hstate {
4416 struct kobject *hugepages_kobj;
4417 struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
4418};
b4e289a6 4419static struct node_hstate node_hstates[MAX_NUMNODES];
9a305230
LS
4420
4421/*
10fbcf4c 4422 * A subset of global hstate attributes for node devices
9a305230
LS
4423 */
4424static struct attribute *per_node_hstate_attrs[] = {
4425 &nr_hugepages_attr.attr,
4426 &free_hugepages_attr.attr,
4427 &surplus_hugepages_attr.attr,
4428 NULL,
4429};
4430
67e5ed96 4431static const struct attribute_group per_node_hstate_attr_group = {
9a305230
LS
4432 .attrs = per_node_hstate_attrs,
4433};
4434
4435/*
10fbcf4c 4436 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
9a305230
LS
4437 * Returns node id via non-NULL nidp.
4438 */
4439static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
4440{
4441 int nid;
4442
4443 for (nid = 0; nid < nr_node_ids; nid++) {
4444 struct node_hstate *nhs = &node_hstates[nid];
4445 int i;
4446 for (i = 0; i < HUGE_MAX_HSTATE; i++)
4447 if (nhs->hstate_kobjs[i] == kobj) {
4448 if (nidp)
4449 *nidp = nid;
4450 return &hstates[i];
4451 }
4452 }
4453
4454 BUG();
4455 return NULL;
4456}
4457
4458/*
10fbcf4c 4459 * Unregister hstate attributes from a single node device.
9a305230
LS
4460 * No-op if no hstate attributes attached.
4461 */
a4a00b45 4462void hugetlb_unregister_node(struct node *node)
9a305230
LS
4463{
4464 struct hstate *h;
10fbcf4c 4465 struct node_hstate *nhs = &node_hstates[node->dev.id];
9a305230
LS
4466
4467 if (!nhs->hugepages_kobj)
9b5e5d0f 4468 return; /* no hstate attributes */
9a305230 4469
972dc4de
AK
4470 for_each_hstate(h) {
4471 int idx = hstate_index(h);
01088a60
ML
4472 struct kobject *hstate_kobj = nhs->hstate_kobjs[idx];
4473
4474 if (!hstate_kobj)
4475 continue;
4476 if (h->demote_order)
4477 sysfs_remove_group(hstate_kobj, &hstate_demote_attr_group);
4478 sysfs_remove_group(hstate_kobj, &per_node_hstate_attr_group);
4479 kobject_put(hstate_kobj);
4480 nhs->hstate_kobjs[idx] = NULL;
972dc4de 4481 }
9a305230
LS
4482
4483 kobject_put(nhs->hugepages_kobj);
4484 nhs->hugepages_kobj = NULL;
4485}
4486
9a305230
LS
4487
4488/*
10fbcf4c 4489 * Register hstate attributes for a single node device.
9a305230
LS
4490 * No-op if attributes already registered.
4491 */
a4a00b45 4492void hugetlb_register_node(struct node *node)
9a305230
LS
4493{
4494 struct hstate *h;
10fbcf4c 4495 struct node_hstate *nhs = &node_hstates[node->dev.id];
9a305230
LS
4496 int err;
4497
a4a00b45
MS
4498 if (!hugetlb_sysfs_initialized)
4499 return;
4500
9a305230
LS
4501 if (nhs->hugepages_kobj)
4502 return; /* already allocated */
4503
4504 nhs->hugepages_kobj = kobject_create_and_add("hugepages",
10fbcf4c 4505 &node->dev.kobj);
9a305230
LS
4506 if (!nhs->hugepages_kobj)
4507 return;
4508
4509 for_each_hstate(h) {
4510 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
4511 nhs->hstate_kobjs,
4512 &per_node_hstate_attr_group);
4513 if (err) {
282f4214 4514 pr_err("HugeTLB: Unable to add hstate %s for node %d\n",
ffb22af5 4515 h->name, node->dev.id);
9a305230
LS
4516 hugetlb_unregister_node(node);
4517 break;
4518 }
4519 }
4520}
4521
4522/*
9b5e5d0f 4523 * hugetlb init time: register hstate attributes for all registered node
10fbcf4c
KS
4524 * devices of nodes that have memory. All on-line nodes should have
4525 * registered their associated device by this time.
9a305230 4526 */
7d9ca000 4527static void __init hugetlb_register_all_nodes(void)
9a305230
LS
4528{
4529 int nid;
4530
a4a00b45 4531 for_each_online_node(nid)
b958d4d0 4532 hugetlb_register_node(node_devices[nid]);
9a305230
LS
4533}
4534#else /* !CONFIG_NUMA */
4535
4536static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
4537{
4538 BUG();
4539 if (nidp)
4540 *nidp = -1;
4541 return NULL;
4542}
4543
9a305230
LS
4544static void hugetlb_register_all_nodes(void) { }
4545
4546#endif
4547
263b8998
ML
4548#ifdef CONFIG_CMA
4549static void __init hugetlb_cma_check(void);
4550#else
4551static inline __init void hugetlb_cma_check(void)
4552{
4553}
4554#endif
4555
a4a00b45
MS
4556static void __init hugetlb_sysfs_init(void)
4557{
4558 struct hstate *h;
4559 int err;
4560
4561 hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
4562 if (!hugepages_kobj)
4563 return;
4564
4565 for_each_hstate(h) {
4566 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
4567 hstate_kobjs, &hstate_attr_group);
4568 if (err)
4569 pr_err("HugeTLB: Unable to add hstate %s", h->name);
4570 }
4571
4572#ifdef CONFIG_NUMA
4573 hugetlb_sysfs_initialized = true;
4574#endif
4575 hugetlb_register_all_nodes();
4576}
4577
962de548
KW
4578#ifdef CONFIG_SYSCTL
4579static void hugetlb_sysctl_init(void);
4580#else
4581static inline void hugetlb_sysctl_init(void) { }
4582#endif
4583
a3437870
NA
4584static int __init hugetlb_init(void)
4585{
8382d914
DB
4586 int i;
4587
d6995da3
MK
4588 BUILD_BUG_ON(sizeof_field(struct page, private) * BITS_PER_BYTE <
4589 __NR_HPAGEFLAGS);
4590
c2833a5b
MK
4591 if (!hugepages_supported()) {
4592 if (hugetlb_max_hstate || default_hstate_max_huge_pages)
4593 pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n");
0ef89d25 4594 return 0;
c2833a5b 4595 }
a3437870 4596
282f4214
MK
4597 /*
4598 * Make sure HPAGE_SIZE (HUGETLB_PAGE_ORDER) hstate exists. Some
4599 * architectures depend on setup being done here.
4600 */
4601 hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
4602 if (!parsed_default_hugepagesz) {
4603 /*
4604 * If we did not parse a default huge page size, set
4605 * default_hstate_idx to HPAGE_SIZE hstate. And, if the
4606 * number of huge pages for this default size was implicitly
4607 * specified, set that here as well.
4608 * Note that the implicit setting will overwrite an explicit
4609 * setting. A warning will be printed in this case.
4610 */
4611 default_hstate_idx = hstate_index(size_to_hstate(HPAGE_SIZE));
4612 if (default_hstate_max_huge_pages) {
4613 if (default_hstate.max_huge_pages) {
4614 char buf[32];
4615
4616 string_get_size(huge_page_size(&default_hstate),
4617 1, STRING_UNITS_2, buf, 32);
4618 pr_warn("HugeTLB: Ignoring hugepages=%lu associated with %s page size\n",
4619 default_hstate.max_huge_pages, buf);
4620 pr_warn("HugeTLB: Using hugepages=%lu for number of default huge pages\n",
4621 default_hstate_max_huge_pages);
4622 }
4623 default_hstate.max_huge_pages =
4624 default_hstate_max_huge_pages;
b5389086 4625
0a7a0f6f 4626 for_each_online_node(i)
b5389086
ZY
4627 default_hstate.max_huge_pages_node[i] =
4628 default_hugepages_in_node[i];
d715cf80 4629 }
f8b74815 4630 }
a3437870 4631
cf11e85f 4632 hugetlb_cma_check();
a3437870 4633 hugetlb_init_hstates();
aa888a74 4634 gather_bootmem_prealloc();
a3437870
NA
4635 report_hugepages();
4636
4637 hugetlb_sysfs_init();
7179e7bf 4638 hugetlb_cgroup_file_init();
962de548 4639 hugetlb_sysctl_init();
9a305230 4640
8382d914
DB
4641#ifdef CONFIG_SMP
4642 num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
4643#else
4644 num_fault_mutexes = 1;
4645#endif
c672c7f2 4646 hugetlb_fault_mutex_table =
6da2ec56
KC
4647 kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
4648 GFP_KERNEL);
c672c7f2 4649 BUG_ON(!hugetlb_fault_mutex_table);
8382d914
DB
4650
4651 for (i = 0; i < num_fault_mutexes; i++)
c672c7f2 4652 mutex_init(&hugetlb_fault_mutex_table[i]);
a3437870
NA
4653 return 0;
4654}
3e89e1c5 4655subsys_initcall(hugetlb_init);
a3437870 4656
ae94da89
MK
4657/* Overwritten by architectures with more huge page sizes */
4658bool __init __attribute((weak)) arch_hugetlb_valid_size(unsigned long size)
9fee021d 4659{
ae94da89 4660 return size == HPAGE_SIZE;
9fee021d
VT
4661}
4662
d00181b9 4663void __init hugetlb_add_hstate(unsigned int order)
a3437870
NA
4664{
4665 struct hstate *h;
8faa8b07
AK
4666 unsigned long i;
4667
a3437870 4668 if (size_to_hstate(PAGE_SIZE << order)) {
a3437870
NA
4669 return;
4670 }
47d38344 4671 BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
59838b25 4672 BUG_ON(order < order_base_2(__NR_USED_SUBPAGE));
47d38344 4673 h = &hstates[hugetlb_max_hstate++];
29383967 4674 mutex_init(&h->resize_lock);
a3437870 4675 h->order = order;
aca78307 4676 h->mask = ~(huge_page_size(h) - 1);
8faa8b07
AK
4677 for (i = 0; i < MAX_NUMNODES; ++i)
4678 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
0edaecfa 4679 INIT_LIST_HEAD(&h->hugepage_activelist);
54f18d35
AM
4680 h->next_nid_to_alloc = first_memory_node;
4681 h->next_nid_to_free = first_memory_node;
a3437870 4682 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
c2c3a60a 4683 huge_page_size(h)/SZ_1K);
8faa8b07 4684
a3437870
NA
4685 parsed_hstate = h;
4686}
4687
b5389086
ZY
4688bool __init __weak hugetlb_node_alloc_supported(void)
4689{
4690 return true;
4691}
f87442f4
PL
4692
4693static void __init hugepages_clear_pages_in_node(void)
4694{
4695 if (!hugetlb_max_hstate) {
4696 default_hstate_max_huge_pages = 0;
4697 memset(default_hugepages_in_node, 0,
10395680 4698 sizeof(default_hugepages_in_node));
f87442f4
PL
4699 } else {
4700 parsed_hstate->max_huge_pages = 0;
4701 memset(parsed_hstate->max_huge_pages_node, 0,
10395680 4702 sizeof(parsed_hstate->max_huge_pages_node));
f87442f4
PL
4703 }
4704}
4705
282f4214
MK
4706/*
4707 * hugepages command line processing
4708 * hugepages normally follows a valid hugepagesz or default_hugepagesz
4709 * specification. If not, ignore the hugepages value. hugepages can also
4710 * be the first huge page command line option in which case it implicitly
4711 * specifies the number of huge pages for the default size.
4712 */
4713static int __init hugepages_setup(char *s)
a3437870
NA
4714{
4715 unsigned long *mhp;
8faa8b07 4716 static unsigned long *last_mhp;
b5389086
ZY
4717 int node = NUMA_NO_NODE;
4718 int count;
4719 unsigned long tmp;
4720 char *p = s;
a3437870 4721
9fee021d 4722 if (!parsed_valid_hugepagesz) {
282f4214 4723 pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s);
9fee021d 4724 parsed_valid_hugepagesz = true;
f81f6e4b 4725 return 1;
9fee021d 4726 }
282f4214 4727
a3437870 4728 /*
282f4214
MK
4729 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter
4730 * yet, so this hugepages= parameter goes to the "default hstate".
4731 * Otherwise, it goes with the previously parsed hugepagesz or
4732 * default_hugepagesz.
a3437870 4733 */
9fee021d 4734 else if (!hugetlb_max_hstate)
a3437870
NA
4735 mhp = &default_hstate_max_huge_pages;
4736 else
4737 mhp = &parsed_hstate->max_huge_pages;
4738
8faa8b07 4739 if (mhp == last_mhp) {
282f4214 4740 pr_warn("HugeTLB: hugepages= specified twice without interleaving hugepagesz=, ignoring hugepages=%s\n", s);
f81f6e4b 4741 return 1;
8faa8b07
AK
4742 }
4743
b5389086
ZY
4744 while (*p) {
4745 count = 0;
4746 if (sscanf(p, "%lu%n", &tmp, &count) != 1)
4747 goto invalid;
4748 /* Parameter is node format */
4749 if (p[count] == ':') {
4750 if (!hugetlb_node_alloc_supported()) {
4751 pr_warn("HugeTLB: architecture can't support node specific alloc, ignoring!\n");
f81f6e4b 4752 return 1;
b5389086 4753 }
0a7a0f6f 4754 if (tmp >= MAX_NUMNODES || !node_online(tmp))
e79ce983 4755 goto invalid;
0a7a0f6f 4756 node = array_index_nospec(tmp, MAX_NUMNODES);
b5389086 4757 p += count + 1;
b5389086
ZY
4758 /* Parse hugepages */
4759 if (sscanf(p, "%lu%n", &tmp, &count) != 1)
4760 goto invalid;
4761 if (!hugetlb_max_hstate)
4762 default_hugepages_in_node[node] = tmp;
4763 else
4764 parsed_hstate->max_huge_pages_node[node] = tmp;
4765 *mhp += tmp;
4766			/* Go to parse next node */
4767 if (p[count] == ',')
4768 p += count + 1;
4769 else
4770 break;
4771 } else {
4772 if (p != s)
4773 goto invalid;
4774 *mhp = tmp;
4775 break;
4776 }
4777 }
a3437870 4778
8faa8b07
AK
4779 /*
4780 * Global state is always initialized later in hugetlb_init.
04adbc3f 4781 * But we need to allocate gigantic hstates here early to still
8faa8b07
AK
4782 * use the bootmem allocator.
4783 */
04adbc3f 4784 if (hugetlb_max_hstate && hstate_is_gigantic(parsed_hstate))
8faa8b07
AK
4785 hugetlb_hstate_alloc_pages(parsed_hstate);
4786
4787 last_mhp = mhp;
4788
a3437870 4789 return 1;
b5389086
ZY
4790
4791invalid:
4792 pr_warn("HugeTLB: Invalid hugepages parameter %s\n", p);
f87442f4 4793 hugepages_clear_pages_in_node();
f81f6e4b 4794 return 1;
a3437870 4795}
282f4214 4796__setup("hugepages=", hugepages_setup);
e11bfbfc 4797
282f4214
MK
4798/*
4799 * hugepagesz command line processing
4800 * A specific huge page size can only be specified once with hugepagesz.
4801 * hugepagesz is followed by hugepages on the command line. The global
4802 * variable 'parsed_valid_hugepagesz' is used to determine if a prior
4803 * hugepagesz argument was valid.
4804 */
359f2544 4805static int __init hugepagesz_setup(char *s)
e11bfbfc 4806{
359f2544 4807 unsigned long size;
282f4214
MK
4808 struct hstate *h;
4809
4810 parsed_valid_hugepagesz = false;
359f2544
MK
4811 size = (unsigned long)memparse(s, NULL);
4812
4813 if (!arch_hugetlb_valid_size(size)) {
282f4214 4814 pr_err("HugeTLB: unsupported hugepagesz=%s\n", s);
f81f6e4b 4815 return 1;
359f2544
MK
4816 }
4817
282f4214
MK
4818 h = size_to_hstate(size);
4819 if (h) {
4820 /*
4821 * hstate for this size already exists. This is normally
4822 * an error, but is allowed if the existing hstate is the
4823 * default hstate. More specifically, it is only allowed if
4824 * the number of huge pages for the default hstate was not
4825 * previously specified.
4826 */
4827 if (!parsed_default_hugepagesz || h != &default_hstate ||
4828 default_hstate.max_huge_pages) {
4829 pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s);
f81f6e4b 4830 return 1;
282f4214
MK
4831 }
4832
4833 /*
4834 * No need to call hugetlb_add_hstate() as hstate already
4835 * exists. But, do set parsed_hstate so that a following
4836 * hugepages= parameter will be applied to this hstate.
4837 */
4838 parsed_hstate = h;
4839 parsed_valid_hugepagesz = true;
4840 return 1;
38237830
MK
4841 }
4842
359f2544 4843 hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
282f4214 4844 parsed_valid_hugepagesz = true;
e11bfbfc
NP
4845 return 1;
4846}
359f2544
MK
4847__setup("hugepagesz=", hugepagesz_setup);
4848
282f4214
MK
4849/*
4850 * default_hugepagesz command line input
4851 * Only one instance of default_hugepagesz allowed on command line.
4852 */
ae94da89 4853static int __init default_hugepagesz_setup(char *s)
e11bfbfc 4854{
ae94da89 4855 unsigned long size;
b5389086 4856 int i;
ae94da89 4857
282f4214 4858 parsed_valid_hugepagesz = false;
282f4214
MK
4859 if (parsed_default_hugepagesz) {
4860 pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s);
f81f6e4b 4861 return 1;
282f4214
MK
4862 }
4863
ae94da89
MK
4864 size = (unsigned long)memparse(s, NULL);
4865
4866 if (!arch_hugetlb_valid_size(size)) {
282f4214 4867 pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s);
f81f6e4b 4868 return 1;
ae94da89
MK
4869 }
4870
282f4214
MK
4871 hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
4872 parsed_valid_hugepagesz = true;
4873 parsed_default_hugepagesz = true;
4874 default_hstate_idx = hstate_index(size_to_hstate(size));
4875
4876 /*
4877 * The number of default huge pages (for this size) could have been
4878 * specified as the first hugetlb parameter: hugepages=X. If so,
4879 * then default_hstate_max_huge_pages is set. If the default huge
5e0a760b 4880 * page size is gigantic (> MAX_PAGE_ORDER), then the pages must be
282f4214
MK
4881 * allocated here from the bootmem allocator.
4882 */
4883 if (default_hstate_max_huge_pages) {
4884 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
0a7a0f6f 4885 for_each_online_node(i)
b5389086
ZY
4886 default_hstate.max_huge_pages_node[i] =
4887 default_hugepages_in_node[i];
282f4214
MK
4888 if (hstate_is_gigantic(&default_hstate))
4889 hugetlb_hstate_alloc_pages(&default_hstate);
4890 default_hstate_max_huge_pages = 0;
4891 }
4892
e11bfbfc
NP
4893 return 1;
4894}
ae94da89 4895__setup("default_hugepagesz=", default_hugepagesz_setup);
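/*
 * Hedged example of the ordering described above, with hypothetical
 * numbers: for "hugepages=16 default_hugepagesz=1G", hugepages=16 is
 * parsed first and only records default_hstate_max_huge_pages = 16.
 * default_hugepagesz_setup() then selects the 1G hstate as the default,
 * copies the 16 into default_hstate.max_huge_pages and, because the
 * size is gigantic, allocates those pages from bootmem right here.
 */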
a3437870 4896
d2226ebd
FT
4897static nodemask_t *policy_mbind_nodemask(gfp_t gfp)
4898{
4899#ifdef CONFIG_NUMA
4900 struct mempolicy *mpol = get_task_policy(current);
4901
4902 /*
4903 * Only enforce MPOL_BIND policy which overlaps with cpuset policy
4904 * (from policy_nodemask) specifically for hugetlb case
4905 */
4906 if (mpol->mode == MPOL_BIND &&
4907 (apply_policy_zone(mpol, gfp_zone(gfp)) &&
4908 cpuset_nodemask_valid_mems_allowed(&mpol->nodes)))
4909 return &mpol->nodes;
4910#endif
4911 return NULL;
4912}
4913
8ca39e68 4914static unsigned int allowed_mems_nr(struct hstate *h)
8a213460
NA
4915{
4916 int node;
4917 unsigned int nr = 0;
d2226ebd 4918 nodemask_t *mbind_nodemask;
8ca39e68
MS
4919 unsigned int *array = h->free_huge_pages_node;
4920 gfp_t gfp_mask = htlb_alloc_mask(h);
4921
d2226ebd 4922 mbind_nodemask = policy_mbind_nodemask(gfp_mask);
8ca39e68 4923 for_each_node_mask(node, cpuset_current_mems_allowed) {
d2226ebd 4924 if (!mbind_nodemask || node_isset(node, *mbind_nodemask))
8ca39e68
MS
4925 nr += array[node];
4926 }
8a213460
NA
4927
4928 return nr;
4929}
4930
4931#ifdef CONFIG_SYSCTL
17743798
MS
4932static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write,
4933 void *buffer, size_t *length,
4934 loff_t *ppos, unsigned long *out)
4935{
4936 struct ctl_table dup_table;
4937
4938 /*
4939 * In order to avoid races with __do_proc_doulongvec_minmax(), we
4940 * duplicate @table and alter only the duplicate.
4941 */
4942 dup_table = *table;
4943 dup_table.data = out;
4944
4945 return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos);
4946}
4947
06808b08
LS
4948static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
4949 struct ctl_table *table, int write,
32927393 4950 void *buffer, size_t *length, loff_t *ppos)
1da177e4 4951{
e5ff2159 4952 struct hstate *h = &default_hstate;
238d3c13 4953 unsigned long tmp = h->max_huge_pages;
08d4a246 4954 int ret;
e5ff2159 4955
457c1b27 4956 if (!hugepages_supported())
86613628 4957 return -EOPNOTSUPP;
457c1b27 4958
17743798
MS
4959 ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
4960 &tmp);
08d4a246
MH
4961 if (ret)
4962 goto out;
e5ff2159 4963
238d3c13
DR
4964 if (write)
4965 ret = __nr_hugepages_store_common(obey_mempolicy, h,
4966 NUMA_NO_NODE, tmp, *length);
08d4a246
MH
4967out:
4968 return ret;
1da177e4 4969}
396faf03 4970
962de548 4971static int hugetlb_sysctl_handler(struct ctl_table *table, int write,
32927393 4972 void *buffer, size_t *length, loff_t *ppos)
06808b08
LS
4973{
4974
4975 return hugetlb_sysctl_handler_common(false, table, write,
4976 buffer, length, ppos);
4977}
4978
4979#ifdef CONFIG_NUMA
962de548 4980static int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
32927393 4981 void *buffer, size_t *length, loff_t *ppos)
06808b08
LS
4982{
4983 return hugetlb_sysctl_handler_common(true, table, write,
4984 buffer, length, ppos);
4985}
4986#endif /* CONFIG_NUMA */
4987
962de548 4988static int hugetlb_overcommit_handler(struct ctl_table *table, int write,
32927393 4989 void *buffer, size_t *length, loff_t *ppos)
a3d0c6aa 4990{
a5516438 4991 struct hstate *h = &default_hstate;
e5ff2159 4992 unsigned long tmp;
08d4a246 4993 int ret;
e5ff2159 4994
457c1b27 4995 if (!hugepages_supported())
86613628 4996 return -EOPNOTSUPP;
457c1b27 4997
c033a93c 4998 tmp = h->nr_overcommit_huge_pages;
e5ff2159 4999
bae7f4ae 5000 if (write && hstate_is_gigantic(h))
adbe8726
EM
5001 return -EINVAL;
5002
17743798
MS
5003 ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
5004 &tmp);
08d4a246
MH
5005 if (ret)
5006 goto out;
e5ff2159
AK
5007
5008 if (write) {
db71ef79 5009 spin_lock_irq(&hugetlb_lock);
e5ff2159 5010 h->nr_overcommit_huge_pages = tmp;
db71ef79 5011 spin_unlock_irq(&hugetlb_lock);
e5ff2159 5012 }
08d4a246
MH
5013out:
5014 return ret;
a3d0c6aa
NA
5015}
5016
962de548
KW
5017static struct ctl_table hugetlb_table[] = {
5018 {
5019 .procname = "nr_hugepages",
5020 .data = NULL,
5021 .maxlen = sizeof(unsigned long),
5022 .mode = 0644,
5023 .proc_handler = hugetlb_sysctl_handler,
5024 },
5025#ifdef CONFIG_NUMA
5026 {
5027 .procname = "nr_hugepages_mempolicy",
5028 .data = NULL,
5029 .maxlen = sizeof(unsigned long),
5030 .mode = 0644,
5031 .proc_handler = &hugetlb_mempolicy_sysctl_handler,
5032 },
5033#endif
5034 {
5035 .procname = "hugetlb_shm_group",
5036 .data = &sysctl_hugetlb_shm_group,
5037 .maxlen = sizeof(gid_t),
5038 .mode = 0644,
5039 .proc_handler = proc_dointvec,
5040 },
5041 {
5042 .procname = "nr_overcommit_hugepages",
5043 .data = NULL,
5044 .maxlen = sizeof(unsigned long),
5045 .mode = 0644,
5046 .proc_handler = hugetlb_overcommit_handler,
5047 },
5048 { }
5049};
5050
5051static void hugetlb_sysctl_init(void)
5052{
5053 register_sysctl_init("vm", hugetlb_table);
5054}
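/*
 * Sketch of how the handlers registered above are reached from user
 * space; the echoed values are hypothetical:
 *
 *   echo 1024 > /proc/sys/vm/nr_hugepages            -> hugetlb_sysctl_handler()
 *   echo 1024 > /proc/sys/vm/nr_hugepages_mempolicy  -> hugetlb_mempolicy_sysctl_handler()
 *   echo 64   > /proc/sys/vm/nr_overcommit_hugepages -> hugetlb_overcommit_handler()
 */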
1da177e4
LT
5055#endif /* CONFIG_SYSCTL */
5056
e1759c21 5057void hugetlb_report_meminfo(struct seq_file *m)
1da177e4 5058{
fcb2b0c5
RG
5059 struct hstate *h;
5060 unsigned long total = 0;
5061
457c1b27
NA
5062 if (!hugepages_supported())
5063 return;
fcb2b0c5
RG
5064
5065 for_each_hstate(h) {
5066 unsigned long count = h->nr_huge_pages;
5067
aca78307 5068 total += huge_page_size(h) * count;
fcb2b0c5
RG
5069
5070 if (h == &default_hstate)
5071 seq_printf(m,
5072 "HugePages_Total: %5lu\n"
5073 "HugePages_Free: %5lu\n"
5074 "HugePages_Rsvd: %5lu\n"
5075 "HugePages_Surp: %5lu\n"
5076 "Hugepagesize: %8lu kB\n",
5077 count,
5078 h->free_huge_pages,
5079 h->resv_huge_pages,
5080 h->surplus_huge_pages,
aca78307 5081 huge_page_size(h) / SZ_1K);
fcb2b0c5
RG
5082 }
5083
aca78307 5084 seq_printf(m, "Hugetlb: %8lu kB\n", total / SZ_1K);
1da177e4
LT
5085}
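/*
 * Example of the /proc/meminfo lines emitted above for a system whose
 * default huge page size is 2 MB; the counts are hypothetical and only
 * the field names and units come from the seq_printf() calls:
 *
 *   HugePages_Total:      16
 *   HugePages_Free:       14
 *   HugePages_Rsvd:        2
 *   HugePages_Surp:        0
 *   Hugepagesize:       2048 kB
 *   Hugetlb:           32768 kB
 */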
5086
7981593b 5087int hugetlb_report_node_meminfo(char *buf, int len, int nid)
1da177e4 5088{
a5516438 5089 struct hstate *h = &default_hstate;
7981593b 5090
457c1b27
NA
5091 if (!hugepages_supported())
5092 return 0;
7981593b
JP
5093
5094 return sysfs_emit_at(buf, len,
5095 "Node %d HugePages_Total: %5u\n"
5096 "Node %d HugePages_Free: %5u\n"
5097 "Node %d HugePages_Surp: %5u\n",
5098 nid, h->nr_huge_pages_node[nid],
5099 nid, h->free_huge_pages_node[nid],
5100 nid, h->surplus_huge_pages_node[nid]);
1da177e4
LT
5101}
5102
dcadcf1c 5103void hugetlb_show_meminfo_node(int nid)
949f7ec5
DR
5104{
5105 struct hstate *h;
949f7ec5 5106
457c1b27
NA
5107 if (!hugepages_supported())
5108 return;
5109
dcadcf1c
GL
5110 for_each_hstate(h)
5111 printk("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
5112 nid,
5113 h->nr_huge_pages_node[nid],
5114 h->free_huge_pages_node[nid],
5115 h->surplus_huge_pages_node[nid],
5116 huge_page_size(h) / SZ_1K);
949f7ec5
DR
5117}
5118
5d317b2b
NH
5119void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
5120{
5121 seq_printf(m, "HugetlbPages:\t%8lu kB\n",
6c1aa2d3 5122 K(atomic_long_read(&mm->hugetlb_usage)));
5d317b2b
NH
5123}
5124
1da177e4
LT
5125/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
5126unsigned long hugetlb_total_pages(void)
5127{
d0028588
WL
5128 struct hstate *h;
5129 unsigned long nr_total_pages = 0;
5130
5131 for_each_hstate(h)
5132 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
5133 return nr_total_pages;
1da177e4 5134}
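/*
 * Worked example for the sum above, assuming a 4 KiB base page size and
 * hypothetical pool sizes: a 2 MB hstate with nr_huge_pages = 512
 * contributes 512 * 512 = 262144 base pages, a 1 GB hstate with
 * nr_huge_pages = 2 contributes 2 * 262144 = 524288, so
 * hugetlb_total_pages() returns 786432 PAGE_SIZE units.
 */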
1da177e4 5135
a5516438 5136static int hugetlb_acct_memory(struct hstate *h, long delta)
fc1b8a73
MG
5137{
5138 int ret = -ENOMEM;
5139
0aa7f354
ML
5140 if (!delta)
5141 return 0;
5142
db71ef79 5143 spin_lock_irq(&hugetlb_lock);
fc1b8a73
MG
5144 /*
5145 * When cpuset is configured, it breaks the strict hugetlb page
5146 * reservation as the accounting is done on a global variable. Such
5147 * reservation is completely rubbish in the presence of cpuset because
5148 * the reservation is not checked against page availability for the
5149 * current cpuset. An application can still be OOM'ed by the kernel
5150 * due to a lack of free hugetlb pages in the cpuset that the task is in.
5151 * Attempting to enforce strict accounting with cpuset is almost
5152 * impossible (or too ugly) because cpusets are so fluid that
5153 * tasks or memory nodes can be dynamically moved between cpusets.
5154 *
5155 * The change of semantics for shared hugetlb mapping with cpuset is
5156 * undesirable. However, in order to preserve some of the semantics,
5157 * we fall back to check against current free page availability as
5158 * a best attempt and hopefully to minimize the impact of changing
5159 * semantics that cpuset has.
8ca39e68
MS
5160 *
5161 * Apart from cpuset, we also have memory policy mechanism that
5162 * also determines from which node the kernel will allocate memory
5163 * in a NUMA system. So similar to cpuset, we also should consider
5164 * the memory policy of the current task. Similar to the description
5165 * above.
fc1b8a73
MG
5166 */
5167 if (delta > 0) {
a5516438 5168 if (gather_surplus_pages(h, delta) < 0)
fc1b8a73
MG
5169 goto out;
5170
8ca39e68 5171 if (delta > allowed_mems_nr(h)) {
a5516438 5172 return_unused_surplus_pages(h, delta);
fc1b8a73
MG
5173 goto out;
5174 }
5175 }
5176
5177 ret = 0;
5178 if (delta < 0)
a5516438 5179 return_unused_surplus_pages(h, (unsigned long) -delta);
fc1b8a73
MG
5180
5181out:
db71ef79 5182 spin_unlock_irq(&hugetlb_lock);
fc1b8a73
MG
5183 return ret;
5184}
5185
84afd99b
AW
5186static void hugetlb_vm_op_open(struct vm_area_struct *vma)
5187{
f522c3ac 5188 struct resv_map *resv = vma_resv_map(vma);
84afd99b
AW
5189
5190 /*
612b8a31 5191 * HPAGE_RESV_OWNER indicates a private mapping.
84afd99b
AW
5192 * This new VMA should share its sibling's reservation map if present.
5193 * The VMA will only ever have a valid reservation map pointer where
5194 * it is being copied for another still existing VMA. As that VMA
25985edc 5195 * has a reference to the reservation map it cannot disappear until
84afd99b
AW
5196 * after this open call completes. It is therefore safe to take a
5197 * new reference here without additional locking.
5198 */
09a26e83
MK
5199 if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
5200 resv_map_dup_hugetlb_cgroup_uncharge_info(resv);
f522c3ac 5201 kref_get(&resv->refs);
09a26e83 5202 }
8d9bfb26 5203
131a79b4
MK
5204 /*
5205 * vma_lock structure for sharable mappings is vma specific.
612b8a31
MK
5206 * Clear old pointer (if copied via vm_area_dup) and allocate
5207 * new structure. Before clearing, make sure vma_lock is not
5208 * for this vma.
131a79b4
MK
5209 */
5210 if (vma->vm_flags & VM_MAYSHARE) {
612b8a31
MK
5211 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
5212
5213 if (vma_lock) {
5214 if (vma_lock->vma != vma) {
5215 vma->vm_private_data = NULL;
5216 hugetlb_vma_lock_alloc(vma);
5217 } else
5218 pr_warn("HugeTLB: vma_lock already exists in %s.\n", __func__);
5219 } else
5220 hugetlb_vma_lock_alloc(vma);
131a79b4 5221 }
84afd99b
AW
5222}
5223
a1e78772
MG
5224static void hugetlb_vm_op_close(struct vm_area_struct *vma)
5225{
a5516438 5226 struct hstate *h = hstate_vma(vma);
8d9bfb26 5227 struct resv_map *resv;
90481622 5228 struct hugepage_subpool *spool = subpool_vma(vma);
4e35f483 5229 unsigned long reserve, start, end;
1c5ecae3 5230 long gbl_reserve;
84afd99b 5231
8d9bfb26
MK
5232 hugetlb_vma_lock_free(vma);
5233
5234 resv = vma_resv_map(vma);
4e35f483
JK
5235 if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
5236 return;
84afd99b 5237
4e35f483
JK
5238 start = vma_hugecache_offset(h, vma, vma->vm_start);
5239 end = vma_hugecache_offset(h, vma, vma->vm_end);
84afd99b 5240
4e35f483 5241 reserve = (end - start) - region_count(resv, start, end);
e9fe92ae 5242 hugetlb_cgroup_uncharge_counter(resv, start, end);
4e35f483 5243 if (reserve) {
1c5ecae3
MK
5244 /*
5245 * Decrement reserve counts. The global reserve count may be
5246 * adjusted if the subpool has a minimum size.
5247 */
5248 gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
5249 hugetlb_acct_memory(h, -gbl_reserve);
84afd99b 5250 }
e9fe92ae
MA
5251
5252 kref_put(&resv->refs, resv_map_release);
a1e78772
MG
5253}
5254
31383c68
DW
5255static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
5256{
5257 if (addr & ~(huge_page_mask(hstate_vma(vma))))
5258 return -EINVAL;
b30c14cd
JH
5259
5260 /*
5261 * PMD sharing is only possible for PUD_SIZE-aligned address ranges
5262 * in HugeTLB VMAs. If we will lose PUD_SIZE alignment due to this
5263 * split, unshare PMDs in the PUD_SIZE interval surrounding addr now.
5264 */
5265 if (addr & ~PUD_MASK) {
5266 /*
5267 * hugetlb_vm_op_split is called right before we attempt to
5268 * split the VMA. We will need to unshare PMDs in the old and
5269 * new VMAs, so let's unshare before we split.
5270 */
5271 unsigned long floor = addr & PUD_MASK;
5272 unsigned long ceil = floor + PUD_SIZE;
5273
5274 if (floor >= vma->vm_start && ceil <= vma->vm_end)
5275 hugetlb_unshare_pmds(vma, floor, ceil);
5276 }
5277
31383c68
DW
5278 return 0;
5279}
5280
05ea8860
DW
5281static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
5282{
aca78307 5283 return huge_page_size(hstate_vma(vma));
05ea8860
DW
5284}
5285
1da177e4
LT
5286/*
5287 * We cannot handle pagefaults against hugetlb pages at all. They cause
5288 * handle_mm_fault() to try to instantiate regular-sized pages in the
6c26d310 5289 * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
1da177e4
LT
5290 * this far.
5291 */
b3ec9f33 5292static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
1da177e4
LT
5293{
5294 BUG();
d0217ac0 5295 return 0;
1da177e4
LT
5296}
5297
eec3636a
JC
5298/*
5299 * When a new function is introduced to vm_operations_struct and added
5300 * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
5301 * This is because under System V memory model, mappings created via
5302 * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
5303 * their original vm_ops are overwritten with shm_vm_ops.
5304 */
f0f37e2f 5305const struct vm_operations_struct hugetlb_vm_ops = {
d0217ac0 5306 .fault = hugetlb_vm_op_fault,
84afd99b 5307 .open = hugetlb_vm_op_open,
a1e78772 5308 .close = hugetlb_vm_op_close,
dd3b614f 5309 .may_split = hugetlb_vm_op_split,
05ea8860 5310 .pagesize = hugetlb_vm_op_pagesize,
1da177e4
LT
5311};
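/*
 * Minimal sketch of how this table is wired up: hugetlbfs_file_mmap()
 * in fs/hugetlbfs/inode.c installs it on hugetlb mappings, roughly
 *
 *	vma->vm_ops = &hugetlb_vm_ops;
 *
 * after which .fault, .open, .close, .may_split and .pagesize above are
 * invoked by the generic mm code for that VMA.
 */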
5312
1e8f889b
DG
5313static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
5314 int writable)
63551ae0
DG
5315{
5316 pte_t entry;
79c1c594 5317 unsigned int shift = huge_page_shift(hstate_vma(vma));
63551ae0 5318
1e8f889b 5319 if (writable) {
106c992a
GS
5320 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
5321 vma->vm_page_prot)));
63551ae0 5322 } else {
106c992a
GS
5323 entry = huge_pte_wrprotect(mk_huge_pte(page,
5324 vma->vm_page_prot));
63551ae0
DG
5325 }
5326 entry = pte_mkyoung(entry);
79c1c594 5327 entry = arch_make_huge_pte(entry, shift, vma->vm_flags);
63551ae0
DG
5328
5329 return entry;
5330}
5331
1e8f889b
DG
5332static void set_huge_ptep_writable(struct vm_area_struct *vma,
5333 unsigned long address, pte_t *ptep)
5334{
5335 pte_t entry;
5336
106c992a 5337 entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
32f84528 5338 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
4b3073e1 5339 update_mmu_cache(vma, address, ptep);
1e8f889b
DG
5340}
5341
d5ed7444 5342bool is_hugetlb_entry_migration(pte_t pte)
4a705fef
NH
5343{
5344 swp_entry_t swp;
5345
5346 if (huge_pte_none(pte) || pte_present(pte))
d5ed7444 5347 return false;
4a705fef 5348 swp = pte_to_swp_entry(pte);
d79d176a 5349 if (is_migration_entry(swp))
d5ed7444 5350 return true;
4a705fef 5351 else
d5ed7444 5352 return false;
4a705fef
NH
5353}
5354
52526ca7 5355bool is_hugetlb_entry_hwpoisoned(pte_t pte)
4a705fef
NH
5356{
5357 swp_entry_t swp;
5358
5359 if (huge_pte_none(pte) || pte_present(pte))
3e5c3600 5360 return false;
4a705fef 5361 swp = pte_to_swp_entry(pte);
d79d176a 5362 if (is_hwpoison_entry(swp))
3e5c3600 5363 return true;
4a705fef 5364 else
3e5c3600 5365 return false;
4a705fef 5366}
1e8f889b 5367
4eae4efa 5368static void
ea4c353d 5369hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr,
935d4f0c 5370 struct folio *new_folio, pte_t old, unsigned long sz)
4eae4efa 5371{
5a2f8d22
PX
5372 pte_t newpte = make_huge_pte(vma, &new_folio->page, 1);
5373
ea4c353d 5374 __folio_mark_uptodate(new_folio);
9d5fafd5 5375 hugetlb_add_new_anon_rmap(new_folio, vma, addr);
5a2f8d22
PX
5376 if (userfaultfd_wp(vma) && huge_pte_uffd_wp(old))
5377 newpte = huge_pte_mkuffd_wp(newpte);
935d4f0c 5378 set_huge_pte_at(vma->vm_mm, addr, ptep, newpte, sz);
4eae4efa 5379 hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm);
ea4c353d 5380 folio_set_hugetlb_migratable(new_folio);
4eae4efa
PX
5381}
5382
63551ae0 5383int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
bc70fbf2
PX
5384 struct vm_area_struct *dst_vma,
5385 struct vm_area_struct *src_vma)
63551ae0 5386{
3aa4ed80 5387 pte_t *src_pte, *dst_pte, entry;
ad27ce20 5388 struct folio *pte_folio;
1c59827d 5389 unsigned long addr;
bc70fbf2
PX
5390 bool cow = is_cow_mapping(src_vma->vm_flags);
5391 struct hstate *h = hstate_vma(src_vma);
a5516438 5392 unsigned long sz = huge_page_size(h);
4eae4efa 5393 unsigned long npages = pages_per_huge_page(h);
ac46d4f3 5394 struct mmu_notifier_range range;
e95a9851 5395 unsigned long last_addr_mask;
e8569dd2 5396 int ret = 0;
1e8f889b 5397
ac46d4f3 5398 if (cow) {
7d4a8be0 5399 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, src,
bc70fbf2
PX
5400 src_vma->vm_start,
5401 src_vma->vm_end);
ac46d4f3 5402 mmu_notifier_invalidate_range_start(&range);
e727bfd5 5403 vma_assert_write_locked(src_vma);
623a1ddf 5404 raw_write_seqcount_begin(&src->write_protect_seq);
40549ba8
MK
5405 } else {
5406 /*
5407 * For shared mappings the vma lock must be held before
9c67a207 5408 * calling hugetlb_walk() in the src vma. Otherwise, the
40549ba8
MK
5409 * returned ptep could go away if part of a shared pmd and
5410 * another thread calls huge_pmd_unshare.
5411 */
5412 hugetlb_vma_lock_read(src_vma);
ac46d4f3 5413 }
e8569dd2 5414
e95a9851 5415 last_addr_mask = hugetlb_mask_last_page(h);
bc70fbf2 5416 for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) {
cb900f41 5417 spinlock_t *src_ptl, *dst_ptl;
9c67a207 5418 src_pte = hugetlb_walk(src_vma, addr, sz);
e95a9851
MK
5419 if (!src_pte) {
5420 addr |= last_addr_mask;
c74df32c 5421 continue;
e95a9851 5422 }
bc70fbf2 5423 dst_pte = huge_pte_alloc(dst, dst_vma, addr, sz);
e8569dd2
AS
5424 if (!dst_pte) {
5425 ret = -ENOMEM;
5426 break;
5427 }
c5c99429 5428
5e41540c
MK
5429 /*
5430 * If the pagetables are shared, don't copy or take references.
5e41540c 5431 *
3aa4ed80 5432 * dst_pte == src_pte is the common case of src/dest sharing.
5e41540c 5433 * However, src could have 'unshared' and dst shares with
3aa4ed80
ML
5434 * another vma. So page_count of ptep page is checked instead
5435 * to reliably determine whether pte is shared.
5e41540c 5436 */
3aa4ed80 5437 if (page_count(virt_to_page(dst_pte)) > 1) {
e95a9851 5438 addr |= last_addr_mask;
c5c99429 5439 continue;
e95a9851 5440 }
c5c99429 5441
cb900f41
KS
5442 dst_ptl = huge_pte_lock(h, dst, dst_pte);
5443 src_ptl = huge_pte_lockptr(h, src, src_pte);
5444 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
4a705fef 5445 entry = huge_ptep_get(src_pte);
4eae4efa 5446again:
3aa4ed80 5447 if (huge_pte_none(entry)) {
5e41540c 5448 /*
3aa4ed80 5449 * Skip if src entry none.
5e41540c 5450 */
4a705fef 5451 ;
c2cb0dcc 5452 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) {
5a2f8d22 5453 if (!userfaultfd_wp(dst_vma))
c2cb0dcc 5454 entry = huge_pte_clear_uffd_wp(entry);
935d4f0c 5455 set_huge_pte_at(dst, addr, dst_pte, entry, sz);
c2cb0dcc 5456 } else if (unlikely(is_hugetlb_entry_migration(entry))) {
4a705fef 5457 swp_entry_t swp_entry = pte_to_swp_entry(entry);
5a2f8d22 5458 bool uffd_wp = pte_swp_uffd_wp(entry);
4a705fef 5459
6c287605 5460 if (!is_readable_migration_entry(swp_entry) && cow) {
4a705fef
NH
5461 /*
5462 * COW mappings require pages in both
5463 * parent and child to be set to read.
5464 */
4dd845b5
AP
5465 swp_entry = make_readable_migration_entry(
5466 swp_offset(swp_entry));
4a705fef 5467 entry = swp_entry_to_pte(swp_entry);
bc70fbf2 5468 if (userfaultfd_wp(src_vma) && uffd_wp)
5a2f8d22 5469 entry = pte_swp_mkuffd_wp(entry);
935d4f0c 5470 set_huge_pte_at(src, addr, src_pte, entry, sz);
4a705fef 5471 }
5a2f8d22 5472 if (!userfaultfd_wp(dst_vma))
bc70fbf2 5473 entry = huge_pte_clear_uffd_wp(entry);
935d4f0c 5474 set_huge_pte_at(dst, addr, dst_pte, entry, sz);
bc70fbf2 5475 } else if (unlikely(is_pte_marker(entry))) {
af19487f
AR
5476 pte_marker marker = copy_pte_marker(
5477 pte_to_swp_entry(entry), dst_vma);
5478
5479 if (marker)
5480 set_huge_pte_at(dst, addr, dst_pte,
935d4f0c 5481 make_pte_marker(marker), sz);
4a705fef 5482 } else {
4eae4efa 5483 entry = huge_ptep_get(src_pte);
ad27ce20
Z
5484 pte_folio = page_folio(pte_page(entry));
5485 folio_get(pte_folio);
4eae4efa
PX
5486
5487 /*
fb3d824d
DH
5488 * Failing to duplicate the anon rmap is a rare case
5489 * where we see pinned hugetlb pages while they're
5490 * prone to COW. We need to do the COW earlier during
5491 * fork.
4eae4efa
PX
5492 *
5493 * When pre-allocating the page or copying data, we
5494 * need to be without the pgtable locks since we could
5495 * sleep during the process.
5496 */
ad27ce20 5497 if (!folio_test_anon(pte_folio)) {
44887f39 5498 hugetlb_add_file_rmap(pte_folio);
ebe2e35e 5499 } else if (hugetlb_try_dup_anon_rmap(pte_folio, src_vma)) {
4eae4efa 5500 pte_t src_pte_old = entry;
d0ce0e47 5501 struct folio *new_folio;
4eae4efa
PX
5502
5503 spin_unlock(src_ptl);
5504 spin_unlock(dst_ptl);
5505 /* Do not use reserve as it's private owned */
d0ce0e47
SK
5506 new_folio = alloc_hugetlb_folio(dst_vma, addr, 1);
5507 if (IS_ERR(new_folio)) {
ad27ce20 5508 folio_put(pte_folio);
d0ce0e47 5509 ret = PTR_ERR(new_folio);
4eae4efa
PX
5510 break;
5511 }
1cb9dc4b 5512 ret = copy_user_large_folio(new_folio,
ad27ce20
Z
5513 pte_folio,
5514 addr, dst_vma);
5515 folio_put(pte_folio);
1cb9dc4b
LS
5516 if (ret) {
5517 folio_put(new_folio);
5518 break;
5519 }
4eae4efa 5520
d0ce0e47 5521 /* Install the new hugetlb folio if src pte stable */
4eae4efa
PX
5522 dst_ptl = huge_pte_lock(h, dst, dst_pte);
5523 src_ptl = huge_pte_lockptr(h, src, src_pte);
5524 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
5525 entry = huge_ptep_get(src_pte);
5526 if (!pte_same(src_pte_old, entry)) {
bc70fbf2 5527 restore_reserve_on_error(h, dst_vma, addr,
d2d7bb44 5528 new_folio);
d0ce0e47 5529 folio_put(new_folio);
3aa4ed80 5530 /* huge_ptep of dst_pte won't change as in child */
4eae4efa
PX
5531 goto again;
5532 }
5a2f8d22 5533 hugetlb_install_folio(dst_vma, dst_pte, addr,
935d4f0c 5534 new_folio, src_pte_old, sz);
4eae4efa
PX
5535 spin_unlock(src_ptl);
5536 spin_unlock(dst_ptl);
5537 continue;
5538 }
5539
34ee645e 5540 if (cow) {
0f10851e
JG
5541 /*
5542 * No need to notify as we are downgrading page
5543 * table protection, not changing it to point
5544 * to a new page.
5545 *
ee65728e 5546 * See Documentation/mm/mmu_notifier.rst
0f10851e 5547 */
7f2e9525 5548 huge_ptep_set_wrprotect(src, addr, src_pte);
84894e1c 5549 entry = huge_pte_wrprotect(entry);
34ee645e 5550 }
4eae4efa 5551
5a2f8d22
PX
5552 if (!userfaultfd_wp(dst_vma))
5553 entry = huge_pte_clear_uffd_wp(entry);
5554
935d4f0c 5555 set_huge_pte_at(dst, addr, dst_pte, entry, sz);
4eae4efa 5556 hugetlb_count_add(npages, dst);
1c59827d 5557 }
cb900f41
KS
5558 spin_unlock(src_ptl);
5559 spin_unlock(dst_ptl);
63551ae0 5560 }
63551ae0 5561
623a1ddf
DH
5562 if (cow) {
5563 raw_write_seqcount_end(&src->write_protect_seq);
ac46d4f3 5564 mmu_notifier_invalidate_range_end(&range);
40549ba8
MK
5565 } else {
5566 hugetlb_vma_unlock_read(src_vma);
623a1ddf 5567 }
e8569dd2
AS
5568
5569 return ret;
63551ae0
DG
5570}
5571
550a7d60 5572static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
935d4f0c
RR
5573 unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte,
5574 unsigned long sz)
550a7d60
MA
5575{
5576 struct hstate *h = hstate_vma(vma);
5577 struct mm_struct *mm = vma->vm_mm;
550a7d60 5578 spinlock_t *src_ptl, *dst_ptl;
db110a99 5579 pte_t pte;
550a7d60 5580
550a7d60
MA
5581 dst_ptl = huge_pte_lock(h, mm, dst_pte);
5582 src_ptl = huge_pte_lockptr(h, mm, src_pte);
5583
5584 /*
5585 * We don't have to worry about the ordering of src and dst ptlocks
8651a137 5586 * because exclusive mmap_lock (or the i_mmap_lock) prevents deadlock.
550a7d60
MA
5587 */
5588 if (src_ptl != dst_ptl)
5589 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
5590
5591 pte = huge_ptep_get_and_clear(mm, old_addr, src_pte);
935d4f0c 5592 set_huge_pte_at(mm, new_addr, dst_pte, pte, sz);
550a7d60
MA
5593
5594 if (src_ptl != dst_ptl)
5595 spin_unlock(src_ptl);
5596 spin_unlock(dst_ptl);
5597}
5598
5599int move_hugetlb_page_tables(struct vm_area_struct *vma,
5600 struct vm_area_struct *new_vma,
5601 unsigned long old_addr, unsigned long new_addr,
5602 unsigned long len)
5603{
5604 struct hstate *h = hstate_vma(vma);
5605 struct address_space *mapping = vma->vm_file->f_mapping;
5606 unsigned long sz = huge_page_size(h);
5607 struct mm_struct *mm = vma->vm_mm;
5608 unsigned long old_end = old_addr + len;
e95a9851 5609 unsigned long last_addr_mask;
550a7d60
MA
5610 pte_t *src_pte, *dst_pte;
5611 struct mmu_notifier_range range;
3d0b95cd 5612 bool shared_pmd = false;
550a7d60 5613
7d4a8be0 5614 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, old_addr,
550a7d60
MA
5615 old_end);
5616 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
3d0b95cd
BW
5617 /*
5618 * In case of shared PMDs, we should cover the maximum possible
5619 * range.
5620 */
5621 flush_cache_range(vma, range.start, range.end);
5622
550a7d60 5623 mmu_notifier_invalidate_range_start(&range);
e95a9851 5624 last_addr_mask = hugetlb_mask_last_page(h);
550a7d60 5625 /* Prevent race with file truncation */
40549ba8 5626 hugetlb_vma_lock_write(vma);
550a7d60
MA
5627 i_mmap_lock_write(mapping);
5628 for (; old_addr < old_end; old_addr += sz, new_addr += sz) {
9c67a207 5629 src_pte = hugetlb_walk(vma, old_addr, sz);
e95a9851
MK
5630 if (!src_pte) {
5631 old_addr |= last_addr_mask;
5632 new_addr |= last_addr_mask;
550a7d60 5633 continue;
e95a9851 5634 }
550a7d60
MA
5635 if (huge_pte_none(huge_ptep_get(src_pte)))
5636 continue;
5637
4ddb4d91 5638 if (huge_pmd_unshare(mm, vma, old_addr, src_pte)) {
3d0b95cd 5639 shared_pmd = true;
4ddb4d91
MK
5640 old_addr |= last_addr_mask;
5641 new_addr |= last_addr_mask;
550a7d60 5642 continue;
3d0b95cd 5643 }
550a7d60
MA
5644
5645 dst_pte = huge_pte_alloc(mm, new_vma, new_addr, sz);
5646 if (!dst_pte)
5647 break;
5648
935d4f0c 5649 move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte, sz);
550a7d60 5650 }
3d0b95cd
BW
5651
5652 if (shared_pmd)
f720b471 5653 flush_hugetlb_tlb_range(vma, range.start, range.end);
3d0b95cd 5654 else
f720b471 5655 flush_hugetlb_tlb_range(vma, old_end - len, old_end);
550a7d60 5656 mmu_notifier_invalidate_range_end(&range);
13e4ad2c 5657 i_mmap_unlock_write(mapping);
40549ba8 5658 hugetlb_vma_unlock_write(vma);
550a7d60
MA
5659
5660 return len + old_addr - old_end;
5661}
5662
2820b0f0
RR
5663void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
5664 unsigned long start, unsigned long end,
5665 struct page *ref_page, zap_flags_t zap_flags)
63551ae0
DG
5666{
5667 struct mm_struct *mm = vma->vm_mm;
5668 unsigned long address;
c7546f8f 5669 pte_t *ptep;
63551ae0 5670 pte_t pte;
cb900f41 5671 spinlock_t *ptl;
63551ae0 5672 struct page *page;
a5516438
AK
5673 struct hstate *h = hstate_vma(vma);
5674 unsigned long sz = huge_page_size(h);
df7a6d1f 5675 bool adjust_reservation = false;
e95a9851 5676 unsigned long last_addr_mask;
a4a118f2 5677 bool force_flush = false;
a5516438 5678
63551ae0 5679 WARN_ON(!is_vm_hugetlb_page(vma));
a5516438
AK
5680 BUG_ON(start & ~huge_page_mask(h));
5681 BUG_ON(end & ~huge_page_mask(h));
63551ae0 5682
07e32661
AK
5683 /*
5684 * This is a hugetlb vma, all the pte entries should point
5685 * to huge page.
5686 */
ed6a7935 5687 tlb_change_page_size(tlb, sz);
24669e58 5688 tlb_start_vma(tlb, vma);
dff11abe 5689
e95a9851 5690 last_addr_mask = hugetlb_mask_last_page(h);
569f48b8 5691 address = start;
569f48b8 5692 for (; address < end; address += sz) {
9c67a207 5693 ptep = hugetlb_walk(vma, address, sz);
e95a9851
MK
5694 if (!ptep) {
5695 address |= last_addr_mask;
c7546f8f 5696 continue;
e95a9851 5697 }
c7546f8f 5698
cb900f41 5699 ptl = huge_pte_lock(h, mm, ptep);
4ddb4d91 5700 if (huge_pmd_unshare(mm, vma, address, ptep)) {
31d49da5 5701 spin_unlock(ptl);
a4a118f2
NA
5702 tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
5703 force_flush = true;
4ddb4d91 5704 address |= last_addr_mask;
31d49da5
AK
5705 continue;
5706 }
39dde65c 5707
6629326b 5708 pte = huge_ptep_get(ptep);
31d49da5
AK
5709 if (huge_pte_none(pte)) {
5710 spin_unlock(ptl);
5711 continue;
5712 }
6629326b
HD
5713
5714 /*
9fbc1f63
NH
5715 * Migrating hugepage or HWPoisoned hugepage is already
5716 * unmapped and its refcount is dropped, so just clear pte here.
6629326b 5717 */
9fbc1f63 5718 if (unlikely(!pte_present(pte))) {
05e90bd0
PX
5719 /*
5720 * If the pte was wr-protected by uffd-wp in any of the
5721 * swap forms, meanwhile the caller does not want to
5722 * drop the uffd-wp bit in this zap, then replace the
5723 * pte with a marker.
5724 */
5725 if (pte_swp_uffd_wp_any(pte) &&
5726 !(zap_flags & ZAP_FLAG_DROP_MARKER))
5727 set_huge_pte_at(mm, address, ptep,
935d4f0c
RR
5728 make_pte_marker(PTE_MARKER_UFFD_WP),
5729 sz);
05e90bd0
PX
5730 else
5731 huge_pte_clear(mm, address, ptep, sz);
31d49da5
AK
5732 spin_unlock(ptl);
5733 continue;
8c4894c6 5734 }
6629326b
HD
5735
5736 page = pte_page(pte);
04f2cbe3
MG
5737 /*
5738 * If a reference page is supplied, it is because a specific
5739 * page is being unmapped, not a range. Ensure the page we
5740 * are about to unmap is the actual page of interest.
5741 */
5742 if (ref_page) {
31d49da5
AK
5743 if (page != ref_page) {
5744 spin_unlock(ptl);
5745 continue;
5746 }
04f2cbe3
MG
5747 /*
5748 * Mark the VMA as having unmapped its page so that
5749 * future faults in this VMA will fail rather than
5750 * looking like data was lost
5751 */
5752 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
5753 }
5754
c7546f8f 5755 pte = huge_ptep_get_and_clear(mm, address, ptep);
b528e4b6 5756 tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
106c992a 5757 if (huge_pte_dirty(pte))
6649a386 5758 set_page_dirty(page);
05e90bd0
PX
5759 /* Leave a uffd-wp pte marker if needed */
5760 if (huge_pte_uffd_wp(pte) &&
5761 !(zap_flags & ZAP_FLAG_DROP_MARKER))
5762 set_huge_pte_at(mm, address, ptep,
935d4f0c
RR
5763 make_pte_marker(PTE_MARKER_UFFD_WP),
5764 sz);
5d317b2b 5765 hugetlb_count_sub(pages_per_huge_page(h), mm);
e135826b 5766 hugetlb_remove_rmap(page_folio(page));
31d49da5 5767
df7a6d1f
BL
5768 /*
5769 * Restore the reservation for anonymous page, otherwise the
5770 * backing page could be stolen by someone.
5771 * If there we are freeing a surplus, do not set the restore
5772 * reservation bit.
5773 */
5774 if (!h->surplus_huge_pages && __vma_private_lock(vma) &&
5775 folio_test_anon(page_folio(page))) {
5776 folio_set_hugetlb_restore_reserve(page_folio(page));
5777 /* Reservation to be adjusted after the spin lock */
5778 adjust_reservation = true;
5779 }
5780
cb900f41 5781 spin_unlock(ptl);
df7a6d1f
BL
5782
5783 /*
5784 * Adjust the reservation for the region that will have the
5785 * reserve restored. Keep in mind that vma_needs_reservation() changes
5786 * resv->adds_in_progress if it succeeds. If this is not done,
5787 * do_exit() will not see it, and will keep the reservation
5788 * forever.
5789 */
5790 if (adjust_reservation && vma_needs_reservation(h, vma, address))
5791 vma_add_reservation(h, vma, address);
5792
e77b0852 5793 tlb_remove_page_size(tlb, page, huge_page_size(h));
31d49da5
AK
5794 /*
5795 * Bail out after unmapping reference page if supplied
5796 */
5797 if (ref_page)
5798 break;
fe1668ae 5799 }
24669e58 5800 tlb_end_vma(tlb, vma);
a4a118f2
NA
5801
5802 /*
5803 * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
5804 * could defer the flush until now, since by holding i_mmap_rwsem we
5805 * guaranteed that the last reference would not be dropped. But we must
5806 * do the flushing before we return, as otherwise i_mmap_rwsem will be
5807 * dropped and the last reference to the shared PMDs page might be
5808 * dropped as well.
5809 *
5810 * In theory we could defer the freeing of the PMD pages as well, but
5811 * huge_pmd_unshare() relies on the exact page_count for the PMD page to
5812 * detect sharing, so we cannot defer the release of the page either.
5813 * Instead, do flush now.
5814 */
5815 if (force_flush)
5816 tlb_flush_mmu_tlbonly(tlb);
1da177e4 5817}
63551ae0 5818
2820b0f0
RR
5819void __hugetlb_zap_begin(struct vm_area_struct *vma,
5820 unsigned long *start, unsigned long *end)
d833352a 5821{
2820b0f0
RR
5822 if (!vma->vm_file) /* hugetlbfs_file_mmap error */
5823 return;
5824
5825 adjust_range_if_pmd_sharing_possible(vma, start, end);
131a79b4 5826 hugetlb_vma_lock_write(vma);
2820b0f0
RR
5827 if (vma->vm_file)
5828 i_mmap_lock_write(vma->vm_file->f_mapping);
5829}
131a79b4 5830
2820b0f0
RR
5831void __hugetlb_zap_end(struct vm_area_struct *vma,
5832 struct zap_details *details)
5833{
5834 zap_flags_t zap_flags = details ? details->zap_flags : 0;
131a79b4 5835
2820b0f0
RR
5836 if (!vma->vm_file) /* hugetlbfs_file_mmap error */
5837 return;
d833352a 5838
04ada095
MK
5839 if (zap_flags & ZAP_FLAG_UNMAP) { /* final unmap */
5840 /*
5841 * Unlock and free the vma lock before releasing i_mmap_rwsem.
5842 * When the vma_lock is freed, this makes the vma ineligible
5843 * for pmd sharing. And, i_mmap_rwsem is required to set up
5844 * pmd sharing. This is important as page tables for this
5845 * unmapped range will be asynchronously deleted. If the page
5846 * tables are shared, there will be issues when accessed by
5847 * someone else.
5848 */
5849 __hugetlb_vma_unlock_write_free(vma);
04ada095 5850 } else {
04ada095
MK
5851 hugetlb_vma_unlock_write(vma);
5852 }
2820b0f0
RR
5853
5854 if (vma->vm_file)
5855 i_mmap_unlock_write(vma->vm_file->f_mapping);
d833352a
MG
5856}
5857
502717f4 5858void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
05e90bd0
PX
5859 unsigned long end, struct page *ref_page,
5860 zap_flags_t zap_flags)
502717f4 5861{
369258ce 5862 struct mmu_notifier_range range;
24669e58 5863 struct mmu_gather tlb;
dff11abe 5864
7d4a8be0 5865 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
369258ce
MK
5866 start, end);
5867 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
5868 mmu_notifier_invalidate_range_start(&range);
a72afd87 5869 tlb_gather_mmu(&tlb, vma->vm_mm);
369258ce 5870
05e90bd0 5871 __unmap_hugepage_range(&tlb, vma, start, end, ref_page, zap_flags);
369258ce
MK
5872
5873 mmu_notifier_invalidate_range_end(&range);
ae8eba8b 5874 tlb_finish_mmu(&tlb);
502717f4
CK
5875}
5876
04f2cbe3
MG
5877/*
5878 * This is called when the original mapper is failing to COW a MAP_PRIVATE
578b7725 5879 * mapping it owns the reserve page for. The intention is to unmap the page
04f2cbe3
MG
5880 * from other VMAs and let the children be SIGKILLed if they are faulting the
5881 * same region.
5882 */
2f4612af
DB
5883static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
5884 struct page *page, unsigned long address)
04f2cbe3 5885{
7526674d 5886 struct hstate *h = hstate_vma(vma);
04f2cbe3
MG
5887 struct vm_area_struct *iter_vma;
5888 struct address_space *mapping;
04f2cbe3
MG
5889 pgoff_t pgoff;
5890
5891 /*
5892 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
5893 * from page cache lookup which is in HPAGE_SIZE units.
5894 */
7526674d 5895 address = address & huge_page_mask(h);
36e4f20a
MH
5896 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
5897 vma->vm_pgoff;
93c76a3d 5898 mapping = vma->vm_file->f_mapping;
04f2cbe3 5899
4eb2b1dc
MG
5900 /*
5901 * Take the mapping lock for the duration of the table walk. As
5902 * this mapping should be shared between all the VMAs,
5903 * __unmap_hugepage_range() is called as the lock is already held
5904 */
83cde9e8 5905 i_mmap_lock_write(mapping);
6b2dbba8 5906 vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
04f2cbe3
MG
5907 /* Do not unmap the current VMA */
5908 if (iter_vma == vma)
5909 continue;
5910
2f84a899
MG
5911 /*
5912 * Shared VMAs have their own reserves and do not affect
5913 * MAP_PRIVATE accounting but it is possible that a shared
5914 * VMA is using the same page so check and skip such VMAs.
5915 */
5916 if (iter_vma->vm_flags & VM_MAYSHARE)
5917 continue;
5918
04f2cbe3
MG
5919 /*
5920 * Unmap the page from other VMAs without their own reserves.
5921 * They get marked to be SIGKILLed if they fault in these
5922 * areas. This is because a future no-page fault on this VMA
5923 * could insert a zeroed page instead of the data existing
5924 * from the time of fork. This would look like data corruption.
5925 */
5926 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
24669e58 5927 unmap_hugepage_range(iter_vma, address,
05e90bd0 5928 address + huge_page_size(h), page, 0);
04f2cbe3 5929 }
83cde9e8 5930 i_mmap_unlock_write(mapping);
04f2cbe3
MG
5931}
5932
0fe6e20b 5933/*
c89357e2 5934 * hugetlb_wp() should be called with page lock of the original hugepage held.
aa6d2e8c 5935 * Called with hugetlb_fault_mutex_table held and pte_page locked so we
ef009b25
MH
5936 * cannot race with other handlers or page migration.
5937 * Keep the pte_same checks anyway to make transition from the mutex easier.
0fe6e20b 5938 */
c89357e2
DH
5939static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
5940 unsigned long address, pte_t *ptep, unsigned int flags,
9acad7ba
VMO
5941 struct folio *pagecache_folio, spinlock_t *ptl,
5942 struct vm_fault *vmf)
1e8f889b 5943{
c89357e2 5944 const bool unshare = flags & FAULT_FLAG_UNSHARE;
60d5b473 5945 pte_t pte = huge_ptep_get(ptep);
a5516438 5946 struct hstate *h = hstate_vma(vma);
959a78b6 5947 struct folio *old_folio;
d0ce0e47 5948 struct folio *new_folio;
2b740303
SJ
5949 int outside_reserve = 0;
5950 vm_fault_t ret = 0;
974e6d66 5951 unsigned long haddr = address & huge_page_mask(h);
ac46d4f3 5952 struct mmu_notifier_range range;
1e8f889b 5953
60d5b473
PX
5954 /*
5955 * Never handle CoW for uffd-wp protected pages. It should be only
5956 * handled when the uffd-wp protection is removed.
5957 *
5958 * Note that only the CoW optimization path (in hugetlb_no_page())
5959 * can trigger this, because hugetlb_fault() will always resolve
5960 * uffd-wp bit first.
5961 */
5962 if (!unshare && huge_pte_uffd_wp(pte))
5963 return 0;
5964
1d8d1464
DH
5965 /*
5966 * hugetlb does not support FOLL_FORCE-style write faults that keep the
5967 * PTE mapped R/O such as maybe_mkwrite() would do.
5968 */
5969 if (WARN_ON_ONCE(!unshare && !(vma->vm_flags & VM_WRITE)))
5970 return VM_FAULT_SIGSEGV;
5971
5972 /* Let's take out MAP_SHARED mappings first. */
5973 if (vma->vm_flags & VM_MAYSHARE) {
1d8d1464
DH
5974 set_huge_ptep_writable(vma, haddr, ptep);
5975 return 0;
5976 }
5977
959a78b6 5978 old_folio = page_folio(pte_page(pte));
1e8f889b 5979
662ce1dc
YY
5980 delayacct_wpcopy_start();
5981
04f2cbe3 5982retry_avoidcopy:
c89357e2
DH
5983 /*
5984 * If no-one else is actually using this page, we're the exclusive
5985 * owner and can reuse this page.
5986 */
959a78b6 5987 if (folio_mapcount(old_folio) == 1 && folio_test_anon(old_folio)) {
5ca43289 5988 if (!PageAnonExclusive(&old_folio->page)) {
06968625 5989 folio_move_anon_rmap(old_folio, vma);
5ca43289
DH
5990 SetPageAnonExclusive(&old_folio->page);
5991 }
c89357e2
DH
5992 if (likely(!unshare))
5993 set_huge_ptep_writable(vma, haddr, ptep);
662ce1dc
YY
5994
5995 delayacct_wpcopy_end();
83c54070 5996 return 0;
1e8f889b 5997 }
959a78b6
Z
5998 VM_BUG_ON_PAGE(folio_test_anon(old_folio) &&
5999 PageAnonExclusive(&old_folio->page), &old_folio->page);
1e8f889b 6000
04f2cbe3
MG
6001 /*
6002 * If the process that created a MAP_PRIVATE mapping is about to
6003 * perform a COW due to a shared page count, attempt to satisfy
6004 * the allocation without using the existing reserves. The pagecache
6005 * page is used to determine if the reserve at this address was
6006 * consumed or not. If reserves were used, a partial faulted mapping
6007 * at the time of fork() could consume its reserves on COW instead
6008 * of the full address range.
6009 */
5944d011 6010 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
959a78b6 6011 old_folio != pagecache_folio)
04f2cbe3
MG
6012 outside_reserve = 1;
6013
959a78b6 6014 folio_get(old_folio);
b76c8cfb 6015
ad4404a2
DB
6016 /*
6017 * Drop page table lock as buddy allocator may be called. It will
6018 * be acquired again before returning to the caller, as expected.
6019 */
cb900f41 6020 spin_unlock(ptl);
d0ce0e47 6021 new_folio = alloc_hugetlb_folio(vma, haddr, outside_reserve);
1e8f889b 6022
d0ce0e47 6023 if (IS_ERR(new_folio)) {
04f2cbe3
MG
6024 /*
6025 * If a process owning a MAP_PRIVATE mapping fails to COW,
6026 * it is due to references held by a child and an insufficient
6027 * huge page pool. To guarantee the original mappers
6028 * reliability, unmap the page from child processes. The child
6029 * may get SIGKILLed if it later faults.
6030 */
6031 if (outside_reserve) {
40549ba8
MK
6032 struct address_space *mapping = vma->vm_file->f_mapping;
6033 pgoff_t idx;
6034 u32 hash;
6035
959a78b6 6036 folio_put(old_folio);
40549ba8
MK
6037 /*
6038 * Drop hugetlb_fault_mutex and vma_lock before
6039 * unmapping. unmapping needs to hold vma_lock
6040 * in write mode. Dropping vma_lock in read mode
6041 * here is OK as COW mappings do not interact with
6042 * PMD sharing.
6043 *
6044 * Reacquire both after unmap operation.
6045 */
6046 idx = vma_hugecache_offset(h, vma, haddr);
6047 hash = hugetlb_fault_mutex_hash(mapping, idx);
6048 hugetlb_vma_unlock_read(vma);
6049 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6050
959a78b6 6051 unmap_ref_private(mm, vma, &old_folio->page, haddr);
40549ba8
MK
6052
6053 mutex_lock(&hugetlb_fault_mutex_table[hash]);
6054 hugetlb_vma_lock_read(vma);
2f4612af 6055 spin_lock(ptl);
9c67a207 6056 ptep = hugetlb_walk(vma, haddr, huge_page_size(h));
2f4612af
DB
6057 if (likely(ptep &&
6058 pte_same(huge_ptep_get(ptep), pte)))
6059 goto retry_avoidcopy;
6060 /*
6061 * race occurs while re-acquiring page table
6062 * lock, and our job is done.
6063 */
662ce1dc 6064 delayacct_wpcopy_end();
2f4612af 6065 return 0;
04f2cbe3
MG
6066 }
6067
d0ce0e47 6068 ret = vmf_error(PTR_ERR(new_folio));
ad4404a2 6069 goto out_release_old;
1e8f889b
DG
6070 }
6071
0fe6e20b
NH
6072 /*
6073 * When the original hugepage is shared one, it does not have
6074 * anon_vma prepared.
6075 */
9acad7ba
VMO
6076 ret = vmf_anon_prepare(vmf);
6077 if (unlikely(ret))
ad4404a2 6078 goto out_release_all;
0fe6e20b 6079
959a78b6 6080 if (copy_user_large_folio(new_folio, old_folio, address, vma)) {
1cb9dc4b
LS
6081 ret = VM_FAULT_HWPOISON_LARGE;
6082 goto out_release_all;
6083 }
d0ce0e47 6084 __folio_mark_uptodate(new_folio);
1e8f889b 6085
7d4a8be0 6086 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, haddr,
6f4f13e8 6087 haddr + huge_page_size(h));
ac46d4f3 6088 mmu_notifier_invalidate_range_start(&range);
ad4404a2 6089
b76c8cfb 6090 /*
cb900f41 6091 * Retake the page table lock to check for racing updates
b76c8cfb
LW
6092 * before the page tables are altered
6093 */
cb900f41 6094 spin_lock(ptl);
9c67a207 6095 ptep = hugetlb_walk(vma, haddr, huge_page_size(h));
a9af0c5d 6096 if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
0f230bc2
PX
6097 pte_t newpte = make_huge_pte(vma, &new_folio->page, !unshare);
6098
c89357e2 6099 /* Break COW or unshare */
5b7a1d40 6100 huge_ptep_clear_flush(vma, haddr, ptep);
e135826b 6101 hugetlb_remove_rmap(old_folio);
9d5fafd5 6102 hugetlb_add_new_anon_rmap(new_folio, vma, haddr);
0f230bc2
PX
6103 if (huge_pte_uffd_wp(pte))
6104 newpte = huge_pte_mkuffd_wp(newpte);
935d4f0c 6105 set_huge_pte_at(mm, haddr, ptep, newpte, huge_page_size(h));
d0ce0e47 6106 folio_set_hugetlb_migratable(new_folio);
1e8f889b 6107 /* Make the old page be freed below */
959a78b6 6108 new_folio = old_folio;
1e8f889b 6109 }
cb900f41 6110 spin_unlock(ptl);
ac46d4f3 6111 mmu_notifier_invalidate_range_end(&range);
ad4404a2 6112out_release_all:
c89357e2
DH
6113 /*
6114 * No restore in case of successful pagetable update (Break COW or
6115 * unshare)
6116 */
959a78b6 6117 if (new_folio != old_folio)
d2d7bb44 6118 restore_reserve_on_error(h, vma, haddr, new_folio);
d0ce0e47 6119 folio_put(new_folio);
ad4404a2 6120out_release_old:
959a78b6 6121 folio_put(old_folio);
8312034f 6122
ad4404a2 6123 spin_lock(ptl); /* Caller expects lock to be held */
662ce1dc
YY
6124
6125 delayacct_wpcopy_end();
ad4404a2 6126 return ret;
1e8f889b
DG
6127}
6128
3ae77f43
HD
6129/*
6130 * Return whether there is a pagecache page backing the given address within the VMA.
3ae77f43
HD
6131 */
6132static bool hugetlbfs_pagecache_present(struct hstate *h,
2a15efc9
HD
6133 struct vm_area_struct *vma, unsigned long address)
6134{
91a2fb95 6135 struct address_space *mapping = vma->vm_file->f_mapping;
a08c7193 6136 pgoff_t idx = linear_page_index(vma, address);
fd4aed8d 6137 struct folio *folio;
2a15efc9 6138
fd4aed8d
MK
6139 folio = filemap_get_folio(mapping, idx);
6140 if (IS_ERR(folio))
6141 return false;
6142 folio_put(folio);
6143 return true;
2a15efc9
HD
6144}
6145
9b91c0e2 6146int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
ab76ad54
MK
6147 pgoff_t idx)
6148{
6149 struct inode *inode = mapping->host;
6150 struct hstate *h = hstate_inode(inode);
d9ef44de 6151 int err;
ab76ad54 6152
a08c7193 6153 idx <<= huge_page_order(h);
d9ef44de
MWO
6154 __folio_set_locked(folio);
6155 err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL);
6156
6157 if (unlikely(err)) {
6158 __folio_clear_locked(folio);
ab76ad54 6159 return err;
d9ef44de 6160 }
9b91c0e2 6161 folio_clear_hugetlb_restore_reserve(folio);
ab76ad54 6162
22146c3c 6163 /*
d9ef44de 6164 * mark folio dirty so that it will not be removed from cache/file
22146c3c
MK
6165 * by non-hugetlbfs specific code paths.
6166 */
d9ef44de 6167 folio_mark_dirty(folio);
22146c3c 6168
ab76ad54
MK
6169 spin_lock(&inode->i_lock);
6170 inode->i_blocks += blocks_per_huge_page(h);
6171 spin_unlock(&inode->i_lock);
6172 return 0;
6173}
6174
7dac0ec8 6175static inline vm_fault_t hugetlb_handle_userfault(struct vm_fault *vmf,
7677f7fd 6176 struct address_space *mapping,
7677f7fd
AR
6177 unsigned long reason)
6178{
7677f7fd 6179 u32 hash;
7677f7fd
AR
6180
6181 /*
958f32ce
LS
6182 * vma_lock and hugetlb_fault_mutex must be dropped before handling
6183 * userfault. Also mmap_lock could be dropped due to handling
6184 * userfault, any vma operation should be careful from here.
7677f7fd 6185 */
7dac0ec8
VMO
6186 hugetlb_vma_unlock_read(vmf->vma);
6187 hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff);
7677f7fd 6188 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
7dac0ec8 6189 return handle_userfault(vmf, reason);
7677f7fd
AR
6190}
6191
2ea7ff1e
PX
6192/*
6193 * Recheck pte with pgtable lock. Returns true if pte didn't change, or
6194 * false if pte changed or is changing.
6195 */
6196static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm,
6197 pte_t *ptep, pte_t old_pte)
6198{
6199 spinlock_t *ptl;
6200 bool same;
6201
6202 ptl = huge_pte_lock(h, mm, ptep);
6203 same = pte_same(huge_ptep_get(ptep), old_pte);
6204 spin_unlock(ptl);
6205
6206 return same;
6207}
6208
2b740303
SJ
6209static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
6210 struct vm_area_struct *vma,
6211 struct address_space *mapping, pgoff_t idx,
c64e912c 6212 unsigned long address, pte_t *ptep,
7dac0ec8
VMO
6213 pte_t old_pte, unsigned int flags,
6214 struct vm_fault *vmf)
ac9b9c66 6215{
a5516438 6216 struct hstate *h = hstate_vma(vma);
2b740303 6217 vm_fault_t ret = VM_FAULT_SIGBUS;
409eb8c2 6218 int anon_rmap = 0;
4c887265 6219 unsigned long size;
d0ce0e47 6220 struct folio *folio;
1e8f889b 6221 pte_t new_pte;
cb900f41 6222 spinlock_t *ptl;
285b8dca 6223 unsigned long haddr = address & huge_page_mask(h);
d0ce0e47 6224 bool new_folio, new_pagecache_folio = false;
958f32ce 6225 u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
4c887265 6226
04f2cbe3
MG
6227 /*
6228 * Currently, we are forced to kill the process in the event the
6229 * original mapper has unmapped pages from the child due to a failed
c89357e2
DH
6230 * COW/unsharing. Warn that such a situation has occurred as it may not
6231 * be obvious.
04f2cbe3
MG
6232 */
6233 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
910154d5 6234 pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
ffb22af5 6235 current->pid);
958f32ce 6236 goto out;
04f2cbe3
MG
6237 }
6238
4c887265 6239 /*
188a3972
MK
6240 * Use page lock to guard against racing truncation
6241 * before we get page_table_lock.
4c887265 6242 */
d0ce0e47 6243 new_folio = false;
a08c7193 6244 folio = filemap_lock_hugetlb_folio(h, mapping, idx);
66dabbb6 6245 if (IS_ERR(folio)) {
188a3972
MK
6246 size = i_size_read(mapping->host) >> huge_page_shift(h);
6247 if (idx >= size)
6248 goto out;
7677f7fd 6249 /* Check for page in userfault range */
2ea7ff1e
PX
6250 if (userfaultfd_missing(vma)) {
6251 /*
6252 * Since hugetlb_no_page() was examining pte
6253 * without pgtable lock, we need to re-test under
6254 * lock because the pte may not be stable and could
6255 * have changed from under us. Try to detect
6256 * either changed or during-changing ptes and retry
6257 * properly when needed.
6258 *
6259 * Note that userfaultfd is actually fine with
6261 * false positives (e.g. caused by the pte changing),
6262 * but not with wrong logical events (e.g. caused by
6263 * reading a pte while it is changing). The latter can
6264 * confuse userspace, so the strictness is very
6264 * much preferred. E.g., MISSING event should
6265 * never happen on the page after UFFDIO_COPY has
6266 * correctly installed the page and returned.
6267 */
6268 if (!hugetlb_pte_stable(h, mm, ptep, old_pte)) {
6269 ret = 0;
6270 goto out;
6271 }
6272
7dac0ec8 6273 return hugetlb_handle_userfault(vmf, mapping,
2ea7ff1e
PX
6274 VM_UFFD_MISSING);
6275 }
1a1aad8a 6276
d0ce0e47
SK
6277 folio = alloc_hugetlb_folio(vma, haddr, 0);
6278 if (IS_ERR(folio)) {
4643d67e
MK
6279 /*
6280 * Returning error will result in faulting task being
6281 * sent SIGBUS. The hugetlb fault mutex prevents two
6282 * tasks from racing to fault in the same page which
6283 * could result in false unable to allocate errors.
6284 * Page migration does not take the fault mutex, but
6285 * does a clear then write of pte's under page table
6286 * lock. Page fault code could race with migration,
6287 * notice the clear pte and try to allocate a page
6288 * here. Before returning error, get ptl and make
6289 * sure there really is no pte entry.
6290 */
f9bf6c03 6291 if (hugetlb_pte_stable(h, mm, ptep, old_pte))
d0ce0e47 6292 ret = vmf_error(PTR_ERR(folio));
f9bf6c03
PX
6293 else
6294 ret = 0;
6bda666a
CL
6295 goto out;
6296 }
d0ce0e47
SK
6297 clear_huge_page(&folio->page, address, pages_per_huge_page(h));
6298 __folio_mark_uptodate(folio);
6299 new_folio = true;
ac9b9c66 6300
f83a275d 6301 if (vma->vm_flags & VM_MAYSHARE) {
9b91c0e2 6302 int err = hugetlb_add_to_page_cache(folio, mapping, idx);
6bda666a 6303 if (err) {
3a5497a2
ML
6304 /*
6305 * err can't be -EEXIST which implies someone
6306 * else consumed the reservation since hugetlb
6307 * fault mutex is held when add a hugetlb page
6308 * to the page cache. So it's safe to call
6309 * restore_reserve_on_error() here.
6310 */
d2d7bb44 6311 restore_reserve_on_error(h, vma, haddr, folio);
d0ce0e47 6312 folio_put(folio);
6bda666a
CL
6313 goto out;
6314 }
d0ce0e47 6315 new_pagecache_folio = true;
23be7468 6316 } else {
d0ce0e47 6317 folio_lock(folio);
9acad7ba
VMO
6318
6319 ret = vmf_anon_prepare(vmf);
6320 if (unlikely(ret))
0fe6e20b 6321 goto backout_unlocked;
409eb8c2 6322 anon_rmap = 1;
23be7468 6323 }
0fe6e20b 6324 } else {
998b4382
NH
6325 /*
6326 * If memory error occurs between mmap() and fault, some process
6327 * don't have hwpoisoned swap entry for errored virtual address.
6328 * So we need to block hugepage fault by PG_hwpoison bit check.
6329 */
d0ce0e47 6330 if (unlikely(folio_test_hwpoison(folio))) {
0eb98f15 6331 ret = VM_FAULT_HWPOISON_LARGE |
972dc4de 6332 VM_FAULT_SET_HINDEX(hstate_index(h));
998b4382
NH
6333 goto backout_unlocked;
6334 }
7677f7fd
AR
6335
6336 /* Check for page in userfault range. */
6337 if (userfaultfd_minor(vma)) {
d0ce0e47
SK
6338 folio_unlock(folio);
6339 folio_put(folio);
2ea7ff1e
PX
6340 /* See comment in userfaultfd_missing() block above */
6341 if (!hugetlb_pte_stable(h, mm, ptep, old_pte)) {
6342 ret = 0;
6343 goto out;
6344 }
7dac0ec8 6345 return hugetlb_handle_userfault(vmf, mapping,
2ea7ff1e 6346 VM_UFFD_MINOR);
7677f7fd 6347 }
6bda666a 6348 }
1e8f889b 6349
57303d80
AW
6350 /*
6351 * If we are going to COW a private mapping later, we examine the
6352 * pending reservations for this page now. This will ensure that
6353 * any allocations necessary to record that reservation occur outside
6354 * the spinlock.
6355 */
5e911373 6356 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
285b8dca 6357 if (vma_needs_reservation(h, vma, haddr) < 0) {
2b26736c
AW
6358 ret = VM_FAULT_OOM;
6359 goto backout_unlocked;
6360 }
5e911373 6361 /* Just decrements count, does not deallocate */
285b8dca 6362 vma_end_reservation(h, vma, haddr);
5e911373 6363 }
57303d80 6364
8bea8052 6365 ptl = huge_pte_lock(h, mm, ptep);
83c54070 6366 ret = 0;
c64e912c
PX
6367 /* If pte changed from under us, retry */
6368 if (!pte_same(huge_ptep_get(ptep), old_pte))
4c887265
AL
6369 goto backout;
6370
4781593d 6371 if (anon_rmap)
9d5fafd5 6372 hugetlb_add_new_anon_rmap(folio, vma, haddr);
4781593d 6373 else
44887f39 6374 hugetlb_add_file_rmap(folio);
d0ce0e47 6375 new_pte = make_huge_pte(vma, &folio->page, ((vma->vm_flags & VM_WRITE)
1e8f889b 6376 && (vma->vm_flags & VM_SHARED)));
c64e912c
PX
6377 /*
6378 * If this pte was previously wr-protected, keep it wr-protected even
6379 * if populated.
6380 */
6381 if (unlikely(pte_marker_uffd_wp(old_pte)))
f1eb1bac 6382 new_pte = huge_pte_mkuffd_wp(new_pte);
935d4f0c 6383 set_huge_pte_at(mm, haddr, ptep, new_pte, huge_page_size(h));
1e8f889b 6384
5d317b2b 6385 hugetlb_count_add(pages_per_huge_page(h), mm);
788c7df4 6386 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
1e8f889b 6387 /* Optimization, do the COW without a second fault */
9acad7ba 6388 ret = hugetlb_wp(mm, vma, address, ptep, flags, folio, ptl, vmf);
1e8f889b
DG
6389 }
6390
cb900f41 6391 spin_unlock(ptl);
cb6acd01
MK
6392
6393 /*
d0ce0e47
SK
6394 * Only set hugetlb_migratable in newly allocated pages. Existing pages
6395 * found in the pagecache may not have hugetlb_migratable if they have
8f251a3d 6396 * been isolated for migration.
cb6acd01 6397 */
d0ce0e47
SK
6398 if (new_folio)
6399 folio_set_hugetlb_migratable(folio);
cb6acd01 6400
d0ce0e47 6401 folio_unlock(folio);
4c887265 6402out:
958f32ce
LS
6403 hugetlb_vma_unlock_read(vma);
6404 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
ac9b9c66 6405 return ret;
4c887265
AL
6406
6407backout:
cb900f41 6408 spin_unlock(ptl);
2b26736c 6409backout_unlocked:
d0ce0e47 6410 if (new_folio && !new_pagecache_folio)
d2d7bb44 6411 restore_reserve_on_error(h, vma, haddr, folio);
fa27759a 6412
d0ce0e47
SK
6413 folio_unlock(folio);
6414 folio_put(folio);
4c887265 6415 goto out;
ac9b9c66
HD
6416}
6417
8382d914 6418#ifdef CONFIG_SMP
188b04a7 6419u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
8382d914
DB
6420{
6421 unsigned long key[2];
6422 u32 hash;
6423
1b426bac
MK
6424 key[0] = (unsigned long) mapping;
6425 key[1] = idx;
8382d914 6426
55254636 6427 hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0);
8382d914
DB
6428
6429 return hash & (num_fault_mutexes - 1);
6430}
6431#else
6432/*
6c26d310 6433 * For uniprocessor systems we always use a single mutex, so just
8382d914
DB
6434 * return 0 and avoid the hashing overhead.
6435 */
188b04a7 6436u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
8382d914
DB
6437{
6438 return 0;
6439}
6440#endif
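/*
 * Illustrative usage sketch (editorial addition, not taken verbatim from
 * this file; "mapping" and "idx" are placeholders): faults and truncation
 * on the same (mapping, index) pair serialize through the hashed mutex:
 *
 *	hash = hugetlb_fault_mutex_hash(mapping, idx);
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	... instantiate or remove the page at idx ...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */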
6441
2b740303 6442vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
788c7df4 6443 unsigned long address, unsigned int flags)
86e5216f 6444{
8382d914 6445 pte_t *ptep, entry;
cb900f41 6446 spinlock_t *ptl;
2b740303 6447 vm_fault_t ret;
8382d914 6448 u32 hash;
061e62e8 6449 struct folio *folio = NULL;
371607a3 6450 struct folio *pagecache_folio = NULL;
a5516438 6451 struct hstate *h = hstate_vma(vma);
8382d914 6452 struct address_space *mapping;
0f792cf9 6453 int need_wait_lock = 0;
285b8dca 6454 unsigned long haddr = address & huge_page_mask(h);
0ca22723
VMO
6455 struct vm_fault vmf = {
6456 .vma = vma,
6457 .address = haddr,
6458 .real_address = address,
6459 .flags = flags,
6460 .pgoff = vma_hugecache_offset(h, vma, haddr),
6461 /* TODO: Track hugetlb faults using vm_fault */
6462
6463 /*
6464 * Some fields may not be initialized; be careful, as it may
6465 * be hard to debug if called functions make assumptions.
6466 */
6467 };
86e5216f 6468
3935baa9
DG
6469 /*
6470 * Serialize hugepage allocation and instantiation, so that we don't
6471 * get spurious allocation failures if two CPUs race to instantiate
6472 * the same page in the page cache.
6473 */
40549ba8 6474 mapping = vma->vm_file->f_mapping;
0ca22723 6475 hash = hugetlb_fault_mutex_hash(mapping, vmf.pgoff);
c672c7f2 6476 mutex_lock(&hugetlb_fault_mutex_table[hash]);
8382d914 6477
40549ba8
MK
6478 /*
6479 * Acquire vma lock before calling huge_pte_alloc and hold
6480 * until finished with ptep. This prevents huge_pmd_unshare from
6481 * being called elsewhere and making the ptep no longer valid.
40549ba8
MK
6482 */
6483 hugetlb_vma_lock_read(vma);
6484 ptep = huge_pte_alloc(mm, vma, haddr, huge_page_size(h));
6485 if (!ptep) {
6486 hugetlb_vma_unlock_read(vma);
6487 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6488 return VM_FAULT_OOM;
6489 }
6490
7f2e9525 6491 entry = huge_ptep_get(ptep);
af19487f
AR
6492 if (huge_pte_none_mostly(entry)) {
6493 if (is_pte_marker(entry)) {
6494 pte_marker marker =
6495 pte_marker_get(pte_to_swp_entry(entry));
6496
6497 if (marker & PTE_MARKER_POISONED) {
6498 ret = VM_FAULT_HWPOISON_LARGE;
6499 goto out_mutex;
6500 }
6501 }
6502
958f32ce 6503 /*
af19487f
AR
6504 * Other PTE markers should be handled the same way as none PTE.
6505 *
958f32ce
LS
6506 * hugetlb_no_page will drop vma lock and hugetlb fault
6507 * mutex internally, which makes us return immediately.
6508 */
0ca22723 6509 return hugetlb_no_page(mm, vma, mapping, vmf.pgoff, address,
7dac0ec8 6510 ptep, entry, flags, &vmf);
af19487f 6511 }
86e5216f 6512
83c54070 6513 ret = 0;
1e8f889b 6514
0f792cf9
NH
6515 /*
6516 * entry could be a migration/hwpoison entry at this point, so this
6517 * check prevents the kernel from proceeding below assuming that we have
7c8de358
EP
6518 * an active hugepage in pagecache. This goto expects the 2nd page
6519 * fault, and is_hugetlb_entry_(migration|hwpoisoned) check will
6520 * properly handle it.
0f792cf9 6521 */
fcd48540
PX
6522 if (!pte_present(entry)) {
6523 if (unlikely(is_hugetlb_entry_migration(entry))) {
6524 /*
6525 * Release the hugetlb fault lock now, but retain
6526 * the vma lock, because it is needed to guard the
6527 * huge_pte_lockptr() later in
6528 * migration_entry_wait_huge(). The vma lock will
6529 * be released there.
6530 */
6531 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6532 migration_entry_wait_huge(vma, ptep);
6533 return 0;
6534 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
6535 ret = VM_FAULT_HWPOISON_LARGE |
6536 VM_FAULT_SET_HINDEX(hstate_index(h));
0f792cf9 6537 goto out_mutex;
fcd48540 6538 }
0f792cf9 6539
57303d80 6540 /*
c89357e2
DH
6541 * If we are going to COW/unshare the mapping later, we examine the
6542 * pending reservations for this page now. This will ensure that any
57303d80 6543 * allocations necessary to record that reservation occur outside the
1d8d1464
DH
6544 * spinlock. Also lookup the pagecache page now as it is used to
6545 * determine if a reservation has been consumed.
57303d80 6546 */
c89357e2 6547 if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) &&
1d8d1464 6548 !(vma->vm_flags & VM_MAYSHARE) && !huge_pte_write(entry)) {
285b8dca 6549 if (vma_needs_reservation(h, vma, haddr) < 0) {
2b26736c 6550 ret = VM_FAULT_OOM;
b4d1d99f 6551 goto out_mutex;
2b26736c 6552 }
5e911373 6553 /* Just decrements count, does not deallocate */
285b8dca 6554 vma_end_reservation(h, vma, haddr);
57303d80 6555
0ca22723
VMO
6556 pagecache_folio = filemap_lock_hugetlb_folio(h, mapping,
6557 vmf.pgoff);
66dabbb6
CH
6558 if (IS_ERR(pagecache_folio))
6559 pagecache_folio = NULL;
57303d80
AW
6560 }
6561
0f792cf9
NH
6562 ptl = huge_pte_lock(h, mm, ptep);
6563
c89357e2 6564 /* Check for a racing update before calling hugetlb_wp() */
0f792cf9
NH
6565 if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
6566 goto out_ptl;
6567
166f3ecc
PX
6568 /* Handle userfault-wp first, before trying to lock more pages */
6569 if (userfaultfd_wp(vma) && huge_pte_uffd_wp(huge_ptep_get(ptep)) &&
6570 (flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
d61ea1cb 6571 if (!userfaultfd_wp_async(vma)) {
d61ea1cb
PX
6572 spin_unlock(ptl);
6573 if (pagecache_folio) {
6574 folio_unlock(pagecache_folio);
6575 folio_put(pagecache_folio);
6576 }
6577 hugetlb_vma_unlock_read(vma);
6578 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6579 return handle_userfault(&vmf, VM_UFFD_WP);
166f3ecc 6580 }
d61ea1cb
PX
6581
6582 entry = huge_pte_clear_uffd_wp(entry);
52526ca7
MUA
6583 set_huge_pte_at(mm, haddr, ptep, entry,
6584 huge_page_size(hstate_vma(vma)));
d61ea1cb 6585 /* Fallthrough to CoW */
166f3ecc
PX
6586 }
6587
56c9cfb1 6588 /*
c89357e2 6589 * hugetlb_wp() requires page locks of pte_page(entry) and
371607a3 6590 * pagecache_folio, so here we need to take the former one
061e62e8 6591 * when folio != pagecache_folio or !pagecache_folio.
56c9cfb1 6592 */
061e62e8
Z
6593 folio = page_folio(pte_page(entry));
6594 if (folio != pagecache_folio)
6595 if (!folio_trylock(folio)) {
0f792cf9
NH
6596 need_wait_lock = 1;
6597 goto out_ptl;
6598 }
b4d1d99f 6599
061e62e8 6600 folio_get(folio);
b4d1d99f 6601
c89357e2 6602 if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
106c992a 6603 if (!huge_pte_write(entry)) {
c89357e2 6604 ret = hugetlb_wp(mm, vma, address, ptep, flags,
9acad7ba 6605 pagecache_folio, ptl, &vmf);
0f792cf9 6606 goto out_put_page;
c89357e2
DH
6607 } else if (likely(flags & FAULT_FLAG_WRITE)) {
6608 entry = huge_pte_mkdirty(entry);
b4d1d99f 6609 }
b4d1d99f
DG
6610 }
6611 entry = pte_mkyoung(entry);
285b8dca 6612 if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
788c7df4 6613 flags & FAULT_FLAG_WRITE))
285b8dca 6614 update_mmu_cache(vma, haddr, ptep);
0f792cf9 6615out_put_page:
061e62e8
Z
6616 if (folio != pagecache_folio)
6617 folio_unlock(folio);
6618 folio_put(folio);
cb900f41
KS
6619out_ptl:
6620 spin_unlock(ptl);
57303d80 6621
371607a3
SK
6622 if (pagecache_folio) {
6623 folio_unlock(pagecache_folio);
6624 folio_put(pagecache_folio);
57303d80 6625 }
b4d1d99f 6626out_mutex:
40549ba8 6627 hugetlb_vma_unlock_read(vma);
c672c7f2 6628 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
0f792cf9
NH
6629 /*
6630 * Generally it's safe to hold a refcount while waiting for the page lock.
6631 * But here we only wait to defer the next page fault and avoid a busy loop;
6632 * the page is not used after it is unlocked before returning from the
6633 * current page fault. So we are safe from accessing a freed page, even if
6634 * we wait here without taking a refcount.
6635 */
6636 if (need_wait_lock)
061e62e8 6637 folio_wait_locked(folio);
1e8f889b 6638 return ret;
86e5216f
AL
6639}
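/*
 * Descriptive note (editorial summary, not from the original source):
 * hugetlb_fault() above takes its locks in a fixed order - the hashed
 * hugetlb_fault_mutex_table entry, then the vma lock for read, then the
 * page table lock (ptl). The hugetlb_no_page() path drops the mutex and
 * vma lock internally; the other paths release the ptl at out_ptl and the
 * vma lock and mutex at out_mutex.
 */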
6640
714c1891 6641#ifdef CONFIG_USERFAULTFD
72e315f7
HD
6642/*
6643 * Can probably be eliminated, but still used by hugetlb_mfill_atomic_pte().
6644 */
6645static struct folio *alloc_hugetlb_folio_vma(struct hstate *h,
6646 struct vm_area_struct *vma, unsigned long address)
6647{
6648 struct mempolicy *mpol;
6649 nodemask_t *nodemask;
6650 struct folio *folio;
6651 gfp_t gfp_mask;
6652 int node;
6653
6654 gfp_mask = htlb_alloc_mask(h);
6655 node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
6656 folio = alloc_hugetlb_folio_nodemask(h, node, nodemask, gfp_mask);
6657 mpol_cond_put(mpol);
6658
6659 return folio;
6660}
6661
8fb5debc 6662/*
a734991c
AR
6663 * Used by userfaultfd UFFDIO_* ioctls. Based on userfaultfd's mfill_atomic_pte
6664 * with modifications for hugetlb pages.
8fb5debc 6665 */
61c50040 6666int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
a734991c
AR
6667 struct vm_area_struct *dst_vma,
6668 unsigned long dst_addr,
6669 unsigned long src_addr,
d9712937 6670 uffd_flags_t flags,
0169fd51 6671 struct folio **foliop)
8fb5debc 6672{
61c50040 6673 struct mm_struct *dst_mm = dst_vma->vm_mm;
d9712937
AR
6674 bool is_continue = uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE);
6675 bool wp_enabled = (flags & MFILL_ATOMIC_WP);
8cc5fcbb
MA
6676 struct hstate *h = hstate_vma(dst_vma);
6677 struct address_space *mapping = dst_vma->vm_file->f_mapping;
6678 pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr);
1e392147 6679 unsigned long size;
1c9e8def 6680 int vm_shared = dst_vma->vm_flags & VM_SHARED;
8fb5debc
MK
6681 pte_t _dst_pte;
6682 spinlock_t *ptl;
8cc5fcbb 6683 int ret = -ENOMEM;
d0ce0e47 6684 struct folio *folio;
f6191471 6685 int writable;
d0ce0e47 6686 bool folio_in_pagecache = false;
8fb5debc 6687
8a13897f
AR
6688 if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
6689 ptl = huge_pte_lock(h, dst_mm, dst_pte);
6690
6691 /* Don't overwrite any existing PTEs (even markers) */
6692 if (!huge_pte_none(huge_ptep_get(dst_pte))) {
6693 spin_unlock(ptl);
6694 return -EEXIST;
6695 }
6696
6697 _dst_pte = make_pte_marker(PTE_MARKER_POISONED);
935d4f0c
RR
6698 set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte,
6699 huge_page_size(h));
8a13897f
AR
6700
6701 /* No need to invalidate - it was non-present before */
6702 update_mmu_cache(dst_vma, dst_addr, dst_pte);
6703
6704 spin_unlock(ptl);
6705 return 0;
6706 }
6707
f6191471
AR
6708 if (is_continue) {
6709 ret = -EFAULT;
a08c7193 6710 folio = filemap_lock_hugetlb_folio(h, mapping, idx);
66dabbb6 6711 if (IS_ERR(folio))
f6191471 6712 goto out;
d0ce0e47 6713 folio_in_pagecache = true;
0169fd51
Z
6714 } else if (!*foliop) {
6715 /* If a folio already exists, then it's UFFDIO_COPY for
d84cf06e
MA
6716 * a non-missing case. Return -EEXIST.
6717 */
6718 if (vm_shared &&
6719 hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
6720 ret = -EEXIST;
6721 goto out;
6722 }
6723
d0ce0e47
SK
6724 folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0);
6725 if (IS_ERR(folio)) {
d84cf06e 6726 ret = -ENOMEM;
8fb5debc 6727 goto out;
d84cf06e 6728 }
8fb5debc 6729
e87340ca
Z
6730 ret = copy_folio_from_user(folio, (const void __user *) src_addr,
6731 false);
8fb5debc 6732
c1e8d7c6 6733 /* fallback to copy_from_user outside mmap_lock */
8fb5debc 6734 if (unlikely(ret)) {
9e368259 6735 ret = -ENOENT;
d0ce0e47 6736 /* Free the allocated folio which may have
8cc5fcbb
MA
6737 * consumed a reservation.
6738 */
d2d7bb44 6739 restore_reserve_on_error(h, dst_vma, dst_addr, folio);
d0ce0e47 6740 folio_put(folio);
8cc5fcbb 6741
d0ce0e47 6742 /* Allocate a temporary folio to hold the copied
8cc5fcbb
MA
6743 * contents.
6744 */
d0ce0e47
SK
6745 folio = alloc_hugetlb_folio_vma(h, dst_vma, dst_addr);
6746 if (!folio) {
8cc5fcbb
MA
6747 ret = -ENOMEM;
6748 goto out;
6749 }
0169fd51
Z
6750 *foliop = folio;
6751 /* Set the outparam foliop and return to the caller to
8cc5fcbb 6752 * copy the contents outside the lock. Don't free the
0169fd51 6753 * folio.
8cc5fcbb 6754 */
8fb5debc
MK
6755 goto out;
6756 }
6757 } else {
8cc5fcbb
MA
6758 if (vm_shared &&
6759 hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
0169fd51 6760 folio_put(*foliop);
8cc5fcbb 6761 ret = -EEXIST;
0169fd51 6762 *foliop = NULL;
8cc5fcbb
MA
6763 goto out;
6764 }
6765
d0ce0e47
SK
6766 folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0);
6767 if (IS_ERR(folio)) {
0169fd51 6768 folio_put(*foliop);
8cc5fcbb 6769 ret = -ENOMEM;
0169fd51 6770 *foliop = NULL;
8cc5fcbb
MA
6771 goto out;
6772 }
1cb9dc4b 6773 ret = copy_user_large_folio(folio, *foliop, dst_addr, dst_vma);
0169fd51
Z
6774 folio_put(*foliop);
6775 *foliop = NULL;
1cb9dc4b
LS
6776 if (ret) {
6777 folio_put(folio);
8cc5fcbb
MA
6778 goto out;
6779 }
8fb5debc
MK
6780 }
6781
6782 /*
b14d1671
JH
6783 * If we just allocated a new page, we need a memory barrier to ensure
6784 * that preceding stores to the page become visible before the
6785 * set_pte_at() write. The memory barrier inside __folio_mark_uptodate
6786 * is what we need.
6787 *
6788 * In the case where we have not allocated a new page (is_continue),
6789 * the page must already be uptodate. UFFDIO_CONTINUE already includes
6790 * an earlier smp_wmb() to ensure that prior stores will be visible
6791 * before the set_pte_at() write.
8fb5debc 6792 */
b14d1671
JH
6793 if (!is_continue)
6794 __folio_mark_uptodate(folio);
6795 else
6796 WARN_ON_ONCE(!folio_test_uptodate(folio));
8fb5debc 6797
f6191471
AR
6798 /* Add shared, newly allocated pages to the page cache. */
6799 if (vm_shared && !is_continue) {
1e392147
AA
6800 size = i_size_read(mapping->host) >> huge_page_shift(h);
6801 ret = -EFAULT;
6802 if (idx >= size)
6803 goto out_release_nounlock;
1c9e8def 6804
1e392147
AA
6805 /*
6806 * Serialization between remove_inode_hugepages() and
7e1813d4 6807 * hugetlb_add_to_page_cache() below happens through the
1e392147
AA
6808 * hugetlb_fault_mutex_table, which here must be held by
6809 * the caller.
6810 */
9b91c0e2 6811 ret = hugetlb_add_to_page_cache(folio, mapping, idx);
1c9e8def
MK
6812 if (ret)
6813 goto out_release_nounlock;
d0ce0e47 6814 folio_in_pagecache = true;
1c9e8def
MK
6815 }
6816
bcc66543 6817 ptl = huge_pte_lock(h, dst_mm, dst_pte);
8fb5debc 6818
8625147c 6819 ret = -EIO;
d0ce0e47 6820 if (folio_test_hwpoison(folio))
8625147c
JH
6821 goto out_release_unlock;
6822
6041c691
PX
6823 /*
6824 * We allow overwriting a pte marker: consider the case where both
6825 * MISSING|WP are registered; we first wr-protect a none pte which has no
6826 * page cache page backing it, then access the page.
6827 */
fa27759a 6828 ret = -EEXIST;
6041c691 6829 if (!huge_pte_none_mostly(huge_ptep_get(dst_pte)))
8fb5debc
MK
6830 goto out_release_unlock;
6831
d0ce0e47 6832 if (folio_in_pagecache)
44887f39 6833 hugetlb_add_file_rmap(folio);
4781593d 6834 else
9d5fafd5 6835 hugetlb_add_new_anon_rmap(folio, dst_vma, dst_addr);
8fb5debc 6836
6041c691
PX
6837 /*
6838 * For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY
6839 * with wp flag set, don't set pte write bit.
6840 */
d9712937 6841 if (wp_enabled || (is_continue && !vm_shared))
f6191471
AR
6842 writable = 0;
6843 else
6844 writable = dst_vma->vm_flags & VM_WRITE;
6845
d0ce0e47 6846 _dst_pte = make_huge_pte(dst_vma, &folio->page, writable);
6041c691
PX
6847 /*
6848 * Always mark UFFDIO_COPY page dirty; note that this may not be
6849 * extremely important for hugetlbfs for now since swapping is not
6850 * supported, but we should still make clear that this page cannot be
6851 * thrown away at will, even if the write bit is not set.
6852 */
6853 _dst_pte = huge_pte_mkdirty(_dst_pte);
8fb5debc
MK
6854 _dst_pte = pte_mkyoung(_dst_pte);
6855
d9712937 6856 if (wp_enabled)
6041c691
PX
6857 _dst_pte = huge_pte_mkuffd_wp(_dst_pte);
6858
935d4f0c 6859 set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, huge_page_size(h));
8fb5debc 6860
8fb5debc
MK
6861 hugetlb_count_add(pages_per_huge_page(h), dst_mm);
6862
6863 /* No need to invalidate - it was non-present before */
6864 update_mmu_cache(dst_vma, dst_addr, dst_pte);
6865
6866 spin_unlock(ptl);
f6191471 6867 if (!is_continue)
d0ce0e47 6868 folio_set_hugetlb_migratable(folio);
f6191471 6869 if (vm_shared || is_continue)
d0ce0e47 6870 folio_unlock(folio);
8fb5debc
MK
6871 ret = 0;
6872out:
6873 return ret;
6874out_release_unlock:
6875 spin_unlock(ptl);
f6191471 6876 if (vm_shared || is_continue)
d0ce0e47 6877 folio_unlock(folio);
5af10dfd 6878out_release_nounlock:
d0ce0e47 6879 if (!folio_in_pagecache)
d2d7bb44 6880 restore_reserve_on_error(h, dst_vma, dst_addr, folio);
d0ce0e47 6881 folio_put(folio);
8fb5debc
MK
6882 goto out;
6883}
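/*
 * Descriptive note (editorial summary, not from the original source):
 * hugetlb_mfill_atomic_pte() above serves three userfaultfd modes -
 * MFILL_ATOMIC_POISON installs a poison PTE marker without allocating a
 * folio, MFILL_ATOMIC_CONTINUE maps the folio already present in the page
 * cache, and the UFFDIO_COPY case allocates a new folio and copies from
 * userspace, handing a temporary folio back through *foliop when the copy
 * must be retried outside mmap_lock.
 */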
714c1891 6884#endif /* CONFIG_USERFAULTFD */
8fb5debc 6885
57a196a5 6886struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
5502ea44
PX
6887 unsigned long address, unsigned int flags,
6888 unsigned int *page_mask)
57a196a5
MK
6889{
6890 struct hstate *h = hstate_vma(vma);
6891 struct mm_struct *mm = vma->vm_mm;
6892 unsigned long haddr = address & huge_page_mask(h);
6893 struct page *page = NULL;
6894 spinlock_t *ptl;
6895 pte_t *pte, entry;
458568c9 6896 int ret;
57a196a5 6897
7d049f3a 6898 hugetlb_vma_lock_read(vma);
9c67a207 6899 pte = hugetlb_walk(vma, haddr, huge_page_size(h));
57a196a5 6900 if (!pte)
7d049f3a 6901 goto out_unlock;
57a196a5
MK
6902
6903 ptl = huge_pte_lock(h, mm, pte);
6904 entry = huge_ptep_get(pte);
6905 if (pte_present(entry)) {
458568c9
PX
6906 page = pte_page(entry);
6907
6908 if (!huge_pte_write(entry)) {
6909 if (flags & FOLL_WRITE) {
6910 page = NULL;
6911 goto out;
6912 }
6913
6914 if (gup_must_unshare(vma, flags, page)) {
6915 /* Tell the caller to do unsharing */
6916 page = ERR_PTR(-EMLINK);
6917 goto out;
6918 }
6919 }
6920
426056ef 6921 page = nth_page(page, ((address & ~huge_page_mask(h)) >> PAGE_SHIFT));
458568c9 6922
57a196a5
MK
6923 /*
6924 * Note that page may be a sub-page, and with vmemmap
6925 * optimizations the page struct may be read only.
6926 * try_grab_page() will increase the ref count on the
6927 * head page, so this will be OK.
6928 *
e2ca6ba6
LT
6929 * try_grab_page() should always be able to get the page here,
6930 * because we hold the ptl lock and have verified pte_present().
57a196a5 6931 */
458568c9
PX
6932 ret = try_grab_page(page, flags);
6933
6934 if (WARN_ON_ONCE(ret)) {
6935 page = ERR_PTR(ret);
57a196a5
MK
6936 goto out;
6937 }
5502ea44
PX
6938
6939 *page_mask = (1U << huge_page_order(h)) - 1;
57a196a5
MK
6940 }
6941out:
6942 spin_unlock(ptl);
7d049f3a
PX
6943out_unlock:
6944 hugetlb_vma_unlock_read(vma);
dd767aaa
PX
6945
6946 /*
6947 * Fixup retval for dump requests: if pagecache doesn't exist,
6948 * don't try to allocate a new page but just skip it.
6949 */
6950 if (!page && (flags & FOLL_DUMP) &&
6951 !hugetlbfs_pagecache_present(h, vma, address))
6952 page = ERR_PTR(-EFAULT);
6953
57a196a5
MK
6954 return page;
6955}
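/*
 * Illustrative note (assumes 4 KB base pages; the numbers are examples
 * only): on success *page_mask is set to (1 << huge_page_order(h)) - 1,
 * e.g. 511 for a 2 MB page, so the GUP caller can step across the
 * remaining sub-pages of the huge page without another page table walk.
 */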
6956
a79390f5 6957long hugetlb_change_protection(struct vm_area_struct *vma,
5a90d5a1
PX
6958 unsigned long address, unsigned long end,
6959 pgprot_t newprot, unsigned long cp_flags)
8f860591
ZY
6960{
6961 struct mm_struct *mm = vma->vm_mm;
6962 unsigned long start = address;
6963 pte_t *ptep;
6964 pte_t pte;
a5516438 6965 struct hstate *h = hstate_vma(vma);
a79390f5 6966 long pages = 0, psize = huge_page_size(h);
dff11abe 6967 bool shared_pmd = false;
ac46d4f3 6968 struct mmu_notifier_range range;
e95a9851 6969 unsigned long last_addr_mask;
5a90d5a1
PX
6970 bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
6971 bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
dff11abe
MK
6972
6973 /*
6974 * In the case of shared PMDs, the area to flush could be beyond
ac46d4f3 6975 * start/end. Set range.start/range.end to cover the maximum possible
dff11abe
MK
6976 * range if PMD sharing is possible.
6977 */
7269f999 6978 mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA,
7d4a8be0 6979 0, mm, start, end);
ac46d4f3 6980 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
8f860591
ZY
6981
6982 BUG_ON(address >= end);
ac46d4f3 6983 flush_cache_range(vma, range.start, range.end);
8f860591 6984
ac46d4f3 6985 mmu_notifier_invalidate_range_start(&range);
40549ba8 6986 hugetlb_vma_lock_write(vma);
83cde9e8 6987 i_mmap_lock_write(vma->vm_file->f_mapping);
40549ba8 6988 last_addr_mask = hugetlb_mask_last_page(h);
60dfaad6 6989 for (; address < end; address += psize) {
cb900f41 6990 spinlock_t *ptl;
9c67a207 6991 ptep = hugetlb_walk(vma, address, psize);
e95a9851 6992 if (!ptep) {
fed15f13
PX
6993 if (!uffd_wp) {
6994 address |= last_addr_mask;
6995 continue;
6996 }
6997 /*
6998 * Userfaultfd wr-protect requires pgtable
6999 * pre-allocations to install pte markers.
7000 */
7001 ptep = huge_pte_alloc(mm, vma, address, psize);
d1751118
PX
7002 if (!ptep) {
7003 pages = -ENOMEM;
fed15f13 7004 break;
d1751118 7005 }
e95a9851 7006 }
cb900f41 7007 ptl = huge_pte_lock(h, mm, ptep);
4ddb4d91 7008 if (huge_pmd_unshare(mm, vma, address, ptep)) {
60dfaad6
PX
7009 /*
7010 * When uffd-wp is enabled on the vma, unshare
7011 * shouldn't happen at all. Warn about it if it
7012 * happens for some reason.
7013 */
7014 WARN_ON_ONCE(uffd_wp || uffd_wp_resolve);
7da4d641 7015 pages++;
cb900f41 7016 spin_unlock(ptl);
dff11abe 7017 shared_pmd = true;
4ddb4d91 7018 address |= last_addr_mask;
39dde65c 7019 continue;
7da4d641 7020 }
a8bda28d
NH
7021 pte = huge_ptep_get(ptep);
7022 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
0e678153
DH
7023 /* Nothing to do. */
7024 } else if (unlikely(is_hugetlb_entry_migration(pte))) {
a8bda28d 7025 swp_entry_t entry = pte_to_swp_entry(pte);
6c287605 7026 struct page *page = pfn_swap_entry_to_page(entry);
44f86392 7027 pte_t newpte = pte;
a8bda28d 7028
44f86392 7029 if (is_writable_migration_entry(entry)) {
6c287605
DH
7030 if (PageAnon(page))
7031 entry = make_readable_exclusive_migration_entry(
7032 swp_offset(entry));
7033 else
7034 entry = make_readable_migration_entry(
7035 swp_offset(entry));
a8bda28d 7036 newpte = swp_entry_to_pte(entry);
a8bda28d
NH
7037 pages++;
7038 }
44f86392
DH
7039
7040 if (uffd_wp)
7041 newpte = pte_swp_mkuffd_wp(newpte);
7042 else if (uffd_wp_resolve)
7043 newpte = pte_swp_clear_uffd_wp(newpte);
7044 if (!pte_same(pte, newpte))
935d4f0c 7045 set_huge_pte_at(mm, address, ptep, newpte, psize);
0e678153 7046 } else if (unlikely(is_pte_marker(pte))) {
c5977c95
PX
7047 /*
7048 * Do nothing on a poison marker; page is
7049 * corrupted, permissions do not apply. Here
7050 * pte_marker_uffd_wp()==true implies !poison
7051 * because they're mutually exclusive.
7052 */
7053 if (pte_marker_uffd_wp(pte) && uffd_wp_resolve)
0e678153 7054 /* Safe to modify directly (non-present->none). */
60dfaad6 7055 huge_pte_clear(mm, address, ptep, psize);
0e678153 7056 } else if (!huge_pte_none(pte)) {
023bdd00 7057 pte_t old_pte;
79c1c594 7058 unsigned int shift = huge_page_shift(hstate_vma(vma));
023bdd00
AK
7059
7060 old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
16785bd7 7061 pte = huge_pte_modify(old_pte, newprot);
79c1c594 7062 pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
5a90d5a1 7063 if (uffd_wp)
f1eb1bac 7064 pte = huge_pte_mkuffd_wp(pte);
5a90d5a1
PX
7065 else if (uffd_wp_resolve)
7066 pte = huge_pte_clear_uffd_wp(pte);
023bdd00 7067 huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
7da4d641 7068 pages++;
60dfaad6
PX
7069 } else {
7070 /* None pte */
7071 if (unlikely(uffd_wp))
7072 /* Safe to modify directly (none->non-present). */
7073 set_huge_pte_at(mm, address, ptep,
935d4f0c
RR
7074 make_pte_marker(PTE_MARKER_UFFD_WP),
7075 psize);
8f860591 7076 }
cb900f41 7077 spin_unlock(ptl);
8f860591 7078 }
d833352a 7079 /*
c8c06efa 7080 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
d833352a 7081 * may have cleared our pud entry and done put_page on the page table:
c8c06efa 7082 * once we release i_mmap_rwsem, another task can do the final put_page
dff11abe
MK
7083 * and that page table may then be reused and filled with junk. If we actually
7084 * did unshare a page of pmds, flush the range corresponding to the pud.
d833352a 7085 */
dff11abe 7086 if (shared_pmd)
ac46d4f3 7087 flush_hugetlb_tlb_range(vma, range.start, range.end);
dff11abe
MK
7088 else
7089 flush_hugetlb_tlb_range(vma, start, end);
0f10851e 7090 /*
1af5a810
AP
7091 * No need to call mmu_notifier_arch_invalidate_secondary_tlbs(); we are
7092 * downgrading page table protection, not changing it to point to a new
7093 * page.
0f10851e 7094 *
ee65728e 7095 * See Documentation/mm/mmu_notifier.rst
0f10851e 7096 */
83cde9e8 7097 i_mmap_unlock_write(vma->vm_file->f_mapping);
40549ba8 7098 hugetlb_vma_unlock_write(vma);
ac46d4f3 7099 mmu_notifier_invalidate_range_end(&range);
7da4d641 7100
d1751118 7101 return pages > 0 ? (pages << h->order) : pages;
8f860591
ZY
7102}
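/*
 * Illustrative note on the return value above (example numbers only):
 * "pages" counts huge pages, so the result is scaled to base pages via
 * "pages << h->order". Changing the protection of 3 huge pages of 2 MB
 * (order 9) therefore reports 3 << 9 = 1536 base pages; a negative value
 * is an errno such as -ENOMEM from the pgtable pre-allocation.
 */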
7103
33b8f84a
MK
7104/* Return true if reservation was successful, false otherwise. */
7105bool hugetlb_reserve_pages(struct inode *inode,
a1e78772 7106 long from, long to,
5a6fe125 7107 struct vm_area_struct *vma,
ca16d140 7108 vm_flags_t vm_flags)
e4e574b7 7109{
c5094ec7 7110 long chg = -1, add = -1;
a5516438 7111 struct hstate *h = hstate_inode(inode);
90481622 7112 struct hugepage_subpool *spool = subpool_inode(inode);
9119a41e 7113 struct resv_map *resv_map;
075a61d0 7114 struct hugetlb_cgroup *h_cg = NULL;
0db9d74e 7115 long gbl_reserve, regions_needed = 0;
e4e574b7 7116
63489f8e
MK
7117 /* This should never happen */
7118 if (from > to) {
7119 VM_WARN(1, "%s called with a negative range\n", __func__);
33b8f84a 7120 return false;
63489f8e
MK
7121 }
7122
8d9bfb26 7123 /*
e700898f
MK
7124 * vma specific semaphore used for pmd sharing and fault/truncation
7125 * synchronization
8d9bfb26
MK
7126 */
7127 hugetlb_vma_lock_alloc(vma);
7128
17c9d12e
MG
7129 /*
7130 * Only apply hugepage reservation if asked. At fault time, an
7131 * attempt will be made for VM_NORESERVE to allocate a page
90481622 7132 * without using reserves
17c9d12e 7133 */
ca16d140 7134 if (vm_flags & VM_NORESERVE)
33b8f84a 7135 return true;
17c9d12e 7136
a1e78772
MG
7137 /*
7138 * Shared mappings base their reservation on the number of pages that
7139 * are already allocated on behalf of the file. Private mappings need
7140 * to reserve the full area even if read-only as mprotect() may be
7141 * called to make the mapping read-write. Assume !vma is a shm mapping
7142 */
9119a41e 7143 if (!vma || vma->vm_flags & VM_MAYSHARE) {
f27a5136
MK
7144 /*
7145 * resv_map can not be NULL as hugetlb_reserve_pages is only
7146 * called for inodes for which resv_maps were created (see
7147 * hugetlbfs_get_inode).
7148 */
4e35f483 7149 resv_map = inode_resv_map(inode);
9119a41e 7150
0db9d74e 7151 chg = region_chg(resv_map, from, to, &regions_needed);
9119a41e 7152 } else {
e9fe92ae 7153 /* Private mapping. */
9119a41e 7154 resv_map = resv_map_alloc();
17c9d12e 7155 if (!resv_map)
8d9bfb26 7156 goto out_err;
17c9d12e 7157
a1e78772 7158 chg = to - from;
84afd99b 7159
17c9d12e
MG
7160 set_vma_resv_map(vma, resv_map);
7161 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
7162 }
7163
33b8f84a 7164 if (chg < 0)
c50ac050 7165 goto out_err;
8a630112 7166
33b8f84a
MK
7167 if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h),
7168 chg * pages_per_huge_page(h), &h_cg) < 0)
075a61d0 7169 goto out_err;
075a61d0
MA
7170
7171 if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) {
7172 /* For private mappings, the hugetlb_cgroup uncharge info hangs
7173 * of the resv_map.
7174 */
7175 resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h);
7176 }
7177
1c5ecae3
MK
7178 /*
7179 * There must be enough pages in the subpool for the mapping. If
7180 * the subpool has a minimum size, there may be some global
7181 * reservations already in place (gbl_reserve).
7182 */
7183 gbl_reserve = hugepage_subpool_get_pages(spool, chg);
33b8f84a 7184 if (gbl_reserve < 0)
075a61d0 7185 goto out_uncharge_cgroup;
5a6fe125
MG
7186
7187 /*
17c9d12e 7188 * Check enough hugepages are available for the reservation.
90481622 7189 * Hand the pages back to the subpool if there are not
5a6fe125 7190 */
33b8f84a 7191 if (hugetlb_acct_memory(h, gbl_reserve) < 0)
075a61d0 7192 goto out_put_pages;
17c9d12e
MG
7193
7194 /*
7195 * Account for the reservations made. Shared mappings record regions
7196 * that have reservations as they are shared by multiple VMAs.
7197 * When the last VMA disappears, the region map says how much
7198 * the reservation was and the page cache tells how much of
7199 * the reservation was consumed. Private mappings are per-VMA and
7200 * only the consumed reservations are tracked. When the VMA
7201 * disappears, the original reservation is the VMA size and the
7202 * consumed reservations are stored in the map. Hence, nothing
7203 * else has to be done for private mappings here
7204 */
33039678 7205 if (!vma || vma->vm_flags & VM_MAYSHARE) {
075a61d0 7206 add = region_add(resv_map, from, to, regions_needed, h, h_cg);
0db9d74e
MA
7207
7208 if (unlikely(add < 0)) {
7209 hugetlb_acct_memory(h, -gbl_reserve);
075a61d0 7210 goto out_put_pages;
0db9d74e 7211 } else if (unlikely(chg > add)) {
33039678
MK
7212 /*
7213 * pages in this range were added to the reserve
7214 * map between region_chg and region_add. This
d0ce0e47 7215 * indicates a race with alloc_hugetlb_folio. Adjust
33039678
MK
7216 * the subpool and reserve counts modified above
7217 * based on the difference.
7218 */
7219 long rsv_adjust;
7220
d85aecf2
ML
7221 /*
7222 * hugetlb_cgroup_uncharge_cgroup_rsvd() will put the
7223 * reference to h_cg->css. See comment below for detail.
7224 */
075a61d0
MA
7225 hugetlb_cgroup_uncharge_cgroup_rsvd(
7226 hstate_index(h),
7227 (chg - add) * pages_per_huge_page(h), h_cg);
7228
33039678
MK
7229 rsv_adjust = hugepage_subpool_put_pages(spool,
7230 chg - add);
7231 hugetlb_acct_memory(h, -rsv_adjust);
d85aecf2
ML
7232 } else if (h_cg) {
7233 /*
7234 * The file_regions will hold their own reference to
7235 * h_cg->css. So we should release the reference held
7236 * via hugetlb_cgroup_charge_cgroup_rsvd() when we are
7237 * done.
7238 */
7239 hugetlb_cgroup_put_rsvd_cgroup(h_cg);
33039678
MK
7240 }
7241 }
33b8f84a
MK
7242 return true;
7243
075a61d0
MA
7244out_put_pages:
7245 /* put back original number of pages, chg */
7246 (void)hugepage_subpool_put_pages(spool, chg);
7247out_uncharge_cgroup:
7248 hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h),
7249 chg * pages_per_huge_page(h), h_cg);
c50ac050 7250out_err:
8d9bfb26 7251 hugetlb_vma_lock_free(vma);
5e911373 7252 if (!vma || vma->vm_flags & VM_MAYSHARE)
0db9d74e
MA
7253 /* Only call region_abort if the region_chg succeeded but the
7254 * region_add failed or didn't run.
7255 */
7256 if (chg >= 0 && add < 0)
7257 region_abort(resv_map, from, to, regions_needed);
92fe9dcb 7258 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
f031dd27 7259 kref_put(&resv_map->refs, resv_map_release);
92fe9dcb
RR
7260 set_vma_resv_map(vma, NULL);
7261 }
33b8f84a 7262 return false;
a43a8c39
CK
7263}
7264
b5cec28d
MK
7265long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
7266 long freed)
a43a8c39 7267{
a5516438 7268 struct hstate *h = hstate_inode(inode);
4e35f483 7269 struct resv_map *resv_map = inode_resv_map(inode);
9119a41e 7270 long chg = 0;
90481622 7271 struct hugepage_subpool *spool = subpool_inode(inode);
1c5ecae3 7272 long gbl_reserve;
45c682a6 7273
f27a5136
MK
7274 /*
7275 * Since this routine can be called in the evict inode path for all
7276 * hugetlbfs inodes, resv_map could be NULL.
7277 */
b5cec28d
MK
7278 if (resv_map) {
7279 chg = region_del(resv_map, start, end);
7280 /*
7281 * region_del() can fail in the rare case where a region
7282 * must be split and another region descriptor can not be
7283 * allocated. If end == LONG_MAX, it will not fail.
7284 */
7285 if (chg < 0)
7286 return chg;
7287 }
7288
45c682a6 7289 spin_lock(&inode->i_lock);
e4c6f8be 7290 inode->i_blocks -= (blocks_per_huge_page(h) * freed);
45c682a6
KC
7291 spin_unlock(&inode->i_lock);
7292
1c5ecae3
MK
7293 /*
7294 * If the subpool has a minimum size, the number of global
7295 * reservations to be released may be adjusted.
dddf31a4
ML
7296 *
7297 * Note that !resv_map implies freed == 0. So (chg - freed)
7298 * won't go negative.
1c5ecae3
MK
7299 */
7300 gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
7301 hugetlb_acct_memory(h, -gbl_reserve);
b5cec28d
MK
7302
7303 return 0;
a43a8c39 7304}
93f70f90 7305
3212b535
SC
7306#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
7307static unsigned long page_table_shareable(struct vm_area_struct *svma,
7308 struct vm_area_struct *vma,
7309 unsigned long addr, pgoff_t idx)
7310{
7311 unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
7312 svma->vm_start;
7313 unsigned long sbase = saddr & PUD_MASK;
7314 unsigned long s_end = sbase + PUD_SIZE;
7315
7316 /* Allow segments to share if only one is marked locked */
e430a95a
SB
7317 unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED_MASK;
7318 unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED_MASK;
3212b535
SC
7319
7320 /*
7321 * match the virtual addresses, permission and the alignment of the
7322 * page table page.
131a79b4
MK
7323 *
7324 * Also, vma_lock (vm_private_data) is required for sharing.
3212b535
SC
7325 */
7326 if (pmd_index(addr) != pmd_index(saddr) ||
7327 vm_flags != svm_flags ||
131a79b4
MK
7328 !range_in_vma(svma, sbase, s_end) ||
7329 !svma->vm_private_data)
3212b535
SC
7330 return 0;
7331
7332 return saddr;
7333}
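/*
 * Worked example (illustrative; assumes 1 GB PUD regions): "saddr" is the
 * address in svma that maps the same file page as "addr" does in vma.
 * Sharing is only reported when addr and saddr share the same pmd index,
 * the covering range [sbase, s_end) lies entirely inside svma, the two
 * vmas have identical flags apart from mlock state, and svma has a
 * vma_lock (vm_private_data) allocated.
 */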
7334
bbff39cc 7335bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
3212b535 7336{
bbff39cc
MK
7337 unsigned long start = addr & PUD_MASK;
7338 unsigned long end = start + PUD_SIZE;
7339
8d9bfb26
MK
7340#ifdef CONFIG_USERFAULTFD
7341 if (uffd_disable_huge_pmd_share(vma))
7342 return false;
7343#endif
3212b535
SC
7344 /*
7345 * check on proper vm_flags and page table alignment
7346 */
8d9bfb26
MK
7347 if (!(vma->vm_flags & VM_MAYSHARE))
7348 return false;
bbff39cc 7349 if (!vma->vm_private_data) /* vma lock required for sharing */
8d9bfb26
MK
7350 return false;
7351 if (!range_in_vma(vma, start, end))
7352 return false;
7353 return true;
7354}
7355
017b1660
MK
7356/*
7357 * Determine if start,end range within vma could be mapped by shared pmd.
7358 * If yes, adjust start and end to cover range associated with possible
7359 * shared pmd mappings.
7360 */
7361void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
7362 unsigned long *start, unsigned long *end)
7363{
a1ba9da8
LX
7364 unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE),
7365 v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
017b1660 7366
a1ba9da8 7367 /*
f0953a1b
IM
7368 * vma needs to span at least one aligned PUD size, and the range
7369 * must be at least partially within it.
a1ba9da8
LX
7370 */
7371 if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
7372 (*end <= v_start) || (*start >= v_end))
017b1660
MK
7373 return;
7374
75802ca6 7375 /* Extend the range to be PUD aligned for a worst case scenario */
a1ba9da8
LX
7376 if (*start > v_start)
7377 *start = ALIGN_DOWN(*start, PUD_SIZE);
017b1660 7378
a1ba9da8
LX
7379 if (*end < v_end)
7380 *end = ALIGN(*end, PUD_SIZE);
017b1660
MK
7381}
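/*
 * Worked example (illustrative addresses; PUD_SIZE assumed to be 1 GB): a
 * range of [0x40100000, 0x40300000) inside a sufficiently large
 * VM_MAYSHARE vma is widened to [0x40000000, 0x80000000), so that a
 * shared PMD page covering either end of the original range is included
 * in the flush or unmap that follows.
 */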
7382
3212b535
SC
7383/*
7384 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
7385 * and returns the corresponding pte. While this is not necessary for the
7386 * !shared pmd case because we can allocate the pmd later as well, it makes the
3a47c54f
MK
7387 * code much cleaner. pmd allocation is essential for the shared case because
7388 * pud has to be populated inside the same i_mmap_rwsem section - otherwise
7389 * racing tasks could either miss the sharing (see huge_pte_offset) or select a
7390 * bad pmd for sharing.
3212b535 7391 */
aec44e0f
PX
7392pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
7393 unsigned long addr, pud_t *pud)
3212b535 7394{
3212b535
SC
7395 struct address_space *mapping = vma->vm_file->f_mapping;
7396 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
7397 vma->vm_pgoff;
7398 struct vm_area_struct *svma;
7399 unsigned long saddr;
7400 pte_t *spte = NULL;
7401 pte_t *pte;
7402
3a47c54f 7403 i_mmap_lock_read(mapping);
3212b535
SC
7404 vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
7405 if (svma == vma)
7406 continue;
7407
7408 saddr = page_table_shareable(svma, vma, addr, idx);
7409 if (saddr) {
9c67a207
PX
7410 spte = hugetlb_walk(svma, saddr,
7411 vma_mmu_pagesize(svma));
3212b535
SC
7412 if (spte) {
7413 get_page(virt_to_page(spte));
7414 break;
7415 }
7416 }
7417 }
7418
7419 if (!spte)
7420 goto out;
7421
349d1670 7422 spin_lock(&mm->page_table_lock);
dc6c9a35 7423 if (pud_none(*pud)) {
3212b535
SC
7424 pud_populate(mm, pud,
7425 (pmd_t *)((unsigned long)spte & PAGE_MASK));
c17b1f42 7426 mm_inc_nr_pmds(mm);
dc6c9a35 7427 } else {
3212b535 7428 put_page(virt_to_page(spte));
dc6c9a35 7429 }
349d1670 7430 spin_unlock(&mm->page_table_lock);
3212b535
SC
7431out:
7432 pte = (pte_t *)pmd_alloc(mm, pud, addr);
3a47c54f 7433 i_mmap_unlock_read(mapping);
3212b535
SC
7434 return pte;
7435}
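/*
 * Descriptive note (editorial summary, not from the original source):
 * sharing works by scanning the file's interval tree for another vma that
 * maps the same PUD-sized file range, taking a reference on that vma's
 * PMD page, and populating our own PUD entry with it; the final
 * pmd_alloc() then resolves to the shared PMD page.
 */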
7436
7437/*
7438 * unmap huge page backed by shared pte.
7439 *
7440 * The hugetlb pte page is ref counted at the time of mapping. If the pte is
7441 * shared, as indicated by page_count > 1, unmap is achieved by clearing the pud
7442 * and decrementing the ref count. If count == 1, the pte page is not shared.
7443 *
3a47c54f 7444 * Called with page table lock held.
3212b535
SC
7445 *
7446 * returns: 1 successfully unmapped a shared pte page
7447 * 0 the underlying pte page is not shared, or it is the last user
7448 */
34ae204f 7449int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
4ddb4d91 7450 unsigned long addr, pte_t *ptep)
3212b535 7451{
4ddb4d91
MK
7452 pgd_t *pgd = pgd_offset(mm, addr);
7453 p4d_t *p4d = p4d_offset(pgd, addr);
7454 pud_t *pud = pud_offset(p4d, addr);
3212b535 7455
34ae204f 7456 i_mmap_assert_write_locked(vma->vm_file->f_mapping);
40549ba8 7457 hugetlb_vma_assert_locked(vma);
3212b535
SC
7458 BUG_ON(page_count(virt_to_page(ptep)) == 0);
7459 if (page_count(virt_to_page(ptep)) == 1)
7460 return 0;
7461
7462 pud_clear(pud);
7463 put_page(virt_to_page(ptep));
dc6c9a35 7464 mm_dec_nr_pmds(mm);
3212b535
SC
7465 return 1;
7466}
c1991e07 7467
9e5fc74c 7468#else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
8d9bfb26 7469
aec44e0f
PX
7470pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
7471 unsigned long addr, pud_t *pud)
9e5fc74c
SC
7472{
7473 return NULL;
7474}
e81f2d22 7475
34ae204f 7476int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
4ddb4d91 7477 unsigned long addr, pte_t *ptep)
e81f2d22
ZZ
7478{
7479 return 0;
7480}
017b1660
MK
7481
7482void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
7483 unsigned long *start, unsigned long *end)
7484{
7485}
c1991e07
PX
7486
7487bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
7488{
7489 return false;
7490}
3212b535
SC
7491#endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
7492
9e5fc74c 7493#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
aec44e0f 7494pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
9e5fc74c
SC
7495 unsigned long addr, unsigned long sz)
7496{
7497 pgd_t *pgd;
c2febafc 7498 p4d_t *p4d;
9e5fc74c
SC
7499 pud_t *pud;
7500 pte_t *pte = NULL;
7501
7502 pgd = pgd_offset(mm, addr);
f4f0a3d8
KS
7503 p4d = p4d_alloc(mm, pgd, addr);
7504 if (!p4d)
7505 return NULL;
c2febafc 7506 pud = pud_alloc(mm, p4d, addr);
9e5fc74c
SC
7507 if (pud) {
7508 if (sz == PUD_SIZE) {
7509 pte = (pte_t *)pud;
7510 } else {
7511 BUG_ON(sz != PMD_SIZE);
c1991e07 7512 if (want_pmd_share(vma, addr) && pud_none(*pud))
aec44e0f 7513 pte = huge_pmd_share(mm, vma, addr, pud);
9e5fc74c
SC
7514 else
7515 pte = (pte_t *)pmd_alloc(mm, pud, addr);
7516 }
7517 }
191fcdb6
JH
7518
7519 if (pte) {
7520 pte_t pteval = ptep_get_lockless(pte);
7521
7522 BUG_ON(pte_present(pteval) && !pte_huge(pteval));
7523 }
9e5fc74c
SC
7524
7525 return pte;
7526}
7527
9b19df29
PA
7528/*
7529 * huge_pte_offset() - Walk the page table to resolve the hugepage
7530 * entry at address @addr
7531 *
8ac0b81a
LX
7532 * Return: Pointer to page table entry (PUD or PMD) for
7533 * address @addr, or NULL if a !p*d_present() entry is encountered and the
9b19df29
PA
7534 * size @sz doesn't match the hugepage size at this level of the page
7535 * table.
7536 */
7868a208
PA
7537pte_t *huge_pte_offset(struct mm_struct *mm,
7538 unsigned long addr, unsigned long sz)
9e5fc74c
SC
7539{
7540 pgd_t *pgd;
c2febafc 7541 p4d_t *p4d;
8ac0b81a
LX
7542 pud_t *pud;
7543 pmd_t *pmd;
9e5fc74c
SC
7544
7545 pgd = pgd_offset(mm, addr);
c2febafc
KS
7546 if (!pgd_present(*pgd))
7547 return NULL;
7548 p4d = p4d_offset(pgd, addr);
7549 if (!p4d_present(*p4d))
7550 return NULL;
9b19df29 7551
c2febafc 7552 pud = pud_offset(p4d, addr);
8ac0b81a
LX
7553 if (sz == PUD_SIZE)
7554 /* must be pud huge, non-present or none */
c2febafc 7555 return (pte_t *)pud;
8ac0b81a 7556 if (!pud_present(*pud))
9b19df29 7557 return NULL;
8ac0b81a 7558 /* must have a valid entry and size to go further */
9b19df29 7559
8ac0b81a
LX
7560 pmd = pmd_offset(pud, addr);
7561 /* must be pmd huge, non-present or none */
7562 return (pte_t *)pmd;
9e5fc74c
SC
7563}
7564
e95a9851
MK
7565/*
7566 * Return a mask that can be used to update an address to the last huge
7567 * page in a page table page mapping size. Used to skip non-present
7568 * page table entries when linearly scanning address ranges. Architectures
7569 * with unique huge page to page table relationships can define their own
7570 * version of this routine.
7571 */
7572unsigned long hugetlb_mask_last_page(struct hstate *h)
7573{
7574 unsigned long hp_size = huge_page_size(h);
7575
7576 if (hp_size == PUD_SIZE)
7577 return P4D_SIZE - PUD_SIZE;
7578 else if (hp_size == PMD_SIZE)
7579 return PUD_SIZE - PMD_SIZE;
7580 else
7581 return 0UL;
7582}
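/*
 * Illustrative example (assumes the common x86-64 sizes): for 2 MB huge
 * pages the mask is PUD_SIZE - PMD_SIZE = 0x3fe00000, so the scan loops
 * can use "address |= mask" to jump to the last PMD slot of the current
 * PUD region and let "address += psize" advance to the next PUD entry in
 * a single step.
 */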
7583
7584#else
7585
7586/* See description above. Architectures can provide their own version. */
7587__weak unsigned long hugetlb_mask_last_page(struct hstate *h)
7588{
4ddb4d91
MK
7589#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
7590 if (huge_page_size(h) == PMD_SIZE)
7591 return PUD_SIZE - PMD_SIZE;
7592#endif
e95a9851
MK
7593 return 0UL;
7594}
7595
61f77eda
NH
7596#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
7597
7598/*
7599 * These functions are overwritable if your architecture needs its own
7600 * behavior.
7601 */
9747b9e9 7602bool isolate_hugetlb(struct folio *folio, struct list_head *list)
31caf665 7603{
9747b9e9 7604 bool ret = true;
bcc54222 7605
db71ef79 7606 spin_lock_irq(&hugetlb_lock);
6aa3a920
SK
7607 if (!folio_test_hugetlb(folio) ||
7608 !folio_test_hugetlb_migratable(folio) ||
7609 !folio_try_get(folio)) {
9747b9e9 7610 ret = false;
bcc54222
NH
7611 goto unlock;
7612 }
6aa3a920
SK
7613 folio_clear_hugetlb_migratable(folio);
7614 list_move_tail(&folio->lru, list);
bcc54222 7615unlock:
db71ef79 7616 spin_unlock_irq(&hugetlb_lock);
bcc54222 7617 return ret;
31caf665
NH
7618}
7619
04bac040 7620int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison)
25182f05
NH
7621{
7622 int ret = 0;
7623
7624 *hugetlb = false;
7625 spin_lock_irq(&hugetlb_lock);
04bac040 7626 if (folio_test_hugetlb(folio)) {
25182f05 7627 *hugetlb = true;
04bac040 7628 if (folio_test_hugetlb_freed(folio))
b283d983 7629 ret = 0;
04bac040
SK
7630 else if (folio_test_hugetlb_migratable(folio) || unpoison)
7631 ret = folio_try_get(folio);
0ed950d1
NH
7632 else
7633 ret = -EBUSY;
25182f05
NH
7634 }
7635 spin_unlock_irq(&hugetlb_lock);
7636 return ret;
7637}
7638
e591ef7d
NH
7639int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
7640 bool *migratable_cleared)
405ce051
NH
7641{
7642 int ret;
7643
7644 spin_lock_irq(&hugetlb_lock);
e591ef7d 7645 ret = __get_huge_page_for_hwpoison(pfn, flags, migratable_cleared);
405ce051
NH
7646 spin_unlock_irq(&hugetlb_lock);
7647 return ret;
7648}
7649
ea8e72f4 7650void folio_putback_active_hugetlb(struct folio *folio)
31caf665 7651{
db71ef79 7652 spin_lock_irq(&hugetlb_lock);
ea8e72f4
SK
7653 folio_set_hugetlb_migratable(folio);
7654 list_move_tail(&folio->lru, &(folio_hstate(folio))->hugepage_activelist);
db71ef79 7655 spin_unlock_irq(&hugetlb_lock);
ea8e72f4 7656 folio_put(folio);
31caf665 7657}
ab5ac90a 7658
345c62d1 7659void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason)
ab5ac90a 7660{
345c62d1 7661 struct hstate *h = folio_hstate(old_folio);
ab5ac90a 7662
345c62d1
SK
7663 hugetlb_cgroup_migrate(old_folio, new_folio);
7664 set_page_owner_migrate_reason(&new_folio->page, reason);
ab5ac90a
MH
7665
7666 /*
345c62d1 7667 * transfer temporary state of the new hugetlb folio. This is
ab5ac90a
MH
7668 * the reverse of other transitions because the new folio is going to
7669 * be final while the old one will be freed, so it takes over
7670 * the temporary status.
7671 *
7672 * Also note that we have to transfer the per-node surplus state
7673 * here as well otherwise the global surplus count will not match
7674 * the per-node's.
7675 */
345c62d1
SK
7676 if (folio_test_hugetlb_temporary(new_folio)) {
7677 int old_nid = folio_nid(old_folio);
7678 int new_nid = folio_nid(new_folio);
7679
345c62d1
SK
7680 folio_set_hugetlb_temporary(old_folio);
7681 folio_clear_hugetlb_temporary(new_folio);
ab5ac90a 7682
ab5ac90a 7683
5af1ab1d
ML
7684 /*
7685 * There is no need to transfer the per-node surplus state
7686 * when we do not cross the node.
7687 */
7688 if (new_nid == old_nid)
7689 return;
db71ef79 7690 spin_lock_irq(&hugetlb_lock);
ab5ac90a
MH
7691 if (h->surplus_huge_pages_node[old_nid]) {
7692 h->surplus_huge_pages_node[old_nid]--;
7693 h->surplus_huge_pages_node[new_nid]++;
7694 }
db71ef79 7695 spin_unlock_irq(&hugetlb_lock);
ab5ac90a
MH
7696 }
7697}
cf11e85f 7698
b30c14cd
JH
7699static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
7700 unsigned long start,
7701 unsigned long end)
6dfeaff9
PX
7702{
7703 struct hstate *h = hstate_vma(vma);
7704 unsigned long sz = huge_page_size(h);
7705 struct mm_struct *mm = vma->vm_mm;
7706 struct mmu_notifier_range range;
b30c14cd 7707 unsigned long address;
6dfeaff9
PX
7708 spinlock_t *ptl;
7709 pte_t *ptep;
7710
7711 if (!(vma->vm_flags & VM_MAYSHARE))
7712 return;
7713
6dfeaff9
PX
7714 if (start >= end)
7715 return;
7716
9c8bbfac 7717 flush_cache_range(vma, start, end);
6dfeaff9
PX
7718 /*
7719 * No need to call adjust_range_if_pmd_sharing_possible(), because
7720 * we have already done the PUD_SIZE alignment.
7721 */
7d4a8be0 7722 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
6dfeaff9
PX
7723 start, end);
7724 mmu_notifier_invalidate_range_start(&range);
40549ba8 7725 hugetlb_vma_lock_write(vma);
6dfeaff9
PX
7726 i_mmap_lock_write(vma->vm_file->f_mapping);
7727 for (address = start; address < end; address += PUD_SIZE) {
9c67a207 7728 ptep = hugetlb_walk(vma, address, sz);
6dfeaff9
PX
7729 if (!ptep)
7730 continue;
7731 ptl = huge_pte_lock(h, mm, ptep);
4ddb4d91 7732 huge_pmd_unshare(mm, vma, address, ptep);
6dfeaff9
PX
7733 spin_unlock(ptl);
7734 }
7735 flush_hugetlb_tlb_range(vma, start, end);
7736 i_mmap_unlock_write(vma->vm_file->f_mapping);
40549ba8 7737 hugetlb_vma_unlock_write(vma);
6dfeaff9 7738 /*
1af5a810 7739 * No need to call mmu_notifier_arch_invalidate_secondary_tlbs(), see
ee65728e 7740 * Documentation/mm/mmu_notifier.rst.
6dfeaff9
PX
7741 */
7742 mmu_notifier_invalidate_range_end(&range);
7743}
7744
b30c14cd
JH
7745/*
7746 * This function will unconditionally remove all the shared pmd pgtable entries
7747 * within the specific vma for a hugetlbfs memory range.
7748 */
7749void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
7750{
7751 hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE),
7752 ALIGN_DOWN(vma->vm_end, PUD_SIZE));
7753}
7754
cf11e85f 7755#ifdef CONFIG_CMA
cf11e85f
RG
7756static bool cma_reserve_called __initdata;
7757
7758static int __init cmdline_parse_hugetlb_cma(char *p)
7759{
38e719ab
BW
7760 int nid, count = 0;
7761 unsigned long tmp;
7762 char *s = p;
7763
7764 while (*s) {
7765 if (sscanf(s, "%lu%n", &tmp, &count) != 1)
7766 break;
7767
7768 if (s[count] == ':') {
f9317f77 7769 if (tmp >= MAX_NUMNODES)
38e719ab 7770 break;
f9317f77 7771 nid = array_index_nospec(tmp, MAX_NUMNODES);
38e719ab
BW
7772
7773 s += count + 1;
7774 tmp = memparse(s, &s);
7775 hugetlb_cma_size_in_node[nid] = tmp;
7776 hugetlb_cma_size += tmp;
7777
7778 /*
7780 * Skip the separator if we have one, otherwise
7780 * break the parsing.
7781 */
7782 if (*s == ',')
7783 s++;
7784 else
7785 break;
7786 } else {
7787 hugetlb_cma_size = memparse(p, &p);
7788 break;
7789 }
7790 }
7791
cf11e85f
RG
7792 return 0;
7793}
7794
7795early_param("hugetlb_cma", cmdline_parse_hugetlb_cma);
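/*
 * Illustrative command-line forms accepted by the parser above (the sizes
 * are example values only):
 *
 *	hugetlb_cma=4G		one global CMA size, spread over online nodes
 *	hugetlb_cma=0:2G,1:2G	per-node sizes in "<nid>:<size>" form
 */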
7796
7797void __init hugetlb_cma_reserve(int order)
7798{
7799 unsigned long size, reserved, per_node;
38e719ab 7800 bool node_specific_cma_alloc = false;
cf11e85f
RG
7801 int nid;
7802
ce70cfb1
AK
7803 /*
7804 * HugeTLB CMA reservation is required for gigantic
7805 * huge pages which could not be allocated via the
7806 * page allocator. Just warn if there is any change
7807 * breaking this assumption.
7808 */
7809 VM_WARN_ON(order <= MAX_PAGE_ORDER);
cf11e85f
RG
7810 cma_reserve_called = true;
7811
38e719ab
BW
7812 if (!hugetlb_cma_size)
7813 return;
7814
7815 for (nid = 0; nid < MAX_NUMNODES; nid++) {
7816 if (hugetlb_cma_size_in_node[nid] == 0)
7817 continue;
7818
30a51400 7819 if (!node_online(nid)) {
38e719ab
BW
7820 pr_warn("hugetlb_cma: invalid node %d specified\n", nid);
7821 hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
7822 hugetlb_cma_size_in_node[nid] = 0;
7823 continue;
7824 }
7825
7826 if (hugetlb_cma_size_in_node[nid] < (PAGE_SIZE << order)) {
7827 pr_warn("hugetlb_cma: cma area of node %d should be at least %lu MiB\n",
7828 nid, (PAGE_SIZE << order) / SZ_1M);
7829 hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
7830 hugetlb_cma_size_in_node[nid] = 0;
7831 } else {
7832 node_specific_cma_alloc = true;
7833 }
7834 }
7835
7836 /* Validate the CMA size again in case some invalid nodes were specified. */
cf11e85f
RG
7837 if (!hugetlb_cma_size)
7838 return;
7839
7840 if (hugetlb_cma_size < (PAGE_SIZE << order)) {
7841 pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n",
7842 (PAGE_SIZE << order) / SZ_1M);
a01f4390 7843 hugetlb_cma_size = 0;
cf11e85f
RG
7844 return;
7845 }
7846
38e719ab
BW
7847 if (!node_specific_cma_alloc) {
7848 /*
7849 * If a 3 GB area is requested on a machine with 4 NUMA nodes,
7850 * allocate 1 GB on the first three nodes and ignore the last one.
7851 */
7852 per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes);
7853 pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n",
7854 hugetlb_cma_size / SZ_1M, per_node / SZ_1M);
7855 }
cf11e85f
RG
7856
7857 reserved = 0;
30a51400 7858 for_each_online_node(nid) {
cf11e85f 7859 int res;
2281f797 7860 char name[CMA_MAX_NAME];
cf11e85f 7861
38e719ab
BW
7862 if (node_specific_cma_alloc) {
7863 if (hugetlb_cma_size_in_node[nid] == 0)
7864 continue;
7865
7866 size = hugetlb_cma_size_in_node[nid];
7867 } else {
7868 size = min(per_node, hugetlb_cma_size - reserved);
7869 }
7870
cf11e85f
RG
7871 size = round_up(size, PAGE_SIZE << order);
7872
2281f797 7873 snprintf(name, sizeof(name), "hugetlb%d", nid);
a01f4390
MK
7874 /*
7875 * Note that 'order per bit' is based on smallest size that
7876 * may be returned to CMA allocator in the case of
7877 * huge page demotion.
7878 */
7879 res = cma_declare_contiguous_nid(0, size, 0,
7880 PAGE_SIZE << HUGETLB_PAGE_ORDER,
29d0f41d 7881 0, false, name,
cf11e85f
RG
7882 &hugetlb_cma[nid], nid);
7883 if (res) {
7884 pr_warn("hugetlb_cma: reservation failed: err %d, node %d",
7885 res, nid);
7886 continue;
7887 }
7888
7889 reserved += size;
7890 pr_info("hugetlb_cma: reserved %lu MiB on node %d\n",
7891 size / SZ_1M, nid);
7892
7893 if (reserved >= hugetlb_cma_size)
7894 break;
7895 }
a01f4390
MK
7896
7897 if (!reserved)
7898 /*
7899 * hugetlb_cma_size is used to determine if allocations from
7900 * cma are possible. Set to zero if no cma regions are set up.
7901 */
7902 hugetlb_cma_size = 0;
cf11e85f
RG
7903}
7904
263b8998 7905static void __init hugetlb_cma_check(void)
cf11e85f
RG
7906{
7907 if (!hugetlb_cma_size || cma_reserve_called)
7908 return;
7909
7910 pr_warn("hugetlb_cma: the option isn't supported by current arch\n");
7911}
7912
7913#endif /* CONFIG_CMA */