Merge tag 'x86_urgent_for_v5.12_rc3' of git://git.kernel.org/pub/scm/linux/kernel...
[linux-block.git] / mm / hugetlb.c
CommitLineData
457c8996 1// SPDX-License-Identifier: GPL-2.0-only
1da177e4
LT
2/*
3 * Generic hugetlb support.
6d49e352 4 * (C) Nadia Yvette Chambers, April 2004
1da177e4 5 */
1da177e4
LT
6#include <linux/list.h>
7#include <linux/init.h>
1da177e4 8#include <linux/mm.h>
e1759c21 9#include <linux/seq_file.h>
1da177e4
LT
10#include <linux/sysctl.h>
11#include <linux/highmem.h>
cddb8a5c 12#include <linux/mmu_notifier.h>
1da177e4 13#include <linux/nodemask.h>
63551ae0 14#include <linux/pagemap.h>
5da7ca86 15#include <linux/mempolicy.h>
3b32123d 16#include <linux/compiler.h>
aea47ff3 17#include <linux/cpuset.h>
3935baa9 18#include <linux/mutex.h>
97ad1087 19#include <linux/memblock.h>
a3437870 20#include <linux/sysfs.h>
5a0e3ad6 21#include <linux/slab.h>
bbe88753 22#include <linux/sched/mm.h>
63489f8e 23#include <linux/mmdebug.h>
174cd4b1 24#include <linux/sched/signal.h>
0fe6e20b 25#include <linux/rmap.h>
c6247f72 26#include <linux/string_helpers.h>
fd6a03ed
NH
27#include <linux/swap.h>
28#include <linux/swapops.h>
8382d914 29#include <linux/jhash.h>
98fa15f3 30#include <linux/numa.h>
c77c0a8a 31#include <linux/llist.h>
cf11e85f 32#include <linux/cma.h>
d6606683 33
63551ae0 34#include <asm/page.h>
ca15ca40 35#include <asm/pgalloc.h>
24669e58 36#include <asm/tlb.h>
63551ae0 37
24669e58 38#include <linux/io.h>
63551ae0 39#include <linux/hugetlb.h>
9dd540e2 40#include <linux/hugetlb_cgroup.h>
9a305230 41#include <linux/node.h>
1a1aad8a 42#include <linux/userfaultfd_k.h>
ab5ac90a 43#include <linux/page_owner.h>
7835e98b 44#include "internal.h"
1da177e4 45
c3f38a38 46int hugetlb_max_hstate __read_mostly;
e5ff2159
AK
47unsigned int default_hstate_idx;
48struct hstate hstates[HUGE_MAX_HSTATE];
cf11e85f 49
dbda8fea 50#ifdef CONFIG_CMA
cf11e85f 51static struct cma *hugetlb_cma[MAX_NUMNODES];
dbda8fea
BS
52#endif
53static unsigned long hugetlb_cma_size __initdata;
cf11e85f 54
641844f5
NH
55/*
56 * Minimum page order among possible hugepage sizes, set to a proper value
57 * at boot time.
58 */
59static unsigned int minimum_order __read_mostly = UINT_MAX;
e5ff2159 60
53ba51d2
JT
61__initdata LIST_HEAD(huge_boot_pages);
62
e5ff2159
AK
63/* for command line parsing */
64static struct hstate * __initdata parsed_hstate;
65static unsigned long __initdata default_hstate_max_huge_pages;
9fee021d 66static bool __initdata parsed_valid_hugepagesz = true;
282f4214 67static bool __initdata parsed_default_hugepagesz;
e5ff2159 68
3935baa9 69/*
31caf665
NH
70 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
71 * free_huge_pages, and surplus_huge_pages.
3935baa9 72 */
c3f38a38 73DEFINE_SPINLOCK(hugetlb_lock);
0bd0f9fb 74
8382d914
DB
75/*
76 * Serializes faults on the same logical page. This is used to
77 * prevent spurious OOMs when the hugepage pool is fully utilized.
78 */
79static int num_fault_mutexes;
c672c7f2 80struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
8382d914 81
7ca02d0a
MK
82/* Forward declaration */
83static int hugetlb_acct_memory(struct hstate *h, long delta);
84
1d88433b 85static inline bool subpool_is_free(struct hugepage_subpool *spool)
90481622 86{
1d88433b
ML
87 if (spool->count)
88 return false;
89 if (spool->max_hpages != -1)
90 return spool->used_hpages == 0;
91 if (spool->min_hpages != -1)
92 return spool->rsv_hpages == spool->min_hpages;
93
94 return true;
95}
90481622 96
1d88433b
ML
97static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
98{
90481622
DG
99 spin_unlock(&spool->lock);
100
101 /* If no pages are used, and no other handles to the subpool
7c8de358 102 * remain, give up any reservations based on minimum size and
7ca02d0a 103 * free the subpool */
1d88433b 104 if (subpool_is_free(spool)) {
7ca02d0a
MK
105 if (spool->min_hpages != -1)
106 hugetlb_acct_memory(spool->hstate,
107 -spool->min_hpages);
90481622 108 kfree(spool);
7ca02d0a 109 }
90481622
DG
110}
111
7ca02d0a
MK
112struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
113 long min_hpages)
90481622
DG
114{
115 struct hugepage_subpool *spool;
116
c6a91820 117 spool = kzalloc(sizeof(*spool), GFP_KERNEL);
90481622
DG
118 if (!spool)
119 return NULL;
120
121 spin_lock_init(&spool->lock);
122 spool->count = 1;
7ca02d0a
MK
123 spool->max_hpages = max_hpages;
124 spool->hstate = h;
125 spool->min_hpages = min_hpages;
126
127 if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
128 kfree(spool);
129 return NULL;
130 }
131 spool->rsv_hpages = min_hpages;
90481622
DG
132
133 return spool;
134}
135
136void hugepage_put_subpool(struct hugepage_subpool *spool)
137{
138 spin_lock(&spool->lock);
139 BUG_ON(!spool->count);
140 spool->count--;
141 unlock_or_release_subpool(spool);
142}
143
1c5ecae3
MK
144/*
145 * Subpool accounting for allocating and reserving pages.
146 * Return -ENOMEM if there are not enough resources to satisfy the
9e7ee400 147 * request. Otherwise, return the number of pages by which the
1c5ecae3
MK
148 * global pools must be adjusted (upward). The returned value may
149 * only be different than the passed value (delta) in the case where
7c8de358 150 * a subpool minimum size must be maintained.
1c5ecae3
MK
151 */
152static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
90481622
DG
153 long delta)
154{
1c5ecae3 155 long ret = delta;
90481622
DG
156
157 if (!spool)
1c5ecae3 158 return ret;
90481622
DG
159
160 spin_lock(&spool->lock);
1c5ecae3
MK
161
162 if (spool->max_hpages != -1) { /* maximum size accounting */
163 if ((spool->used_hpages + delta) <= spool->max_hpages)
164 spool->used_hpages += delta;
165 else {
166 ret = -ENOMEM;
167 goto unlock_ret;
168 }
90481622 169 }
90481622 170
09a95e29
MK
171 /* minimum size accounting */
172 if (spool->min_hpages != -1 && spool->rsv_hpages) {
1c5ecae3
MK
173 if (delta > spool->rsv_hpages) {
174 /*
175 * Asking for more reserves than those already taken on
176 * behalf of subpool. Return difference.
177 */
178 ret = delta - spool->rsv_hpages;
179 spool->rsv_hpages = 0;
180 } else {
181 ret = 0; /* reserves already accounted for */
182 spool->rsv_hpages -= delta;
183 }
184 }
185
186unlock_ret:
187 spin_unlock(&spool->lock);
90481622
DG
188 return ret;
189}
190
1c5ecae3
MK
191/*
192 * Subpool accounting for freeing and unreserving pages.
193 * Return the number of global page reservations that must be dropped.
194 * The return value may only be different than the passed value (delta)
195 * in the case where a subpool minimum size must be maintained.
196 */
197static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
90481622
DG
198 long delta)
199{
1c5ecae3
MK
200 long ret = delta;
201
90481622 202 if (!spool)
1c5ecae3 203 return delta;
90481622
DG
204
205 spin_lock(&spool->lock);
1c5ecae3
MK
206
207 if (spool->max_hpages != -1) /* maximum size accounting */
208 spool->used_hpages -= delta;
209
09a95e29
MK
210 /* minimum size accounting */
211 if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
1c5ecae3
MK
212 if (spool->rsv_hpages + delta <= spool->min_hpages)
213 ret = 0;
214 else
215 ret = spool->rsv_hpages + delta - spool->min_hpages;
216
217 spool->rsv_hpages += delta;
218 if (spool->rsv_hpages > spool->min_hpages)
219 spool->rsv_hpages = spool->min_hpages;
220 }
221
222 /*
223 * If hugetlbfs_put_super couldn't free spool due to an outstanding
224 * quota reference, free it now.
225 */
90481622 226 unlock_or_release_subpool(spool);
1c5ecae3
MK
227
228 return ret;
90481622
DG
229}
230
231static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
232{
233 return HUGETLBFS_SB(inode->i_sb)->spool;
234}
235
236static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
237{
496ad9aa 238 return subpool_inode(file_inode(vma->vm_file));
90481622
DG
239}
240
0db9d74e
MA
241/* Helper that removes a struct file_region from the resv_map cache and returns
242 * it for use.
243 */
244static struct file_region *
245get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
246{
247 struct file_region *nrg = NULL;
248
249 VM_BUG_ON(resv->region_cache_count <= 0);
250
251 resv->region_cache_count--;
252 nrg = list_first_entry(&resv->region_cache, struct file_region, link);
0db9d74e
MA
253 list_del(&nrg->link);
254
255 nrg->from = from;
256 nrg->to = to;
257
258 return nrg;
259}
260
075a61d0
MA
261static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg,
262 struct file_region *rg)
263{
264#ifdef CONFIG_CGROUP_HUGETLB
265 nrg->reservation_counter = rg->reservation_counter;
266 nrg->css = rg->css;
267 if (rg->css)
268 css_get(rg->css);
269#endif
270}
271
272/* Helper that records hugetlb_cgroup uncharge info. */
273static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
274 struct hstate *h,
275 struct resv_map *resv,
276 struct file_region *nrg)
277{
278#ifdef CONFIG_CGROUP_HUGETLB
279 if (h_cg) {
280 nrg->reservation_counter =
281 &h_cg->rsvd_hugepage[hstate_index(h)];
282 nrg->css = &h_cg->css;
283 if (!resv->pages_per_hpage)
284 resv->pages_per_hpage = pages_per_huge_page(h);
285 /* pages_per_hpage should be the same for all entries in
286 * a resv_map.
287 */
288 VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));
289 } else {
290 nrg->reservation_counter = NULL;
291 nrg->css = NULL;
292 }
293#endif
294}
295
a9b3f867
MA
296static bool has_same_uncharge_info(struct file_region *rg,
297 struct file_region *org)
298{
299#ifdef CONFIG_CGROUP_HUGETLB
300 return rg && org &&
301 rg->reservation_counter == org->reservation_counter &&
302 rg->css == org->css;
303
304#else
305 return true;
306#endif
307}
308
309static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
310{
311 struct file_region *nrg = NULL, *prg = NULL;
312
313 prg = list_prev_entry(rg, link);
314 if (&prg->link != &resv->regions && prg->to == rg->from &&
315 has_same_uncharge_info(prg, rg)) {
316 prg->to = rg->to;
317
318 list_del(&rg->link);
319 kfree(rg);
320
7db5e7b6 321 rg = prg;
a9b3f867
MA
322 }
323
324 nrg = list_next_entry(rg, link);
325 if (&nrg->link != &resv->regions && nrg->from == rg->to &&
326 has_same_uncharge_info(nrg, rg)) {
327 nrg->from = rg->from;
328
329 list_del(&rg->link);
330 kfree(rg);
a9b3f867
MA
331 }
332}
333
2103cf9c
PX
334static inline long
335hugetlb_resv_map_add(struct resv_map *map, struct file_region *rg, long from,
336 long to, struct hstate *h, struct hugetlb_cgroup *cg,
337 long *regions_needed)
338{
339 struct file_region *nrg;
340
341 if (!regions_needed) {
342 nrg = get_file_region_entry_from_cache(map, from, to);
343 record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg);
344 list_add(&nrg->link, rg->link.prev);
345 coalesce_file_region(map, nrg);
346 } else
347 *regions_needed += 1;
348
349 return to - from;
350}
351
972a3da3
WY
352/*
353 * Must be called with resv->lock held.
354 *
355 * Calling this with regions_needed != NULL will count the number of pages
356 * to be added but will not modify the linked list. And regions_needed will
357 * indicate the number of file_regions needed in the cache to carry out to add
358 * the regions for this range.
d75c6af9
MA
359 */
360static long add_reservation_in_range(struct resv_map *resv, long f, long t,
075a61d0 361 struct hugetlb_cgroup *h_cg,
972a3da3 362 struct hstate *h, long *regions_needed)
d75c6af9 363{
0db9d74e 364 long add = 0;
d75c6af9 365 struct list_head *head = &resv->regions;
0db9d74e 366 long last_accounted_offset = f;
2103cf9c 367 struct file_region *rg = NULL, *trg = NULL;
d75c6af9 368
0db9d74e
MA
369 if (regions_needed)
370 *regions_needed = 0;
d75c6af9 371
0db9d74e
MA
372 /* In this loop, we essentially handle an entry for the range
373 * [last_accounted_offset, rg->from), at every iteration, with some
374 * bounds checking.
375 */
376 list_for_each_entry_safe(rg, trg, head, link) {
377 /* Skip irrelevant regions that start before our range. */
378 if (rg->from < f) {
379 /* If this region ends after the last accounted offset,
380 * then we need to update last_accounted_offset.
381 */
382 if (rg->to > last_accounted_offset)
383 last_accounted_offset = rg->to;
384 continue;
385 }
d75c6af9 386
0db9d74e
MA
387 /* When we find a region that starts beyond our range, we've
388 * finished.
389 */
ca7e0457 390 if (rg->from >= t)
d75c6af9
MA
391 break;
392
0db9d74e
MA
393 /* Add an entry for last_accounted_offset -> rg->from, and
394 * update last_accounted_offset.
395 */
2103cf9c
PX
396 if (rg->from > last_accounted_offset)
397 add += hugetlb_resv_map_add(resv, rg,
398 last_accounted_offset,
399 rg->from, h, h_cg,
400 regions_needed);
0db9d74e
MA
401
402 last_accounted_offset = rg->to;
403 }
404
405 /* Handle the case where our range extends beyond
406 * last_accounted_offset.
407 */
2103cf9c
PX
408 if (last_accounted_offset < t)
409 add += hugetlb_resv_map_add(resv, rg, last_accounted_offset,
410 t, h, h_cg, regions_needed);
0db9d74e
MA
411
412 VM_BUG_ON(add < 0);
413 return add;
414}
415
416/* Must be called with resv->lock acquired. Will drop lock to allocate entries.
417 */
418static int allocate_file_region_entries(struct resv_map *resv,
419 int regions_needed)
420 __must_hold(&resv->lock)
421{
422 struct list_head allocated_regions;
423 int to_allocate = 0, i = 0;
424 struct file_region *trg = NULL, *rg = NULL;
425
426 VM_BUG_ON(regions_needed < 0);
427
428 INIT_LIST_HEAD(&allocated_regions);
429
430 /*
431 * Check for sufficient descriptors in the cache to accommodate
432 * the number of in progress add operations plus regions_needed.
433 *
434 * This is a while loop because when we drop the lock, some other call
435 * to region_add or region_del may have consumed some region_entries,
436 * so we keep looping here until we finally have enough entries for
437 * (adds_in_progress + regions_needed).
438 */
439 while (resv->region_cache_count <
440 (resv->adds_in_progress + regions_needed)) {
441 to_allocate = resv->adds_in_progress + regions_needed -
442 resv->region_cache_count;
443
444 /* At this point, we should have enough entries in the cache
445 * for all the existings adds_in_progress. We should only be
446 * needing to allocate for regions_needed.
d75c6af9 447 */
0db9d74e
MA
448 VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress);
449
450 spin_unlock(&resv->lock);
451 for (i = 0; i < to_allocate; i++) {
452 trg = kmalloc(sizeof(*trg), GFP_KERNEL);
453 if (!trg)
454 goto out_of_memory;
455 list_add(&trg->link, &allocated_regions);
d75c6af9 456 }
d75c6af9 457
0db9d74e
MA
458 spin_lock(&resv->lock);
459
d3ec7b6e
WY
460 list_splice(&allocated_regions, &resv->region_cache);
461 resv->region_cache_count += to_allocate;
d75c6af9
MA
462 }
463
0db9d74e 464 return 0;
d75c6af9 465
0db9d74e
MA
466out_of_memory:
467 list_for_each_entry_safe(rg, trg, &allocated_regions, link) {
468 list_del(&rg->link);
469 kfree(rg);
470 }
471 return -ENOMEM;
d75c6af9
MA
472}
473
1dd308a7
MK
474/*
475 * Add the huge page range represented by [f, t) to the reserve
0db9d74e
MA
476 * map. Regions will be taken from the cache to fill in this range.
477 * Sufficient regions should exist in the cache due to the previous
478 * call to region_chg with the same range, but in some cases the cache will not
479 * have sufficient entries due to races with other code doing region_add or
480 * region_del. The extra needed entries will be allocated.
cf3ad20b 481 *
0db9d74e
MA
482 * regions_needed is the out value provided by a previous call to region_chg.
483 *
484 * Return the number of new huge pages added to the map. This number is greater
485 * than or equal to zero. If file_region entries needed to be allocated for
7c8de358 486 * this operation and we were not able to allocate, it returns -ENOMEM.
0db9d74e
MA
487 * region_add of regions of length 1 never allocate file_regions and cannot
488 * fail; region_chg will always allocate at least 1 entry and a region_add for
489 * 1 page will only require at most 1 entry.
1dd308a7 490 */
0db9d74e 491static long region_add(struct resv_map *resv, long f, long t,
075a61d0
MA
492 long in_regions_needed, struct hstate *h,
493 struct hugetlb_cgroup *h_cg)
96822904 494{
0db9d74e 495 long add = 0, actual_regions_needed = 0;
96822904 496
7b24d861 497 spin_lock(&resv->lock);
0db9d74e
MA
498retry:
499
500 /* Count how many regions are actually needed to execute this add. */
972a3da3
WY
501 add_reservation_in_range(resv, f, t, NULL, NULL,
502 &actual_regions_needed);
96822904 503
5e911373 504 /*
0db9d74e
MA
505 * Check for sufficient descriptors in the cache to accommodate
506 * this add operation. Note that actual_regions_needed may be greater
507 * than in_regions_needed, as the resv_map may have been modified since
508 * the region_chg call. In this case, we need to make sure that we
509 * allocate extra entries, such that we have enough for all the
510 * existing adds_in_progress, plus the excess needed for this
511 * operation.
5e911373 512 */
0db9d74e
MA
513 if (actual_regions_needed > in_regions_needed &&
514 resv->region_cache_count <
515 resv->adds_in_progress +
516 (actual_regions_needed - in_regions_needed)) {
517 /* region_add operation of range 1 should never need to
518 * allocate file_region entries.
519 */
520 VM_BUG_ON(t - f <= 1);
5e911373 521
0db9d74e
MA
522 if (allocate_file_region_entries(
523 resv, actual_regions_needed - in_regions_needed)) {
524 return -ENOMEM;
525 }
5e911373 526
0db9d74e 527 goto retry;
5e911373
MK
528 }
529
972a3da3 530 add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);
0db9d74e
MA
531
532 resv->adds_in_progress -= in_regions_needed;
cf3ad20b 533
7b24d861 534 spin_unlock(&resv->lock);
cf3ad20b
MK
535 VM_BUG_ON(add < 0);
536 return add;
96822904
AW
537}
538
1dd308a7
MK
539/*
540 * Examine the existing reserve map and determine how many
541 * huge pages in the specified range [f, t) are NOT currently
542 * represented. This routine is called before a subsequent
543 * call to region_add that will actually modify the reserve
544 * map to add the specified range [f, t). region_chg does
545 * not change the number of huge pages represented by the
0db9d74e
MA
546 * map. A number of new file_region structures is added to the cache as a
547 * placeholder, for the subsequent region_add call to use. At least 1
548 * file_region structure is added.
549 *
550 * out_regions_needed is the number of regions added to the
551 * resv->adds_in_progress. This value needs to be provided to a follow up call
552 * to region_add or region_abort for proper accounting.
5e911373
MK
553 *
554 * Returns the number of huge pages that need to be added to the existing
555 * reservation map for the range [f, t). This number is greater or equal to
556 * zero. -ENOMEM is returned if a new file_region structure or cache entry
557 * is needed and can not be allocated.
1dd308a7 558 */
0db9d74e
MA
559static long region_chg(struct resv_map *resv, long f, long t,
560 long *out_regions_needed)
96822904 561{
96822904
AW
562 long chg = 0;
563
7b24d861 564 spin_lock(&resv->lock);
5e911373 565
972a3da3 566 /* Count how many hugepages in this range are NOT represented. */
075a61d0 567 chg = add_reservation_in_range(resv, f, t, NULL, NULL,
972a3da3 568 out_regions_needed);
5e911373 569
0db9d74e
MA
570 if (*out_regions_needed == 0)
571 *out_regions_needed = 1;
5e911373 572
0db9d74e
MA
573 if (allocate_file_region_entries(resv, *out_regions_needed))
574 return -ENOMEM;
5e911373 575
0db9d74e 576 resv->adds_in_progress += *out_regions_needed;
7b24d861 577
7b24d861 578 spin_unlock(&resv->lock);
96822904
AW
579 return chg;
580}
581
5e911373
MK
582/*
583 * Abort the in progress add operation. The adds_in_progress field
584 * of the resv_map keeps track of the operations in progress between
585 * calls to region_chg and region_add. Operations are sometimes
586 * aborted after the call to region_chg. In such cases, region_abort
0db9d74e
MA
587 * is called to decrement the adds_in_progress counter. regions_needed
588 * is the value returned by the region_chg call, it is used to decrement
589 * the adds_in_progress counter.
5e911373
MK
590 *
591 * NOTE: The range arguments [f, t) are not needed or used in this
592 * routine. They are kept to make reading the calling code easier as
593 * arguments will match the associated region_chg call.
594 */
0db9d74e
MA
595static void region_abort(struct resv_map *resv, long f, long t,
596 long regions_needed)
5e911373
MK
597{
598 spin_lock(&resv->lock);
599 VM_BUG_ON(!resv->region_cache_count);
0db9d74e 600 resv->adds_in_progress -= regions_needed;
5e911373
MK
601 spin_unlock(&resv->lock);
602}
603
1dd308a7 604/*
feba16e2
MK
605 * Delete the specified range [f, t) from the reserve map. If the
606 * t parameter is LONG_MAX, this indicates that ALL regions after f
607 * should be deleted. Locate the regions which intersect [f, t)
608 * and either trim, delete or split the existing regions.
609 *
610 * Returns the number of huge pages deleted from the reserve map.
611 * In the normal case, the return value is zero or more. In the
612 * case where a region must be split, a new region descriptor must
613 * be allocated. If the allocation fails, -ENOMEM will be returned.
614 * NOTE: If the parameter t == LONG_MAX, then we will never split
615 * a region and possibly return -ENOMEM. Callers specifying
616 * t == LONG_MAX do not need to check for -ENOMEM error.
1dd308a7 617 */
feba16e2 618static long region_del(struct resv_map *resv, long f, long t)
96822904 619{
1406ec9b 620 struct list_head *head = &resv->regions;
96822904 621 struct file_region *rg, *trg;
feba16e2
MK
622 struct file_region *nrg = NULL;
623 long del = 0;
96822904 624
feba16e2 625retry:
7b24d861 626 spin_lock(&resv->lock);
feba16e2 627 list_for_each_entry_safe(rg, trg, head, link) {
dbe409e4
MK
628 /*
629 * Skip regions before the range to be deleted. file_region
630 * ranges are normally of the form [from, to). However, there
631 * may be a "placeholder" entry in the map which is of the form
632 * (from, to) with from == to. Check for placeholder entries
633 * at the beginning of the range to be deleted.
634 */
635 if (rg->to <= f && (rg->to != rg->from || rg->to != f))
feba16e2 636 continue;
dbe409e4 637
feba16e2 638 if (rg->from >= t)
96822904 639 break;
96822904 640
feba16e2
MK
641 if (f > rg->from && t < rg->to) { /* Must split region */
642 /*
643 * Check for an entry in the cache before dropping
644 * lock and attempting allocation.
645 */
646 if (!nrg &&
647 resv->region_cache_count > resv->adds_in_progress) {
648 nrg = list_first_entry(&resv->region_cache,
649 struct file_region,
650 link);
651 list_del(&nrg->link);
652 resv->region_cache_count--;
653 }
96822904 654
feba16e2
MK
655 if (!nrg) {
656 spin_unlock(&resv->lock);
657 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
658 if (!nrg)
659 return -ENOMEM;
660 goto retry;
661 }
662
663 del += t - f;
79aa925b
MK
664 hugetlb_cgroup_uncharge_file_region(
665 resv, rg, t - f);
feba16e2
MK
666
667 /* New entry for end of split region */
668 nrg->from = t;
669 nrg->to = rg->to;
075a61d0
MA
670
671 copy_hugetlb_cgroup_uncharge_info(nrg, rg);
672
feba16e2
MK
673 INIT_LIST_HEAD(&nrg->link);
674
675 /* Original entry is trimmed */
676 rg->to = f;
677
678 list_add(&nrg->link, &rg->link);
679 nrg = NULL;
96822904 680 break;
feba16e2
MK
681 }
682
683 if (f <= rg->from && t >= rg->to) { /* Remove entire region */
684 del += rg->to - rg->from;
075a61d0
MA
685 hugetlb_cgroup_uncharge_file_region(resv, rg,
686 rg->to - rg->from);
feba16e2
MK
687 list_del(&rg->link);
688 kfree(rg);
689 continue;
690 }
691
692 if (f <= rg->from) { /* Trim beginning of region */
075a61d0
MA
693 hugetlb_cgroup_uncharge_file_region(resv, rg,
694 t - rg->from);
075a61d0 695
79aa925b
MK
696 del += t - rg->from;
697 rg->from = t;
698 } else { /* Trim end of region */
075a61d0
MA
699 hugetlb_cgroup_uncharge_file_region(resv, rg,
700 rg->to - f);
79aa925b
MK
701
702 del += rg->to - f;
703 rg->to = f;
feba16e2 704 }
96822904 705 }
7b24d861 706
7b24d861 707 spin_unlock(&resv->lock);
feba16e2
MK
708 kfree(nrg);
709 return del;
96822904
AW
710}
711
b5cec28d
MK
712/*
713 * A rare out of memory error was encountered which prevented removal of
714 * the reserve map region for a page. The huge page itself was free'ed
715 * and removed from the page cache. This routine will adjust the subpool
716 * usage count, and the global reserve count if needed. By incrementing
717 * these counts, the reserve map entry which could not be deleted will
718 * appear as a "reserved" entry instead of simply dangling with incorrect
719 * counts.
720 */
72e2936c 721void hugetlb_fix_reserve_counts(struct inode *inode)
b5cec28d
MK
722{
723 struct hugepage_subpool *spool = subpool_inode(inode);
724 long rsv_adjust;
725
726 rsv_adjust = hugepage_subpool_get_pages(spool, 1);
72e2936c 727 if (rsv_adjust) {
b5cec28d
MK
728 struct hstate *h = hstate_inode(inode);
729
730 hugetlb_acct_memory(h, 1);
731 }
732}
733
1dd308a7
MK
734/*
735 * Count and return the number of huge pages in the reserve map
736 * that intersect with the range [f, t).
737 */
1406ec9b 738static long region_count(struct resv_map *resv, long f, long t)
84afd99b 739{
1406ec9b 740 struct list_head *head = &resv->regions;
84afd99b
AW
741 struct file_region *rg;
742 long chg = 0;
743
7b24d861 744 spin_lock(&resv->lock);
84afd99b
AW
745 /* Locate each segment we overlap with, and count that overlap. */
746 list_for_each_entry(rg, head, link) {
f2135a4a
WSH
747 long seg_from;
748 long seg_to;
84afd99b
AW
749
750 if (rg->to <= f)
751 continue;
752 if (rg->from >= t)
753 break;
754
755 seg_from = max(rg->from, f);
756 seg_to = min(rg->to, t);
757
758 chg += seg_to - seg_from;
759 }
7b24d861 760 spin_unlock(&resv->lock);
84afd99b
AW
761
762 return chg;
763}
764
e7c4b0bf
AW
765/*
766 * Convert the address within this vma to the page offset within
767 * the mapping, in pagecache page units; huge pages here.
768 */
a5516438
AK
769static pgoff_t vma_hugecache_offset(struct hstate *h,
770 struct vm_area_struct *vma, unsigned long address)
e7c4b0bf 771{
a5516438
AK
772 return ((address - vma->vm_start) >> huge_page_shift(h)) +
773 (vma->vm_pgoff >> huge_page_order(h));
e7c4b0bf
AW
774}
775
0fe6e20b
NH
776pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
777 unsigned long address)
778{
779 return vma_hugecache_offset(hstate_vma(vma), vma, address);
780}
dee41079 781EXPORT_SYMBOL_GPL(linear_hugepage_index);
0fe6e20b 782
08fba699
MG
783/*
784 * Return the size of the pages allocated when backing a VMA. In the majority
785 * cases this will be same size as used by the page table entries.
786 */
787unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
788{
05ea8860
DW
789 if (vma->vm_ops && vma->vm_ops->pagesize)
790 return vma->vm_ops->pagesize(vma);
791 return PAGE_SIZE;
08fba699 792}
f340ca0f 793EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
08fba699 794
3340289d
MG
795/*
796 * Return the page size being used by the MMU to back a VMA. In the majority
797 * of cases, the page size used by the kernel matches the MMU size. On
09135cc5
DW
798 * architectures where it differs, an architecture-specific 'strong'
799 * version of this symbol is required.
3340289d 800 */
09135cc5 801__weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
3340289d
MG
802{
803 return vma_kernel_pagesize(vma);
804}
3340289d 805
84afd99b
AW
806/*
807 * Flags for MAP_PRIVATE reservations. These are stored in the bottom
808 * bits of the reservation map pointer, which are always clear due to
809 * alignment.
810 */
811#define HPAGE_RESV_OWNER (1UL << 0)
812#define HPAGE_RESV_UNMAPPED (1UL << 1)
04f2cbe3 813#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
84afd99b 814
a1e78772
MG
815/*
816 * These helpers are used to track how many pages are reserved for
817 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
818 * is guaranteed to have their future faults succeed.
819 *
820 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
821 * the reserve counters are updated with the hugetlb_lock held. It is safe
822 * to reset the VMA at fork() time as it is not in use yet and there is no
823 * chance of the global counters getting corrupted as a result of the values.
84afd99b
AW
824 *
825 * The private mapping reservation is represented in a subtly different
826 * manner to a shared mapping. A shared mapping has a region map associated
827 * with the underlying file, this region map represents the backing file
828 * pages which have ever had a reservation assigned which this persists even
829 * after the page is instantiated. A private mapping has a region map
830 * associated with the original mmap which is attached to all VMAs which
831 * reference it, this region map represents those offsets which have consumed
832 * reservation ie. where pages have been instantiated.
a1e78772 833 */
e7c4b0bf
AW
834static unsigned long get_vma_private_data(struct vm_area_struct *vma)
835{
836 return (unsigned long)vma->vm_private_data;
837}
838
839static void set_vma_private_data(struct vm_area_struct *vma,
840 unsigned long value)
841{
842 vma->vm_private_data = (void *)value;
843}
844
e9fe92ae
MA
845static void
846resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map,
847 struct hugetlb_cgroup *h_cg,
848 struct hstate *h)
849{
850#ifdef CONFIG_CGROUP_HUGETLB
851 if (!h_cg || !h) {
852 resv_map->reservation_counter = NULL;
853 resv_map->pages_per_hpage = 0;
854 resv_map->css = NULL;
855 } else {
856 resv_map->reservation_counter =
857 &h_cg->rsvd_hugepage[hstate_index(h)];
858 resv_map->pages_per_hpage = pages_per_huge_page(h);
859 resv_map->css = &h_cg->css;
860 }
861#endif
862}
863
9119a41e 864struct resv_map *resv_map_alloc(void)
84afd99b
AW
865{
866 struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
5e911373
MK
867 struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
868
869 if (!resv_map || !rg) {
870 kfree(resv_map);
871 kfree(rg);
84afd99b 872 return NULL;
5e911373 873 }
84afd99b
AW
874
875 kref_init(&resv_map->refs);
7b24d861 876 spin_lock_init(&resv_map->lock);
84afd99b
AW
877 INIT_LIST_HEAD(&resv_map->regions);
878
5e911373 879 resv_map->adds_in_progress = 0;
e9fe92ae
MA
880 /*
881 * Initialize these to 0. On shared mappings, 0's here indicate these
882 * fields don't do cgroup accounting. On private mappings, these will be
883 * re-initialized to the proper values, to indicate that hugetlb cgroup
884 * reservations are to be un-charged from here.
885 */
886 resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, NULL, NULL);
5e911373
MK
887
888 INIT_LIST_HEAD(&resv_map->region_cache);
889 list_add(&rg->link, &resv_map->region_cache);
890 resv_map->region_cache_count = 1;
891
84afd99b
AW
892 return resv_map;
893}
894
9119a41e 895void resv_map_release(struct kref *ref)
84afd99b
AW
896{
897 struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
5e911373
MK
898 struct list_head *head = &resv_map->region_cache;
899 struct file_region *rg, *trg;
84afd99b
AW
900
901 /* Clear out any active regions before we release the map. */
feba16e2 902 region_del(resv_map, 0, LONG_MAX);
5e911373
MK
903
904 /* ... and any entries left in the cache */
905 list_for_each_entry_safe(rg, trg, head, link) {
906 list_del(&rg->link);
907 kfree(rg);
908 }
909
910 VM_BUG_ON(resv_map->adds_in_progress);
911
84afd99b
AW
912 kfree(resv_map);
913}
914
4e35f483
JK
915static inline struct resv_map *inode_resv_map(struct inode *inode)
916{
f27a5136
MK
917 /*
918 * At inode evict time, i_mapping may not point to the original
919 * address space within the inode. This original address space
920 * contains the pointer to the resv_map. So, always use the
921 * address space embedded within the inode.
922 * The VERY common case is inode->mapping == &inode->i_data but,
923 * this may not be true for device special inodes.
924 */
925 return (struct resv_map *)(&inode->i_data)->private_data;
4e35f483
JK
926}
927
84afd99b 928static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
a1e78772 929{
81d1b09c 930 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
4e35f483
JK
931 if (vma->vm_flags & VM_MAYSHARE) {
932 struct address_space *mapping = vma->vm_file->f_mapping;
933 struct inode *inode = mapping->host;
934
935 return inode_resv_map(inode);
936
937 } else {
84afd99b
AW
938 return (struct resv_map *)(get_vma_private_data(vma) &
939 ~HPAGE_RESV_MASK);
4e35f483 940 }
a1e78772
MG
941}
942
84afd99b 943static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
a1e78772 944{
81d1b09c
SL
945 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
946 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
a1e78772 947
84afd99b
AW
948 set_vma_private_data(vma, (get_vma_private_data(vma) &
949 HPAGE_RESV_MASK) | (unsigned long)map);
04f2cbe3
MG
950}
951
952static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
953{
81d1b09c
SL
954 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
955 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
e7c4b0bf
AW
956
957 set_vma_private_data(vma, get_vma_private_data(vma) | flags);
04f2cbe3
MG
958}
959
960static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
961{
81d1b09c 962 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
e7c4b0bf
AW
963
964 return (get_vma_private_data(vma) & flag) != 0;
a1e78772
MG
965}
966
04f2cbe3 967/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
a1e78772
MG
968void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
969{
81d1b09c 970 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
f83a275d 971 if (!(vma->vm_flags & VM_MAYSHARE))
a1e78772
MG
972 vma->vm_private_data = (void *)0;
973}
974
975/* Returns true if the VMA has associated reserve pages */
559ec2f8 976static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
a1e78772 977{
af0ed73e
JK
978 if (vma->vm_flags & VM_NORESERVE) {
979 /*
980 * This address is already reserved by other process(chg == 0),
981 * so, we should decrement reserved count. Without decrementing,
982 * reserve count remains after releasing inode, because this
983 * allocated page will go into page cache and is regarded as
984 * coming from reserved pool in releasing step. Currently, we
985 * don't have any other solution to deal with this situation
986 * properly, so add work-around here.
987 */
988 if (vma->vm_flags & VM_MAYSHARE && chg == 0)
559ec2f8 989 return true;
af0ed73e 990 else
559ec2f8 991 return false;
af0ed73e 992 }
a63884e9
JK
993
994 /* Shared mappings always use reserves */
1fb1b0e9
MK
995 if (vma->vm_flags & VM_MAYSHARE) {
996 /*
997 * We know VM_NORESERVE is not set. Therefore, there SHOULD
998 * be a region map for all pages. The only situation where
999 * there is no region map is if a hole was punched via
7c8de358 1000 * fallocate. In this case, there really are no reserves to
1fb1b0e9
MK
1001 * use. This situation is indicated if chg != 0.
1002 */
1003 if (chg)
1004 return false;
1005 else
1006 return true;
1007 }
a63884e9
JK
1008
1009 /*
1010 * Only the process that called mmap() has reserves for
1011 * private mappings.
1012 */
67961f9d
MK
1013 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1014 /*
1015 * Like the shared case above, a hole punch or truncate
1016 * could have been performed on the private mapping.
1017 * Examine the value of chg to determine if reserves
1018 * actually exist or were previously consumed.
1019 * Very Subtle - The value of chg comes from a previous
1020 * call to vma_needs_reserves(). The reserve map for
1021 * private mappings has different (opposite) semantics
1022 * than that of shared mappings. vma_needs_reserves()
1023 * has already taken this difference in semantics into
1024 * account. Therefore, the meaning of chg is the same
1025 * as in the shared case above. Code could easily be
1026 * combined, but keeping it separate draws attention to
1027 * subtle differences.
1028 */
1029 if (chg)
1030 return false;
1031 else
1032 return true;
1033 }
a63884e9 1034
559ec2f8 1035 return false;
a1e78772
MG
1036}
1037
a5516438 1038static void enqueue_huge_page(struct hstate *h, struct page *page)
1da177e4
LT
1039{
1040 int nid = page_to_nid(page);
0edaecfa 1041 list_move(&page->lru, &h->hugepage_freelists[nid]);
a5516438
AK
1042 h->free_huge_pages++;
1043 h->free_huge_pages_node[nid]++;
6c037149 1044 SetHPageFreed(page);
1da177e4
LT
1045}
1046
94310cbc 1047static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
bf50bab2
NH
1048{
1049 struct page *page;
bbe88753
JK
1050 bool nocma = !!(current->flags & PF_MEMALLOC_NOCMA);
1051
1052 list_for_each_entry(page, &h->hugepage_freelists[nid], lru) {
1053 if (nocma && is_migrate_cma_page(page))
1054 continue;
bf50bab2 1055
6664bfc8
WY
1056 if (PageHWPoison(page))
1057 continue;
1058
1059 list_move(&page->lru, &h->hugepage_activelist);
1060 set_page_refcounted(page);
6c037149 1061 ClearHPageFreed(page);
6664bfc8
WY
1062 h->free_huge_pages--;
1063 h->free_huge_pages_node[nid]--;
1064 return page;
bbe88753
JK
1065 }
1066
6664bfc8 1067 return NULL;
bf50bab2
NH
1068}
1069
3e59fcb0
MH
1070static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
1071 nodemask_t *nmask)
94310cbc 1072{
3e59fcb0
MH
1073 unsigned int cpuset_mems_cookie;
1074 struct zonelist *zonelist;
1075 struct zone *zone;
1076 struct zoneref *z;
98fa15f3 1077 int node = NUMA_NO_NODE;
94310cbc 1078
3e59fcb0
MH
1079 zonelist = node_zonelist(nid, gfp_mask);
1080
1081retry_cpuset:
1082 cpuset_mems_cookie = read_mems_allowed_begin();
1083 for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
1084 struct page *page;
1085
1086 if (!cpuset_zone_allowed(zone, gfp_mask))
1087 continue;
1088 /*
1089 * no need to ask again on the same node. Pool is node rather than
1090 * zone aware
1091 */
1092 if (zone_to_nid(zone) == node)
1093 continue;
1094 node = zone_to_nid(zone);
94310cbc 1095
94310cbc
AK
1096 page = dequeue_huge_page_node_exact(h, node);
1097 if (page)
1098 return page;
1099 }
3e59fcb0
MH
1100 if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
1101 goto retry_cpuset;
1102
94310cbc
AK
1103 return NULL;
1104}
1105
a5516438
AK
1106static struct page *dequeue_huge_page_vma(struct hstate *h,
1107 struct vm_area_struct *vma,
af0ed73e
JK
1108 unsigned long address, int avoid_reserve,
1109 long chg)
1da177e4 1110{
3e59fcb0 1111 struct page *page;
480eccf9 1112 struct mempolicy *mpol;
04ec6264 1113 gfp_t gfp_mask;
3e59fcb0 1114 nodemask_t *nodemask;
04ec6264 1115 int nid;
1da177e4 1116
a1e78772
MG
1117 /*
1118 * A child process with MAP_PRIVATE mappings created by their parent
1119 * have no page reserves. This check ensures that reservations are
1120 * not "stolen". The child may still get SIGKILLed
1121 */
af0ed73e 1122 if (!vma_has_reserves(vma, chg) &&
a5516438 1123 h->free_huge_pages - h->resv_huge_pages == 0)
c0ff7453 1124 goto err;
a1e78772 1125
04f2cbe3 1126 /* If reserves cannot be used, ensure enough pages are in the pool */
a5516438 1127 if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
6eab04a8 1128 goto err;
04f2cbe3 1129
04ec6264
VB
1130 gfp_mask = htlb_alloc_mask(h);
1131 nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
3e59fcb0
MH
1132 page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
1133 if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
d6995da3 1134 SetHPageRestoreReserve(page);
3e59fcb0 1135 h->resv_huge_pages--;
1da177e4 1136 }
cc9a6c87 1137
52cd3b07 1138 mpol_cond_put(mpol);
1da177e4 1139 return page;
cc9a6c87
MG
1140
1141err:
cc9a6c87 1142 return NULL;
1da177e4
LT
1143}
1144
1cac6f2c
LC
1145/*
1146 * common helper functions for hstate_next_node_to_{alloc|free}.
1147 * We may have allocated or freed a huge page based on a different
1148 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
1149 * be outside of *nodes_allowed. Ensure that we use an allowed
1150 * node for alloc or free.
1151 */
1152static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
1153{
0edaf86c 1154 nid = next_node_in(nid, *nodes_allowed);
1cac6f2c
LC
1155 VM_BUG_ON(nid >= MAX_NUMNODES);
1156
1157 return nid;
1158}
1159
1160static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
1161{
1162 if (!node_isset(nid, *nodes_allowed))
1163 nid = next_node_allowed(nid, nodes_allowed);
1164 return nid;
1165}
1166
1167/*
1168 * returns the previously saved node ["this node"] from which to
1169 * allocate a persistent huge page for the pool and advance the
1170 * next node from which to allocate, handling wrap at end of node
1171 * mask.
1172 */
1173static int hstate_next_node_to_alloc(struct hstate *h,
1174 nodemask_t *nodes_allowed)
1175{
1176 int nid;
1177
1178 VM_BUG_ON(!nodes_allowed);
1179
1180 nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
1181 h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
1182
1183 return nid;
1184}
1185
1186/*
1187 * helper for free_pool_huge_page() - return the previously saved
1188 * node ["this node"] from which to free a huge page. Advance the
1189 * next node id whether or not we find a free huge page to free so
1190 * that the next attempt to free addresses the next node.
1191 */
1192static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
1193{
1194 int nid;
1195
1196 VM_BUG_ON(!nodes_allowed);
1197
1198 nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
1199 h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
1200
1201 return nid;
1202}
1203
1204#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask) \
1205 for (nr_nodes = nodes_weight(*mask); \
1206 nr_nodes > 0 && \
1207 ((node = hstate_next_node_to_alloc(hs, mask)) || 1); \
1208 nr_nodes--)
1209
1210#define for_each_node_mask_to_free(hs, nr_nodes, node, mask) \
1211 for (nr_nodes = nodes_weight(*mask); \
1212 nr_nodes > 0 && \
1213 ((node = hstate_next_node_to_free(hs, mask)) || 1); \
1214 nr_nodes--)
1215
e1073d1e 1216#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
944d9fec 1217static void destroy_compound_gigantic_page(struct page *page,
d00181b9 1218 unsigned int order)
944d9fec
LC
1219{
1220 int i;
1221 int nr_pages = 1 << order;
1222 struct page *p = page + 1;
1223
c8cc708a 1224 atomic_set(compound_mapcount_ptr(page), 0);
5291c09b 1225 atomic_set(compound_pincount_ptr(page), 0);
47e29d32 1226
944d9fec 1227 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1d798ca3 1228 clear_compound_head(p);
944d9fec 1229 set_page_refcounted(p);
944d9fec
LC
1230 }
1231
1232 set_compound_order(page, 0);
ba9c1201 1233 page[1].compound_nr = 0;
944d9fec
LC
1234 __ClearPageHead(page);
1235}
1236
d00181b9 1237static void free_gigantic_page(struct page *page, unsigned int order)
944d9fec 1238{
cf11e85f
RG
1239 /*
1240 * If the page isn't allocated using the cma allocator,
1241 * cma_release() returns false.
1242 */
dbda8fea
BS
1243#ifdef CONFIG_CMA
1244 if (cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order))
cf11e85f 1245 return;
dbda8fea 1246#endif
cf11e85f 1247
944d9fec
LC
1248 free_contig_range(page_to_pfn(page), 1 << order);
1249}
1250
4eb0716e 1251#ifdef CONFIG_CONTIG_ALLOC
d9cc948f
MH
1252static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1253 int nid, nodemask_t *nodemask)
944d9fec 1254{
5e27a2df 1255 unsigned long nr_pages = 1UL << huge_page_order(h);
953f064a
LX
1256 if (nid == NUMA_NO_NODE)
1257 nid = numa_mem_id();
944d9fec 1258
dbda8fea
BS
1259#ifdef CONFIG_CMA
1260 {
cf11e85f
RG
1261 struct page *page;
1262 int node;
1263
953f064a
LX
1264 if (hugetlb_cma[nid]) {
1265 page = cma_alloc(hugetlb_cma[nid], nr_pages,
1266 huge_page_order(h), true);
cf11e85f
RG
1267 if (page)
1268 return page;
1269 }
953f064a
LX
1270
1271 if (!(gfp_mask & __GFP_THISNODE)) {
1272 for_each_node_mask(node, *nodemask) {
1273 if (node == nid || !hugetlb_cma[node])
1274 continue;
1275
1276 page = cma_alloc(hugetlb_cma[node], nr_pages,
1277 huge_page_order(h), true);
1278 if (page)
1279 return page;
1280 }
1281 }
cf11e85f 1282 }
dbda8fea 1283#endif
cf11e85f 1284
5e27a2df 1285 return alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
944d9fec
LC
1286}
1287
1288static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
d00181b9 1289static void prep_compound_gigantic_page(struct page *page, unsigned int order);
4eb0716e
AG
1290#else /* !CONFIG_CONTIG_ALLOC */
1291static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1292 int nid, nodemask_t *nodemask)
1293{
1294 return NULL;
1295}
1296#endif /* CONFIG_CONTIG_ALLOC */
944d9fec 1297
e1073d1e 1298#else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
d9cc948f 1299static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
4eb0716e
AG
1300 int nid, nodemask_t *nodemask)
1301{
1302 return NULL;
1303}
d00181b9 1304static inline void free_gigantic_page(struct page *page, unsigned int order) { }
944d9fec 1305static inline void destroy_compound_gigantic_page(struct page *page,
d00181b9 1306 unsigned int order) { }
944d9fec
LC
1307#endif
1308
a5516438 1309static void update_and_free_page(struct hstate *h, struct page *page)
6af2acb6
AL
1310{
1311 int i;
dbfee5ae 1312 struct page *subpage = page;
a5516438 1313
4eb0716e 1314 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
944d9fec 1315 return;
18229df5 1316
a5516438
AK
1317 h->nr_huge_pages--;
1318 h->nr_huge_pages_node[page_to_nid(page)]--;
dbfee5ae
MK
1319 for (i = 0; i < pages_per_huge_page(h);
1320 i++, subpage = mem_map_next(subpage, page, i)) {
1321 subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
32f84528 1322 1 << PG_referenced | 1 << PG_dirty |
a7407a27
LC
1323 1 << PG_active | 1 << PG_private |
1324 1 << PG_writeback);
6af2acb6 1325 }
309381fe 1326 VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
1adc4d41 1327 VM_BUG_ON_PAGE(hugetlb_cgroup_from_page_rsvd(page), page);
f1e61557 1328 set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
6af2acb6 1329 set_page_refcounted(page);
944d9fec 1330 if (hstate_is_gigantic(h)) {
cf11e85f
RG
1331 /*
1332 * Temporarily drop the hugetlb_lock, because
1333 * we might block in free_gigantic_page().
1334 */
1335 spin_unlock(&hugetlb_lock);
944d9fec
LC
1336 destroy_compound_gigantic_page(page, huge_page_order(h));
1337 free_gigantic_page(page, huge_page_order(h));
cf11e85f 1338 spin_lock(&hugetlb_lock);
944d9fec 1339 } else {
944d9fec
LC
1340 __free_pages(page, huge_page_order(h));
1341 }
6af2acb6
AL
1342}
1343
e5ff2159
AK
1344struct hstate *size_to_hstate(unsigned long size)
1345{
1346 struct hstate *h;
1347
1348 for_each_hstate(h) {
1349 if (huge_page_size(h) == size)
1350 return h;
1351 }
1352 return NULL;
1353}
1354
c77c0a8a 1355static void __free_huge_page(struct page *page)
27a85ef1 1356{
a5516438
AK
1357 /*
1358 * Can't pass hstate in here because it is called from the
1359 * compound page destructor.
1360 */
e5ff2159 1361 struct hstate *h = page_hstate(page);
7893d1d5 1362 int nid = page_to_nid(page);
d6995da3 1363 struct hugepage_subpool *spool = hugetlb_page_subpool(page);
07443a85 1364 bool restore_reserve;
27a85ef1 1365
b4330afb
MK
1366 VM_BUG_ON_PAGE(page_count(page), page);
1367 VM_BUG_ON_PAGE(page_mapcount(page), page);
8ace22bc 1368
d6995da3 1369 hugetlb_set_page_subpool(page, NULL);
8ace22bc 1370 page->mapping = NULL;
d6995da3
MK
1371 restore_reserve = HPageRestoreReserve(page);
1372 ClearHPageRestoreReserve(page);
27a85ef1 1373
1c5ecae3 1374 /*
d6995da3 1375 * If HPageRestoreReserve was set on page, page allocation consumed a
0919e1b6
MK
1376 * reservation. If the page was associated with a subpool, there
1377 * would have been a page reserved in the subpool before allocation
1378 * via hugepage_subpool_get_pages(). Since we are 'restoring' the
6c26d310 1379 * reservation, do not call hugepage_subpool_put_pages() as this will
0919e1b6 1380 * remove the reserved page from the subpool.
1c5ecae3 1381 */
0919e1b6
MK
1382 if (!restore_reserve) {
1383 /*
1384 * A return code of zero implies that the subpool will be
1385 * under its minimum size if the reservation is not restored
1386 * after page is free. Therefore, force restore_reserve
1387 * operation.
1388 */
1389 if (hugepage_subpool_put_pages(spool, 1) == 0)
1390 restore_reserve = true;
1391 }
1c5ecae3 1392
27a85ef1 1393 spin_lock(&hugetlb_lock);
8f251a3d 1394 ClearHPageMigratable(page);
6d76dcf4
AK
1395 hugetlb_cgroup_uncharge_page(hstate_index(h),
1396 pages_per_huge_page(h), page);
08cf9faf
MA
1397 hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
1398 pages_per_huge_page(h), page);
07443a85
JK
1399 if (restore_reserve)
1400 h->resv_huge_pages++;
1401
9157c311 1402 if (HPageTemporary(page)) {
ab5ac90a 1403 list_del(&page->lru);
9157c311 1404 ClearHPageTemporary(page);
ab5ac90a
MH
1405 update_and_free_page(h, page);
1406 } else if (h->surplus_huge_pages_node[nid]) {
0edaecfa
AK
1407 /* remove the page from active list */
1408 list_del(&page->lru);
a5516438
AK
1409 update_and_free_page(h, page);
1410 h->surplus_huge_pages--;
1411 h->surplus_huge_pages_node[nid]--;
7893d1d5 1412 } else {
5d3a551c 1413 arch_clear_hugepage_flags(page);
a5516438 1414 enqueue_huge_page(h, page);
7893d1d5 1415 }
27a85ef1
DG
1416 spin_unlock(&hugetlb_lock);
1417}
1418
c77c0a8a
WL
1419/*
1420 * As free_huge_page() can be called from a non-task context, we have
1421 * to defer the actual freeing in a workqueue to prevent potential
1422 * hugetlb_lock deadlock.
1423 *
1424 * free_hpage_workfn() locklessly retrieves the linked list of pages to
1425 * be freed and frees them one-by-one. As the page->mapping pointer is
1426 * going to be cleared in __free_huge_page() anyway, it is reused as the
1427 * llist_node structure of a lockless linked list of huge pages to be freed.
1428 */
1429static LLIST_HEAD(hpage_freelist);
1430
1431static void free_hpage_workfn(struct work_struct *work)
1432{
1433 struct llist_node *node;
1434 struct page *page;
1435
1436 node = llist_del_all(&hpage_freelist);
1437
1438 while (node) {
1439 page = container_of((struct address_space **)node,
1440 struct page, mapping);
1441 node = node->next;
1442 __free_huge_page(page);
1443 }
1444}
1445static DECLARE_WORK(free_hpage_work, free_hpage_workfn);
1446
1447void free_huge_page(struct page *page)
1448{
1449 /*
1450 * Defer freeing if in non-task context to avoid hugetlb_lock deadlock.
1451 */
1452 if (!in_task()) {
1453 /*
1454 * Only call schedule_work() if hpage_freelist is previously
1455 * empty. Otherwise, schedule_work() had been called but the
1456 * workfn hasn't retrieved the list yet.
1457 */
1458 if (llist_add((struct llist_node *)&page->mapping,
1459 &hpage_freelist))
1460 schedule_work(&free_hpage_work);
1461 return;
1462 }
1463
1464 __free_huge_page(page);
1465}
1466
a5516438 1467static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
b7ba30c6 1468{
0edaecfa 1469 INIT_LIST_HEAD(&page->lru);
f1e61557 1470 set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
ff546117 1471 hugetlb_set_page_subpool(page, NULL);
9dd540e2 1472 set_hugetlb_cgroup(page, NULL);
1adc4d41 1473 set_hugetlb_cgroup_rsvd(page, NULL);
2f37511c 1474 spin_lock(&hugetlb_lock);
a5516438
AK
1475 h->nr_huge_pages++;
1476 h->nr_huge_pages_node[nid]++;
6c037149 1477 ClearHPageFreed(page);
b7ba30c6 1478 spin_unlock(&hugetlb_lock);
b7ba30c6
AK
1479}
1480
d00181b9 1481static void prep_compound_gigantic_page(struct page *page, unsigned int order)
20a0307c
WF
1482{
1483 int i;
1484 int nr_pages = 1 << order;
1485 struct page *p = page + 1;
1486
1487 /* we rely on prep_new_huge_page to set the destructor */
1488 set_compound_order(page, order);
ef5a22be 1489 __ClearPageReserved(page);
de09d31d 1490 __SetPageHead(page);
20a0307c 1491 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
ef5a22be
AA
1492 /*
1493 * For gigantic hugepages allocated through bootmem at
1494 * boot, it's safer to be consistent with the not-gigantic
1495 * hugepages and clear the PG_reserved bit from all tail pages
7c8de358 1496 * too. Otherwise drivers using get_user_pages() to access tail
ef5a22be
AA
1497 * pages may get the reference counting wrong if they see
1498 * PG_reserved set on a tail page (despite the head page not
1499 * having PG_reserved set). Enforcing this consistency between
1500 * head and tail pages allows drivers to optimize away a check
1501 * on the head page when they need know if put_page() is needed
1502 * after get_user_pages().
1503 */
1504 __ClearPageReserved(p);
58a84aa9 1505 set_page_count(p, 0);
1d798ca3 1506 set_compound_head(p, page);
20a0307c 1507 }
b4330afb 1508 atomic_set(compound_mapcount_ptr(page), -1);
5291c09b 1509 atomic_set(compound_pincount_ptr(page), 0);
20a0307c
WF
1510}
1511
7795912c
AM
1512/*
1513 * PageHuge() only returns true for hugetlbfs pages, but not for normal or
1514 * transparent huge pages. See the PageTransHuge() documentation for more
1515 * details.
1516 */
20a0307c
WF
1517int PageHuge(struct page *page)
1518{
20a0307c
WF
1519 if (!PageCompound(page))
1520 return 0;
1521
1522 page = compound_head(page);
f1e61557 1523 return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
20a0307c 1524}
43131e14
NH
1525EXPORT_SYMBOL_GPL(PageHuge);
1526
27c73ae7
AA
1527/*
1528 * PageHeadHuge() only returns true for hugetlbfs head page, but not for
1529 * normal or transparent huge pages.
1530 */
1531int PageHeadHuge(struct page *page_head)
1532{
27c73ae7
AA
1533 if (!PageHead(page_head))
1534 return 0;
1535
d4af73e3 1536 return page_head[1].compound_dtor == HUGETLB_PAGE_DTOR;
27c73ae7 1537}
27c73ae7 1538
c0d0381a
MK
1539/*
1540 * Find and lock address space (mapping) in write mode.
1541 *
336bf30e
MK
1542 * Upon entry, the page is locked which means that page_mapping() is
1543 * stable. Due to locking order, we can only trylock_write. If we can
1544 * not get the lock, simply return NULL to caller.
c0d0381a
MK
1545 */
1546struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)
1547{
336bf30e 1548 struct address_space *mapping = page_mapping(hpage);
c0d0381a 1549
c0d0381a
MK
1550 if (!mapping)
1551 return mapping;
1552
c0d0381a
MK
1553 if (i_mmap_trylock_write(mapping))
1554 return mapping;
1555
336bf30e 1556 return NULL;
c0d0381a
MK
1557}
1558
13d60f4b
ZY
1559pgoff_t __basepage_index(struct page *page)
1560{
1561 struct page *page_head = compound_head(page);
1562 pgoff_t index = page_index(page_head);
1563 unsigned long compound_idx;
1564
1565 if (!PageHuge(page_head))
1566 return page_index(page);
1567
1568 if (compound_order(page_head) >= MAX_ORDER)
1569 compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
1570 else
1571 compound_idx = page - page_head;
1572
1573 return (index << compound_order(page_head)) + compound_idx;
1574}
1575
0c397dae 1576static struct page *alloc_buddy_huge_page(struct hstate *h,
f60858f9
MK
1577 gfp_t gfp_mask, int nid, nodemask_t *nmask,
1578 nodemask_t *node_alloc_noretry)
1da177e4 1579{
af0fb9df 1580 int order = huge_page_order(h);
1da177e4 1581 struct page *page;
f60858f9 1582 bool alloc_try_hard = true;
f96efd58 1583
f60858f9
MK
1584 /*
1585 * By default we always try hard to allocate the page with
1586 * __GFP_RETRY_MAYFAIL flag. However, if we are allocating pages in
1587 * a loop (to adjust global huge page counts) and previous allocation
1588 * failed, do not continue to try hard on the same node. Use the
1589 * node_alloc_noretry bitmap to manage this state information.
1590 */
1591 if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry))
1592 alloc_try_hard = false;
1593 gfp_mask |= __GFP_COMP|__GFP_NOWARN;
1594 if (alloc_try_hard)
1595 gfp_mask |= __GFP_RETRY_MAYFAIL;
af0fb9df
MH
1596 if (nid == NUMA_NO_NODE)
1597 nid = numa_mem_id();
1598 page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
1599 if (page)
1600 __count_vm_event(HTLB_BUDDY_PGALLOC);
1601 else
1602 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
63b4613c 1603
f60858f9
MK
1604 /*
1605 * If we did not specify __GFP_RETRY_MAYFAIL, but still got a page this
1606 * indicates an overall state change. Clear bit so that we resume
1607 * normal 'try hard' allocations.
1608 */
1609 if (node_alloc_noretry && page && !alloc_try_hard)
1610 node_clear(nid, *node_alloc_noretry);
1611
1612 /*
1613 * If we tried hard to get a page but failed, set bit so that
1614 * subsequent attempts will not try as hard until there is an
1615 * overall state change.
1616 */
1617 if (node_alloc_noretry && !page && alloc_try_hard)
1618 node_set(nid, *node_alloc_noretry);
1619
63b4613c
NA
1620 return page;
1621}
1622
0c397dae
MH
1623/*
1624 * Common helper to allocate a fresh hugetlb page. All specific allocators
1625 * should use this function to get new hugetlb pages
1626 */
1627static struct page *alloc_fresh_huge_page(struct hstate *h,
f60858f9
MK
1628 gfp_t gfp_mask, int nid, nodemask_t *nmask,
1629 nodemask_t *node_alloc_noretry)
0c397dae
MH
1630{
1631 struct page *page;
1632
1633 if (hstate_is_gigantic(h))
1634 page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
1635 else
1636 page = alloc_buddy_huge_page(h, gfp_mask,
f60858f9 1637 nid, nmask, node_alloc_noretry);
0c397dae
MH
1638 if (!page)
1639 return NULL;
1640
1641 if (hstate_is_gigantic(h))
1642 prep_compound_gigantic_page(page, huge_page_order(h));
1643 prep_new_huge_page(h, page, page_to_nid(page));
1644
1645 return page;
1646}
1647
af0fb9df
MH
1648/*
1649 * Allocates a fresh page to the hugetlb allocator pool in the node interleaved
1650 * manner.
1651 */
f60858f9
MK
1652static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1653 nodemask_t *node_alloc_noretry)
b2261026
JK
1654{
1655 struct page *page;
1656 int nr_nodes, node;
af0fb9df 1657 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
b2261026
JK
1658
1659 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
f60858f9
MK
1660 page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed,
1661 node_alloc_noretry);
af0fb9df 1662 if (page)
b2261026 1663 break;
b2261026
JK
1664 }
1665
af0fb9df
MH
1666 if (!page)
1667 return 0;
b2261026 1668
af0fb9df
MH
1669 put_page(page); /* free it into the hugepage allocator */
1670
1671 return 1;
b2261026
JK
1672}
1673
e8c5c824
LS
1674/*
1675 * Free huge page from pool from next node to free.
1676 * Attempt to keep persistent huge pages more or less
1677 * balanced over allowed nodes.
1678 * Called with hugetlb_lock locked.
1679 */
6ae11b27
LS
1680static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1681 bool acct_surplus)
e8c5c824 1682{
b2261026 1683 int nr_nodes, node;
e8c5c824
LS
1684 int ret = 0;
1685
b2261026 1686 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
685f3457
LS
1687 /*
1688 * If we're returning unused surplus pages, only examine
1689 * nodes with surplus pages.
1690 */
b2261026
JK
1691 if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
1692 !list_empty(&h->hugepage_freelists[node])) {
e8c5c824 1693 struct page *page =
b2261026 1694 list_entry(h->hugepage_freelists[node].next,
e8c5c824
LS
1695 struct page, lru);
1696 list_del(&page->lru);
1697 h->free_huge_pages--;
b2261026 1698 h->free_huge_pages_node[node]--;
685f3457
LS
1699 if (acct_surplus) {
1700 h->surplus_huge_pages--;
b2261026 1701 h->surplus_huge_pages_node[node]--;
685f3457 1702 }
e8c5c824
LS
1703 update_and_free_page(h, page);
1704 ret = 1;
9a76db09 1705 break;
e8c5c824 1706 }
b2261026 1707 }
e8c5c824
LS
1708
1709 return ret;
1710}
1711
c8721bbb
NH
1712/*
1713 * Dissolve a given free hugepage into free buddy pages. This function does
faf53def
NH
1714 * nothing for in-use hugepages and non-hugepages.
1715 * This function returns values like below:
1716 *
1717 * -EBUSY: failed to dissolved free hugepages or the hugepage is in-use
1718 * (allocated or reserved.)
1719 * 0: successfully dissolved free hugepages or the page is not a
1720 * hugepage (considered as already dissolved)
c8721bbb 1721 */
c3114a84 1722int dissolve_free_huge_page(struct page *page)
c8721bbb 1723{
6bc9b564 1724 int rc = -EBUSY;
082d5b6b 1725
7ffddd49 1726retry:
faf53def
NH
1727 /* Not to disrupt normal path by vainly holding hugetlb_lock */
1728 if (!PageHuge(page))
1729 return 0;
1730
c8721bbb 1731 spin_lock(&hugetlb_lock);
faf53def
NH
1732 if (!PageHuge(page)) {
1733 rc = 0;
1734 goto out;
1735 }
1736
1737 if (!page_count(page)) {
2247bb33
GS
1738 struct page *head = compound_head(page);
1739 struct hstate *h = page_hstate(head);
1740 int nid = page_to_nid(head);
6bc9b564 1741 if (h->free_huge_pages - h->resv_huge_pages == 0)
082d5b6b 1742 goto out;
7ffddd49
MS
1743
1744 /*
1745 * We should make sure that the page is already on the free list
1746 * when it is dissolved.
1747 */
6c037149 1748 if (unlikely(!HPageFreed(head))) {
7ffddd49
MS
1749 spin_unlock(&hugetlb_lock);
1750 cond_resched();
1751
1752 /*
1753 * Theoretically, we should return -EBUSY when we
1754 * encounter this race. In fact, we have a chance
1755 * to successfully dissolve the page if we do a
1756 * retry. Because the race window is quite small.
1757 * If we seize this opportunity, it is an optimization
1758 * for increasing the success rate of dissolving page.
1759 */
1760 goto retry;
1761 }
1762
c3114a84
AK
1763 /*
1764 * Move PageHWPoison flag from head page to the raw error page,
1765 * which makes any subpages rather than the error page reusable.
1766 */
1767 if (PageHWPoison(head) && page != head) {
1768 SetPageHWPoison(page);
1769 ClearPageHWPoison(head);
1770 }
2247bb33 1771 list_del(&head->lru);
c8721bbb
NH
1772 h->free_huge_pages--;
1773 h->free_huge_pages_node[nid]--;
c1470b33 1774 h->max_huge_pages--;
2247bb33 1775 update_and_free_page(h, head);
6bc9b564 1776 rc = 0;
c8721bbb 1777 }
082d5b6b 1778out:
c8721bbb 1779 spin_unlock(&hugetlb_lock);
082d5b6b 1780 return rc;
c8721bbb
NH
1781}
1782
1783/*
1784 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
1785 * make specified memory blocks removable from the system.
2247bb33
GS
1786 * Note that this will dissolve a free gigantic hugepage completely, if any
1787 * part of it lies within the given range.
082d5b6b
GS
1788 * Also note that if dissolve_free_huge_page() returns with an error, all
1789 * free hugepages that were dissolved before that error are lost.
c8721bbb 1790 */
082d5b6b 1791int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
c8721bbb 1792{
c8721bbb 1793 unsigned long pfn;
eb03aa00 1794 struct page *page;
082d5b6b 1795 int rc = 0;
c8721bbb 1796
d0177639 1797 if (!hugepages_supported())
082d5b6b 1798 return rc;
d0177639 1799
eb03aa00
GS
1800 for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
1801 page = pfn_to_page(pfn);
faf53def
NH
1802 rc = dissolve_free_huge_page(page);
1803 if (rc)
1804 break;
eb03aa00 1805 }
082d5b6b
GS
1806
1807 return rc;
c8721bbb
NH
1808}
1809
ab5ac90a
MH
1810/*
1811 * Allocates a fresh surplus page from the page allocator.
1812 */
0c397dae 1813static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
aaf14e40 1814 int nid, nodemask_t *nmask)
7893d1d5 1815{
9980d744 1816 struct page *page = NULL;
7893d1d5 1817
bae7f4ae 1818 if (hstate_is_gigantic(h))
aa888a74
AK
1819 return NULL;
1820
d1c3fb1f 1821 spin_lock(&hugetlb_lock);
9980d744
MH
1822 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
1823 goto out_unlock;
d1c3fb1f
NA
1824 spin_unlock(&hugetlb_lock);
1825
f60858f9 1826 page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
9980d744 1827 if (!page)
0c397dae 1828 return NULL;
d1c3fb1f
NA
1829
1830 spin_lock(&hugetlb_lock);
9980d744
MH
1831 /*
1832 * We could have raced with the pool size change.
1833 * Double check that and simply deallocate the new page
1834 * if we would end up overcommiting the surpluses. Abuse
1835 * temporary page to workaround the nasty free_huge_page
1836 * codeflow
1837 */
1838 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
9157c311 1839 SetHPageTemporary(page);
2bf753e6 1840 spin_unlock(&hugetlb_lock);
9980d744 1841 put_page(page);
2bf753e6 1842 return NULL;
9980d744 1843 } else {
9980d744 1844 h->surplus_huge_pages++;
4704dea3 1845 h->surplus_huge_pages_node[page_to_nid(page)]++;
7893d1d5 1846 }
9980d744
MH
1847
1848out_unlock:
d1c3fb1f 1849 spin_unlock(&hugetlb_lock);
7893d1d5
AL
1850
1851 return page;
1852}
1853
bbe88753 1854static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
9a4e9f3b 1855 int nid, nodemask_t *nmask)
ab5ac90a
MH
1856{
1857 struct page *page;
1858
1859 if (hstate_is_gigantic(h))
1860 return NULL;
1861
f60858f9 1862 page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
ab5ac90a
MH
1863 if (!page)
1864 return NULL;
1865
1866 /*
1867 * We do not account these pages as surplus because they are only
1868 * temporary and will be released properly on the last reference
1869 */
9157c311 1870 SetHPageTemporary(page);
ab5ac90a
MH
1871
1872 return page;
1873}
1874
099730d6
DH
1875/*
1876 * Use the VMA's mpolicy to allocate a huge page from the buddy.
1877 */
e0ec90ee 1878static
0c397dae 1879struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
099730d6
DH
1880 struct vm_area_struct *vma, unsigned long addr)
1881{
aaf14e40
MH
1882 struct page *page;
1883 struct mempolicy *mpol;
1884 gfp_t gfp_mask = htlb_alloc_mask(h);
1885 int nid;
1886 nodemask_t *nodemask;
1887
1888 nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
0c397dae 1889 page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask);
aaf14e40
MH
1890 mpol_cond_put(mpol);
1891
1892 return page;
099730d6
DH
1893}
1894
ab5ac90a 1895/* page migration callback function */
3e59fcb0 1896struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
d92bbc27 1897 nodemask_t *nmask, gfp_t gfp_mask)
4db9b2ef 1898{
4db9b2ef
MH
1899 spin_lock(&hugetlb_lock);
1900 if (h->free_huge_pages - h->resv_huge_pages > 0) {
3e59fcb0
MH
1901 struct page *page;
1902
1903 page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
1904 if (page) {
1905 spin_unlock(&hugetlb_lock);
1906 return page;
4db9b2ef
MH
1907 }
1908 }
1909 spin_unlock(&hugetlb_lock);
4db9b2ef 1910
0c397dae 1911 return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask);
4db9b2ef
MH
1912}
1913
ebd63723 1914/* mempolicy aware migration callback */
389c8178
MH
1915struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
1916 unsigned long address)
ebd63723
MH
1917{
1918 struct mempolicy *mpol;
1919 nodemask_t *nodemask;
1920 struct page *page;
ebd63723
MH
1921 gfp_t gfp_mask;
1922 int node;
1923
ebd63723
MH
1924 gfp_mask = htlb_alloc_mask(h);
1925 node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
d92bbc27 1926 page = alloc_huge_page_nodemask(h, node, nodemask, gfp_mask);
ebd63723
MH
1927 mpol_cond_put(mpol);
1928
1929 return page;
1930}
1931
e4e574b7 1932/*
25985edc 1933 * Increase the hugetlb pool such that it can accommodate a reservation
e4e574b7
AL
1934 * of size 'delta'.
1935 */
0a4f3d1b 1936static int gather_surplus_pages(struct hstate *h, long delta)
1b2a1e7b 1937 __must_hold(&hugetlb_lock)
e4e574b7
AL
1938{
1939 struct list_head surplus_list;
1940 struct page *page, *tmp;
0a4f3d1b
LX
1941 int ret;
1942 long i;
1943 long needed, allocated;
28073b02 1944 bool alloc_ok = true;
e4e574b7 1945
a5516438 1946 needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
ac09b3a1 1947 if (needed <= 0) {
a5516438 1948 h->resv_huge_pages += delta;
e4e574b7 1949 return 0;
ac09b3a1 1950 }
e4e574b7
AL
1951
1952 allocated = 0;
1953 INIT_LIST_HEAD(&surplus_list);
1954
1955 ret = -ENOMEM;
1956retry:
1957 spin_unlock(&hugetlb_lock);
1958 for (i = 0; i < needed; i++) {
0c397dae 1959 page = alloc_surplus_huge_page(h, htlb_alloc_mask(h),
aaf14e40 1960 NUMA_NO_NODE, NULL);
28073b02
HD
1961 if (!page) {
1962 alloc_ok = false;
1963 break;
1964 }
e4e574b7 1965 list_add(&page->lru, &surplus_list);
69ed779a 1966 cond_resched();
e4e574b7 1967 }
28073b02 1968 allocated += i;
e4e574b7
AL
1969
1970 /*
1971 * After retaking hugetlb_lock, we need to recalculate 'needed'
1972 * because either resv_huge_pages or free_huge_pages may have changed.
1973 */
1974 spin_lock(&hugetlb_lock);
a5516438
AK
1975 needed = (h->resv_huge_pages + delta) -
1976 (h->free_huge_pages + allocated);
28073b02
HD
1977 if (needed > 0) {
1978 if (alloc_ok)
1979 goto retry;
1980 /*
1981 * We were not able to allocate enough pages to
1982 * satisfy the entire reservation so we free what
1983 * we've allocated so far.
1984 */
1985 goto free;
1986 }
e4e574b7
AL
1987 /*
1988 * The surplus_list now contains _at_least_ the number of extra pages
25985edc 1989 * needed to accommodate the reservation. Add the appropriate number
e4e574b7 1990 * of pages to the hugetlb pool and free the extras back to the buddy
ac09b3a1
AL
1991 * allocator. Commit the entire reservation here to prevent another
1992 * process from stealing the pages as they are added to the pool but
1993 * before they are reserved.
e4e574b7
AL
1994 */
1995 needed += allocated;
a5516438 1996 h->resv_huge_pages += delta;
e4e574b7 1997 ret = 0;
a9869b83 1998
19fc3f0a 1999 /* Free the needed pages to the hugetlb pool */
e4e574b7 2000 list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
e558464b
MS
2001 int zeroed;
2002
19fc3f0a
AL
2003 if ((--needed) < 0)
2004 break;
a9869b83
NH
2005 /*
2006 * This page is now managed by the hugetlb allocator and has
2007 * no users -- drop the buddy allocator's reference.
2008 */
e558464b
MS
2009 zeroed = put_page_testzero(page);
2010 VM_BUG_ON_PAGE(!zeroed, page);
a5516438 2011 enqueue_huge_page(h, page);
19fc3f0a 2012 }
28073b02 2013free:
b0365c8d 2014 spin_unlock(&hugetlb_lock);
19fc3f0a
AL
2015
2016 /* Free unnecessary surplus pages to the buddy allocator */
c0d934ba
JK
2017 list_for_each_entry_safe(page, tmp, &surplus_list, lru)
2018 put_page(page);
a9869b83 2019 spin_lock(&hugetlb_lock);
e4e574b7
AL
2020
2021 return ret;
2022}
2023
2024/*
e5bbc8a6
MK
2025 * This routine has two main purposes:
2026 * 1) Decrement the reservation count (resv_huge_pages) by the value passed
2027 * in unused_resv_pages. This corresponds to the prior adjustments made
2028 * to the associated reservation map.
2029 * 2) Free any unused surplus pages that may have been allocated to satisfy
2030 * the reservation. As many as unused_resv_pages may be freed.
2031 *
2032 * Called with hugetlb_lock held. However, the lock could be dropped (and
2033 * reacquired) during calls to cond_resched_lock. Whenever dropping the lock,
2034 * we must make sure nobody else can claim pages we are in the process of
2035 * freeing. Do this by ensuring resv_huge_page always is greater than the
2036 * number of huge pages we plan to free when dropping the lock.
e4e574b7 2037 */
a5516438
AK
2038static void return_unused_surplus_pages(struct hstate *h,
2039 unsigned long unused_resv_pages)
e4e574b7 2040{
e4e574b7
AL
2041 unsigned long nr_pages;
2042
aa888a74 2043 /* Cannot return gigantic pages currently */
bae7f4ae 2044 if (hstate_is_gigantic(h))
e5bbc8a6 2045 goto out;
aa888a74 2046
e5bbc8a6
MK
2047 /*
2048 * Part (or even all) of the reservation could have been backed
2049 * by pre-allocated pages. Only free surplus pages.
2050 */
a5516438 2051 nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
e4e574b7 2052
685f3457
LS
2053 /*
2054 * We want to release as many surplus pages as possible, spread
9b5e5d0f
LS
2055 * evenly across all nodes with memory. Iterate across these nodes
2056 * until we can no longer free unreserved surplus pages. This occurs
2057 * when the nodes with surplus pages have no free pages.
9e7ee400 2058 * free_pool_huge_page() will balance the freed pages across the
9b5e5d0f 2059 * on-line nodes with memory and will handle the hstate accounting.
e5bbc8a6
MK
2060 *
2061 * Note that we decrement resv_huge_pages as we free the pages. If
2062 * we drop the lock, resv_huge_pages will still be sufficiently large
2063 * to cover subsequent pages we may free.
685f3457
LS
2064 */
2065 while (nr_pages--) {
e5bbc8a6
MK
2066 h->resv_huge_pages--;
2067 unused_resv_pages--;
8cebfcd0 2068 if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
e5bbc8a6 2069 goto out;
7848a4bf 2070 cond_resched_lock(&hugetlb_lock);
e4e574b7 2071 }
e5bbc8a6
MK
2072
2073out:
2074 /* Fully uncommit the reservation */
2075 h->resv_huge_pages -= unused_resv_pages;
e4e574b7
AL
2076}
2077
5e911373 2078
c37f9fb1 2079/*
feba16e2 2080 * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
5e911373 2081 * are used by the huge page allocation routines to manage reservations.
cf3ad20b
MK
2082 *
2083 * vma_needs_reservation is called to determine if the huge page at addr
2084 * within the vma has an associated reservation. If a reservation is
2085 * needed, the value 1 is returned. The caller is then responsible for
2086 * managing the global reservation and subpool usage counts. After
2087 * the huge page has been allocated, vma_commit_reservation is called
feba16e2
MK
2088 * to add the page to the reservation map. If the page allocation fails,
2089 * the reservation must be ended instead of committed. vma_end_reservation
2090 * is called in such cases.
cf3ad20b
MK
2091 *
2092 * In the normal case, vma_commit_reservation returns the same value
2093 * as the preceding vma_needs_reservation call. The only time this
2094 * is not the case is if a reserve map was changed between calls. It
2095 * is the responsibility of the caller to notice the difference and
2096 * take appropriate action.
96b96a96
MK
2097 *
2098 * vma_add_reservation is used in error paths where a reservation must
2099 * be restored when a newly allocated huge page must be freed. It is
2100 * to be called after calling vma_needs_reservation to determine if a
2101 * reservation exists.
c37f9fb1 2102 */
5e911373
MK
2103enum vma_resv_mode {
2104 VMA_NEEDS_RESV,
2105 VMA_COMMIT_RESV,
feba16e2 2106 VMA_END_RESV,
96b96a96 2107 VMA_ADD_RESV,
5e911373 2108};
cf3ad20b
MK
2109static long __vma_reservation_common(struct hstate *h,
2110 struct vm_area_struct *vma, unsigned long addr,
5e911373 2111 enum vma_resv_mode mode)
c37f9fb1 2112{
4e35f483
JK
2113 struct resv_map *resv;
2114 pgoff_t idx;
cf3ad20b 2115 long ret;
0db9d74e 2116 long dummy_out_regions_needed;
c37f9fb1 2117
4e35f483
JK
2118 resv = vma_resv_map(vma);
2119 if (!resv)
84afd99b 2120 return 1;
c37f9fb1 2121
4e35f483 2122 idx = vma_hugecache_offset(h, vma, addr);
5e911373
MK
2123 switch (mode) {
2124 case VMA_NEEDS_RESV:
0db9d74e
MA
2125 ret = region_chg(resv, idx, idx + 1, &dummy_out_regions_needed);
2126 /* We assume that vma_reservation_* routines always operate on
2127 * 1 page, and that adding to resv map a 1 page entry can only
2128 * ever require 1 region.
2129 */
2130 VM_BUG_ON(dummy_out_regions_needed != 1);
5e911373
MK
2131 break;
2132 case VMA_COMMIT_RESV:
075a61d0 2133 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
0db9d74e
MA
2134 /* region_add calls of range 1 should never fail. */
2135 VM_BUG_ON(ret < 0);
5e911373 2136 break;
feba16e2 2137 case VMA_END_RESV:
0db9d74e 2138 region_abort(resv, idx, idx + 1, 1);
5e911373
MK
2139 ret = 0;
2140 break;
96b96a96 2141 case VMA_ADD_RESV:
0db9d74e 2142 if (vma->vm_flags & VM_MAYSHARE) {
075a61d0 2143 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
0db9d74e
MA
2144 /* region_add calls of range 1 should never fail. */
2145 VM_BUG_ON(ret < 0);
2146 } else {
2147 region_abort(resv, idx, idx + 1, 1);
96b96a96
MK
2148 ret = region_del(resv, idx, idx + 1);
2149 }
2150 break;
5e911373
MK
2151 default:
2152 BUG();
2153 }
84afd99b 2154
4e35f483 2155 if (vma->vm_flags & VM_MAYSHARE)
cf3ad20b 2156 return ret;
67961f9d
MK
2157 else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) {
2158 /*
2159 * In most cases, reserves always exist for private mappings.
2160 * However, a file associated with mapping could have been
2161 * hole punched or truncated after reserves were consumed.
2162 * As subsequent fault on such a range will not use reserves.
2163 * Subtle - The reserve map for private mappings has the
2164 * opposite meaning than that of shared mappings. If NO
2165 * entry is in the reserve map, it means a reservation exists.
2166 * If an entry exists in the reserve map, it means the
2167 * reservation has already been consumed. As a result, the
2168 * return value of this routine is the opposite of the
2169 * value returned from reserve map manipulation routines above.
2170 */
2171 if (ret)
2172 return 0;
2173 else
2174 return 1;
2175 }
4e35f483 2176 else
cf3ad20b 2177 return ret < 0 ? ret : 0;
c37f9fb1 2178}
cf3ad20b
MK
2179
2180static long vma_needs_reservation(struct hstate *h,
a5516438 2181 struct vm_area_struct *vma, unsigned long addr)
c37f9fb1 2182{
5e911373 2183 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
cf3ad20b 2184}
84afd99b 2185
cf3ad20b
MK
2186static long vma_commit_reservation(struct hstate *h,
2187 struct vm_area_struct *vma, unsigned long addr)
2188{
5e911373
MK
2189 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
2190}
2191
feba16e2 2192static void vma_end_reservation(struct hstate *h,
5e911373
MK
2193 struct vm_area_struct *vma, unsigned long addr)
2194{
feba16e2 2195 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
c37f9fb1
AW
2196}
2197
96b96a96
MK
2198static long vma_add_reservation(struct hstate *h,
2199 struct vm_area_struct *vma, unsigned long addr)
2200{
2201 return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
2202}
2203
2204/*
2205 * This routine is called to restore a reservation on error paths. In the
2206 * specific error paths, a huge page was allocated (via alloc_huge_page)
2207 * and is about to be freed. If a reservation for the page existed,
d6995da3
MK
2208 * alloc_huge_page would have consumed the reservation and set
2209 * HPageRestoreReserve in the newly allocated page. When the page is freed
2210 * via free_huge_page, the global reservation count will be incremented if
2211 * HPageRestoreReserve is set. However, free_huge_page can not adjust the
2212 * reserve map. Adjust the reserve map here to be consistent with global
2213 * reserve count adjustments to be made by free_huge_page.
96b96a96
MK
2214 */
2215static void restore_reserve_on_error(struct hstate *h,
2216 struct vm_area_struct *vma, unsigned long address,
2217 struct page *page)
2218{
d6995da3 2219 if (unlikely(HPageRestoreReserve(page))) {
96b96a96
MK
2220 long rc = vma_needs_reservation(h, vma, address);
2221
2222 if (unlikely(rc < 0)) {
2223 /*
2224 * Rare out of memory condition in reserve map
d6995da3 2225 * manipulation. Clear HPageRestoreReserve so that
96b96a96
MK
2226 * global reserve count will not be incremented
2227 * by free_huge_page. This will make it appear
2228 * as though the reservation for this page was
2229 * consumed. This may prevent the task from
2230 * faulting in the page at a later time. This
2231 * is better than inconsistent global huge page
2232 * accounting of reserve counts.
2233 */
d6995da3 2234 ClearHPageRestoreReserve(page);
96b96a96
MK
2235 } else if (rc) {
2236 rc = vma_add_reservation(h, vma, address);
2237 if (unlikely(rc < 0))
2238 /*
2239 * See above comment about rare out of
2240 * memory condition.
2241 */
d6995da3 2242 ClearHPageRestoreReserve(page);
96b96a96
MK
2243 } else
2244 vma_end_reservation(h, vma, address);
2245 }
2246}
2247
70c3547e 2248struct page *alloc_huge_page(struct vm_area_struct *vma,
04f2cbe3 2249 unsigned long addr, int avoid_reserve)
1da177e4 2250{
90481622 2251 struct hugepage_subpool *spool = subpool_vma(vma);
a5516438 2252 struct hstate *h = hstate_vma(vma);
348ea204 2253 struct page *page;
d85f69b0
MK
2254 long map_chg, map_commit;
2255 long gbl_chg;
6d76dcf4
AK
2256 int ret, idx;
2257 struct hugetlb_cgroup *h_cg;
08cf9faf 2258 bool deferred_reserve;
a1e78772 2259
6d76dcf4 2260 idx = hstate_index(h);
a1e78772 2261 /*
d85f69b0
MK
2262 * Examine the region/reserve map to determine if the process
2263 * has a reservation for the page to be allocated. A return
2264 * code of zero indicates a reservation exists (no change).
a1e78772 2265 */
d85f69b0
MK
2266 map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
2267 if (map_chg < 0)
76dcee75 2268 return ERR_PTR(-ENOMEM);
d85f69b0
MK
2269
2270 /*
2271 * Processes that did not create the mapping will have no
2272 * reserves as indicated by the region/reserve map. Check
2273 * that the allocation will not exceed the subpool limit.
2274 * Allocations for MAP_NORESERVE mappings also need to be
2275 * checked against any subpool limit.
2276 */
2277 if (map_chg || avoid_reserve) {
2278 gbl_chg = hugepage_subpool_get_pages(spool, 1);
2279 if (gbl_chg < 0) {
feba16e2 2280 vma_end_reservation(h, vma, addr);
76dcee75 2281 return ERR_PTR(-ENOSPC);
5e911373 2282 }
1da177e4 2283
d85f69b0
MK
2284 /*
2285 * Even though there was no reservation in the region/reserve
2286 * map, there could be reservations associated with the
2287 * subpool that can be used. This would be indicated if the
2288 * return value of hugepage_subpool_get_pages() is zero.
2289 * However, if avoid_reserve is specified we still avoid even
2290 * the subpool reservations.
2291 */
2292 if (avoid_reserve)
2293 gbl_chg = 1;
2294 }
2295
08cf9faf
MA
2296 /* If this allocation is not consuming a reservation, charge it now.
2297 */
2298 deferred_reserve = map_chg || avoid_reserve || !vma_resv_map(vma);
2299 if (deferred_reserve) {
2300 ret = hugetlb_cgroup_charge_cgroup_rsvd(
2301 idx, pages_per_huge_page(h), &h_cg);
2302 if (ret)
2303 goto out_subpool_put;
2304 }
2305
6d76dcf4 2306 ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
8f34af6f 2307 if (ret)
08cf9faf 2308 goto out_uncharge_cgroup_reservation;
8f34af6f 2309
1da177e4 2310 spin_lock(&hugetlb_lock);
d85f69b0
MK
2311 /*
2312 * glb_chg is passed to indicate whether or not a page must be taken
2313 * from the global free pool (global change). gbl_chg == 0 indicates
2314 * a reservation exists for the allocation.
2315 */
2316 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
81a6fcae 2317 if (!page) {
94ae8ba7 2318 spin_unlock(&hugetlb_lock);
0c397dae 2319 page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
8f34af6f
JZ
2320 if (!page)
2321 goto out_uncharge_cgroup;
a88c7695 2322 if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
d6995da3 2323 SetHPageRestoreReserve(page);
a88c7695
NH
2324 h->resv_huge_pages--;
2325 }
79dbb236 2326 spin_lock(&hugetlb_lock);
15a8d68e 2327 list_add(&page->lru, &h->hugepage_activelist);
81a6fcae 2328 /* Fall through */
68842c9b 2329 }
81a6fcae 2330 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
08cf9faf
MA
2331 /* If allocation is not consuming a reservation, also store the
2332 * hugetlb_cgroup pointer on the page.
2333 */
2334 if (deferred_reserve) {
2335 hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h),
2336 h_cg, page);
2337 }
2338
81a6fcae 2339 spin_unlock(&hugetlb_lock);
348ea204 2340
d6995da3 2341 hugetlb_set_page_subpool(page, spool);
90d8b7e6 2342
d85f69b0
MK
2343 map_commit = vma_commit_reservation(h, vma, addr);
2344 if (unlikely(map_chg > map_commit)) {
33039678
MK
2345 /*
2346 * The page was added to the reservation map between
2347 * vma_needs_reservation and vma_commit_reservation.
2348 * This indicates a race with hugetlb_reserve_pages.
2349 * Adjust for the subpool count incremented above AND
2350 * in hugetlb_reserve_pages for the same page. Also,
2351 * the reservation count added in hugetlb_reserve_pages
2352 * no longer applies.
2353 */
2354 long rsv_adjust;
2355
2356 rsv_adjust = hugepage_subpool_put_pages(spool, 1);
2357 hugetlb_acct_memory(h, -rsv_adjust);
79aa925b
MK
2358 if (deferred_reserve)
2359 hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
2360 pages_per_huge_page(h), page);
33039678 2361 }
90d8b7e6 2362 return page;
8f34af6f
JZ
2363
2364out_uncharge_cgroup:
2365 hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
08cf9faf
MA
2366out_uncharge_cgroup_reservation:
2367 if (deferred_reserve)
2368 hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h),
2369 h_cg);
8f34af6f 2370out_subpool_put:
d85f69b0 2371 if (map_chg || avoid_reserve)
8f34af6f 2372 hugepage_subpool_put_pages(spool, 1);
feba16e2 2373 vma_end_reservation(h, vma, addr);
8f34af6f 2374 return ERR_PTR(-ENOSPC);
b45b5bd6
DG
2375}
2376
e24a1307
AK
2377int alloc_bootmem_huge_page(struct hstate *h)
2378 __attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
2379int __alloc_bootmem_huge_page(struct hstate *h)
aa888a74
AK
2380{
2381 struct huge_bootmem_page *m;
b2261026 2382 int nr_nodes, node;
aa888a74 2383
b2261026 2384 for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
aa888a74
AK
2385 void *addr;
2386
eb31d559 2387 addr = memblock_alloc_try_nid_raw(
8b89a116 2388 huge_page_size(h), huge_page_size(h),
97ad1087 2389 0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
aa888a74
AK
2390 if (addr) {
2391 /*
2392 * Use the beginning of the huge page to store the
2393 * huge_bootmem_page struct (until gather_bootmem
2394 * puts them into the mem_map).
2395 */
2396 m = addr;
91f47662 2397 goto found;
aa888a74 2398 }
aa888a74
AK
2399 }
2400 return 0;
2401
2402found:
df994ead 2403 BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
aa888a74 2404 /* Put them into a private list first because mem_map is not up yet */
330d6e48 2405 INIT_LIST_HEAD(&m->list);
aa888a74
AK
2406 list_add(&m->list, &huge_boot_pages);
2407 m->hstate = h;
2408 return 1;
2409}
2410
d00181b9
KS
2411static void __init prep_compound_huge_page(struct page *page,
2412 unsigned int order)
18229df5
AW
2413{
2414 if (unlikely(order > (MAX_ORDER - 1)))
2415 prep_compound_gigantic_page(page, order);
2416 else
2417 prep_compound_page(page, order);
2418}
2419
aa888a74
AK
2420/* Put bootmem huge pages into the standard lists after mem_map is up */
2421static void __init gather_bootmem_prealloc(void)
2422{
2423 struct huge_bootmem_page *m;
2424
2425 list_for_each_entry(m, &huge_boot_pages, list) {
40d18ebf 2426 struct page *page = virt_to_page(m);
aa888a74 2427 struct hstate *h = m->hstate;
ee8f248d 2428
aa888a74 2429 WARN_ON(page_count(page) != 1);
c78a7f36 2430 prep_compound_huge_page(page, huge_page_order(h));
ef5a22be 2431 WARN_ON(PageReserved(page));
aa888a74 2432 prep_new_huge_page(h, page, page_to_nid(page));
af0fb9df
MH
2433 put_page(page); /* free it into the hugepage allocator */
2434
b0320c7b
RA
2435 /*
2436 * If we had gigantic hugepages allocated at boot time, we need
2437 * to restore the 'stolen' pages to totalram_pages in order to
2438 * fix confusing memory reports from free(1) and another
2439 * side-effects, like CommitLimit going negative.
2440 */
bae7f4ae 2441 if (hstate_is_gigantic(h))
c78a7f36 2442 adjust_managed_page_count(page, pages_per_huge_page(h));
520495fe 2443 cond_resched();
aa888a74
AK
2444 }
2445}
2446
8faa8b07 2447static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
1da177e4
LT
2448{
2449 unsigned long i;
f60858f9
MK
2450 nodemask_t *node_alloc_noretry;
2451
2452 if (!hstate_is_gigantic(h)) {
2453 /*
2454 * Bit mask controlling how hard we retry per-node allocations.
2455 * Ignore errors as lower level routines can deal with
2456 * node_alloc_noretry == NULL. If this kmalloc fails at boot
2457 * time, we are likely in bigger trouble.
2458 */
2459 node_alloc_noretry = kmalloc(sizeof(*node_alloc_noretry),
2460 GFP_KERNEL);
2461 } else {
2462 /* allocations done at boot time */
2463 node_alloc_noretry = NULL;
2464 }
2465
2466 /* bit mask controlling how hard we retry per-node allocations */
2467 if (node_alloc_noretry)
2468 nodes_clear(*node_alloc_noretry);
a5516438 2469
e5ff2159 2470 for (i = 0; i < h->max_huge_pages; ++i) {
bae7f4ae 2471 if (hstate_is_gigantic(h)) {
dbda8fea 2472 if (hugetlb_cma_size) {
cf11e85f 2473 pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
7ecc9565 2474 goto free;
cf11e85f 2475 }
aa888a74
AK
2476 if (!alloc_bootmem_huge_page(h))
2477 break;
0c397dae 2478 } else if (!alloc_pool_huge_page(h,
f60858f9
MK
2479 &node_states[N_MEMORY],
2480 node_alloc_noretry))
1da177e4 2481 break;
69ed779a 2482 cond_resched();
1da177e4 2483 }
d715cf80
LH
2484 if (i < h->max_huge_pages) {
2485 char buf[32];
2486
c6247f72 2487 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
d715cf80
LH
2488 pr_warn("HugeTLB: allocating %lu of page size %s failed. Only allocated %lu hugepages.\n",
2489 h->max_huge_pages, buf, i);
2490 h->max_huge_pages = i;
2491 }
7ecc9565 2492free:
f60858f9 2493 kfree(node_alloc_noretry);
e5ff2159
AK
2494}
2495
2496static void __init hugetlb_init_hstates(void)
2497{
2498 struct hstate *h;
2499
2500 for_each_hstate(h) {
641844f5
NH
2501 if (minimum_order > huge_page_order(h))
2502 minimum_order = huge_page_order(h);
2503
8faa8b07 2504 /* oversize hugepages were init'ed in early boot */
bae7f4ae 2505 if (!hstate_is_gigantic(h))
8faa8b07 2506 hugetlb_hstate_alloc_pages(h);
e5ff2159 2507 }
641844f5 2508 VM_BUG_ON(minimum_order == UINT_MAX);
e5ff2159
AK
2509}
2510
2511static void __init report_hugepages(void)
2512{
2513 struct hstate *h;
2514
2515 for_each_hstate(h) {
4abd32db 2516 char buf[32];
c6247f72
MW
2517
2518 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
ffb22af5 2519 pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
c6247f72 2520 buf, h->free_huge_pages);
e5ff2159
AK
2521 }
2522}
2523
1da177e4 2524#ifdef CONFIG_HIGHMEM
6ae11b27
LS
2525static void try_to_free_low(struct hstate *h, unsigned long count,
2526 nodemask_t *nodes_allowed)
1da177e4 2527{
4415cc8d
CL
2528 int i;
2529
bae7f4ae 2530 if (hstate_is_gigantic(h))
aa888a74
AK
2531 return;
2532
6ae11b27 2533 for_each_node_mask(i, *nodes_allowed) {
1da177e4 2534 struct page *page, *next;
a5516438
AK
2535 struct list_head *freel = &h->hugepage_freelists[i];
2536 list_for_each_entry_safe(page, next, freel, lru) {
2537 if (count >= h->nr_huge_pages)
6b0c880d 2538 return;
1da177e4
LT
2539 if (PageHighMem(page))
2540 continue;
2541 list_del(&page->lru);
e5ff2159 2542 update_and_free_page(h, page);
a5516438
AK
2543 h->free_huge_pages--;
2544 h->free_huge_pages_node[page_to_nid(page)]--;
1da177e4
LT
2545 }
2546 }
2547}
2548#else
6ae11b27
LS
2549static inline void try_to_free_low(struct hstate *h, unsigned long count,
2550 nodemask_t *nodes_allowed)
1da177e4
LT
2551{
2552}
2553#endif
2554
20a0307c
WF
2555/*
2556 * Increment or decrement surplus_huge_pages. Keep node-specific counters
2557 * balanced by operating on them in a round-robin fashion.
2558 * Returns 1 if an adjustment was made.
2559 */
6ae11b27
LS
2560static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
2561 int delta)
20a0307c 2562{
b2261026 2563 int nr_nodes, node;
20a0307c
WF
2564
2565 VM_BUG_ON(delta != -1 && delta != 1);
20a0307c 2566
b2261026
JK
2567 if (delta < 0) {
2568 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2569 if (h->surplus_huge_pages_node[node])
2570 goto found;
e8c5c824 2571 }
b2261026
JK
2572 } else {
2573 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2574 if (h->surplus_huge_pages_node[node] <
2575 h->nr_huge_pages_node[node])
2576 goto found;
e8c5c824 2577 }
b2261026
JK
2578 }
2579 return 0;
20a0307c 2580
b2261026
JK
2581found:
2582 h->surplus_huge_pages += delta;
2583 h->surplus_huge_pages_node[node] += delta;
2584 return 1;
20a0307c
WF
2585}
2586
a5516438 2587#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
fd875dca 2588static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
4eb0716e 2589 nodemask_t *nodes_allowed)
1da177e4 2590{
7893d1d5 2591 unsigned long min_count, ret;
f60858f9
MK
2592 NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL);
2593
2594 /*
2595 * Bit mask controlling how hard we retry per-node allocations.
2596 * If we can not allocate the bit mask, do not attempt to allocate
2597 * the requested huge pages.
2598 */
2599 if (node_alloc_noretry)
2600 nodes_clear(*node_alloc_noretry);
2601 else
2602 return -ENOMEM;
1da177e4 2603
4eb0716e
AG
2604 spin_lock(&hugetlb_lock);
2605
fd875dca
MK
2606 /*
2607 * Check for a node specific request.
2608 * Changing node specific huge page count may require a corresponding
2609 * change to the global count. In any case, the passed node mask
2610 * (nodes_allowed) will restrict alloc/free to the specified node.
2611 */
2612 if (nid != NUMA_NO_NODE) {
2613 unsigned long old_count = count;
2614
2615 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
2616 /*
2617 * User may have specified a large count value which caused the
2618 * above calculation to overflow. In this case, they wanted
2619 * to allocate as many huge pages as possible. Set count to
2620 * largest possible value to align with their intention.
2621 */
2622 if (count < old_count)
2623 count = ULONG_MAX;
2624 }
2625
4eb0716e
AG
2626 /*
2627 * Gigantic pages runtime allocation depend on the capability for large
2628 * page range allocation.
2629 * If the system does not provide this feature, return an error when
2630 * the user tries to allocate gigantic pages but let the user free the
2631 * boottime allocated gigantic pages.
2632 */
2633 if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) {
2634 if (count > persistent_huge_pages(h)) {
2635 spin_unlock(&hugetlb_lock);
f60858f9 2636 NODEMASK_FREE(node_alloc_noretry);
4eb0716e
AG
2637 return -EINVAL;
2638 }
2639 /* Fall through to decrease pool */
2640 }
aa888a74 2641
7893d1d5
AL
2642 /*
2643 * Increase the pool size
2644 * First take pages out of surplus state. Then make up the
2645 * remaining difference by allocating fresh huge pages.
d1c3fb1f 2646 *
0c397dae 2647 * We might race with alloc_surplus_huge_page() here and be unable
d1c3fb1f
NA
2648 * to convert a surplus huge page to a normal huge page. That is
2649 * not critical, though, it just means the overall size of the
2650 * pool might be one hugepage larger than it needs to be, but
2651 * within all the constraints specified by the sysctls.
7893d1d5 2652 */
a5516438 2653 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
6ae11b27 2654 if (!adjust_pool_surplus(h, nodes_allowed, -1))
7893d1d5
AL
2655 break;
2656 }
2657
a5516438 2658 while (count > persistent_huge_pages(h)) {
7893d1d5
AL
2659 /*
2660 * If this allocation races such that we no longer need the
2661 * page, free_huge_page will handle it by freeing the page
2662 * and reducing the surplus.
2663 */
2664 spin_unlock(&hugetlb_lock);
649920c6
JH
2665
2666 /* yield cpu to avoid soft lockup */
2667 cond_resched();
2668
f60858f9
MK
2669 ret = alloc_pool_huge_page(h, nodes_allowed,
2670 node_alloc_noretry);
7893d1d5
AL
2671 spin_lock(&hugetlb_lock);
2672 if (!ret)
2673 goto out;
2674
536240f2
MG
2675 /* Bail for signals. Probably ctrl-c from user */
2676 if (signal_pending(current))
2677 goto out;
7893d1d5 2678 }
7893d1d5
AL
2679
2680 /*
2681 * Decrease the pool size
2682 * First return free pages to the buddy allocator (being careful
2683 * to keep enough around to satisfy reservations). Then place
2684 * pages into surplus state as needed so the pool will shrink
2685 * to the desired size as pages become free.
d1c3fb1f
NA
2686 *
2687 * By placing pages into the surplus state independent of the
2688 * overcommit value, we are allowing the surplus pool size to
2689 * exceed overcommit. There are few sane options here. Since
0c397dae 2690 * alloc_surplus_huge_page() is checking the global counter,
d1c3fb1f
NA
2691 * though, we'll note that we're not allowed to exceed surplus
2692 * and won't grow the pool anywhere else. Not until one of the
2693 * sysctls are changed, or the surplus pages go out of use.
7893d1d5 2694 */
a5516438 2695 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
6b0c880d 2696 min_count = max(count, min_count);
6ae11b27 2697 try_to_free_low(h, min_count, nodes_allowed);
a5516438 2698 while (min_count < persistent_huge_pages(h)) {
6ae11b27 2699 if (!free_pool_huge_page(h, nodes_allowed, 0))
1da177e4 2700 break;
55f67141 2701 cond_resched_lock(&hugetlb_lock);
1da177e4 2702 }
a5516438 2703 while (count < persistent_huge_pages(h)) {
6ae11b27 2704 if (!adjust_pool_surplus(h, nodes_allowed, 1))
7893d1d5
AL
2705 break;
2706 }
2707out:
4eb0716e 2708 h->max_huge_pages = persistent_huge_pages(h);
1da177e4 2709 spin_unlock(&hugetlb_lock);
4eb0716e 2710
f60858f9
MK
2711 NODEMASK_FREE(node_alloc_noretry);
2712
4eb0716e 2713 return 0;
1da177e4
LT
2714}
2715
a3437870
NA
2716#define HSTATE_ATTR_RO(_name) \
2717 static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
2718
2719#define HSTATE_ATTR(_name) \
2720 static struct kobj_attribute _name##_attr = \
2721 __ATTR(_name, 0644, _name##_show, _name##_store)
2722
2723static struct kobject *hugepages_kobj;
2724static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2725
9a305230
LS
2726static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
2727
2728static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
a3437870
NA
2729{
2730 int i;
9a305230 2731
a3437870 2732 for (i = 0; i < HUGE_MAX_HSTATE; i++)
9a305230
LS
2733 if (hstate_kobjs[i] == kobj) {
2734 if (nidp)
2735 *nidp = NUMA_NO_NODE;
a3437870 2736 return &hstates[i];
9a305230
LS
2737 }
2738
2739 return kobj_to_node_hstate(kobj, nidp);
a3437870
NA
2740}
2741
06808b08 2742static ssize_t nr_hugepages_show_common(struct kobject *kobj,
a3437870
NA
2743 struct kobj_attribute *attr, char *buf)
2744{
9a305230
LS
2745 struct hstate *h;
2746 unsigned long nr_huge_pages;
2747 int nid;
2748
2749 h = kobj_to_hstate(kobj, &nid);
2750 if (nid == NUMA_NO_NODE)
2751 nr_huge_pages = h->nr_huge_pages;
2752 else
2753 nr_huge_pages = h->nr_huge_pages_node[nid];
2754
ae7a927d 2755 return sysfs_emit(buf, "%lu\n", nr_huge_pages);
a3437870 2756}
adbe8726 2757
238d3c13
DR
2758static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
2759 struct hstate *h, int nid,
2760 unsigned long count, size_t len)
a3437870
NA
2761{
2762 int err;
2d0adf7e 2763 nodemask_t nodes_allowed, *n_mask;
a3437870 2764
2d0adf7e
OS
2765 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
2766 return -EINVAL;
adbe8726 2767
9a305230
LS
2768 if (nid == NUMA_NO_NODE) {
2769 /*
2770 * global hstate attribute
2771 */
2772 if (!(obey_mempolicy &&
2d0adf7e
OS
2773 init_nodemask_of_mempolicy(&nodes_allowed)))
2774 n_mask = &node_states[N_MEMORY];
2775 else
2776 n_mask = &nodes_allowed;
2777 } else {
9a305230 2778 /*
fd875dca
MK
2779 * Node specific request. count adjustment happens in
2780 * set_max_huge_pages() after acquiring hugetlb_lock.
9a305230 2781 */
2d0adf7e
OS
2782 init_nodemask_of_node(&nodes_allowed, nid);
2783 n_mask = &nodes_allowed;
fd875dca 2784 }
9a305230 2785
2d0adf7e 2786 err = set_max_huge_pages(h, count, nid, n_mask);
06808b08 2787
4eb0716e 2788 return err ? err : len;
06808b08
LS
2789}
2790
238d3c13
DR
2791static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
2792 struct kobject *kobj, const char *buf,
2793 size_t len)
2794{
2795 struct hstate *h;
2796 unsigned long count;
2797 int nid;
2798 int err;
2799
2800 err = kstrtoul(buf, 10, &count);
2801 if (err)
2802 return err;
2803
2804 h = kobj_to_hstate(kobj, &nid);
2805 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
2806}
2807
06808b08
LS
2808static ssize_t nr_hugepages_show(struct kobject *kobj,
2809 struct kobj_attribute *attr, char *buf)
2810{
2811 return nr_hugepages_show_common(kobj, attr, buf);
2812}
2813
2814static ssize_t nr_hugepages_store(struct kobject *kobj,
2815 struct kobj_attribute *attr, const char *buf, size_t len)
2816{
238d3c13 2817 return nr_hugepages_store_common(false, kobj, buf, len);
a3437870
NA
2818}
2819HSTATE_ATTR(nr_hugepages);
2820
06808b08
LS
2821#ifdef CONFIG_NUMA
2822
2823/*
2824 * hstate attribute for optionally mempolicy-based constraint on persistent
2825 * huge page alloc/free.
2826 */
2827static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
ae7a927d
JP
2828 struct kobj_attribute *attr,
2829 char *buf)
06808b08
LS
2830{
2831 return nr_hugepages_show_common(kobj, attr, buf);
2832}
2833
2834static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
2835 struct kobj_attribute *attr, const char *buf, size_t len)
2836{
238d3c13 2837 return nr_hugepages_store_common(true, kobj, buf, len);
06808b08
LS
2838}
2839HSTATE_ATTR(nr_hugepages_mempolicy);
2840#endif
2841
2842
a3437870
NA
2843static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
2844 struct kobj_attribute *attr, char *buf)
2845{
9a305230 2846 struct hstate *h = kobj_to_hstate(kobj, NULL);
ae7a927d 2847 return sysfs_emit(buf, "%lu\n", h->nr_overcommit_huge_pages);
a3437870 2848}
adbe8726 2849
a3437870
NA
2850static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
2851 struct kobj_attribute *attr, const char *buf, size_t count)
2852{
2853 int err;
2854 unsigned long input;
9a305230 2855 struct hstate *h = kobj_to_hstate(kobj, NULL);
a3437870 2856
bae7f4ae 2857 if (hstate_is_gigantic(h))
adbe8726
EM
2858 return -EINVAL;
2859
3dbb95f7 2860 err = kstrtoul(buf, 10, &input);
a3437870 2861 if (err)
73ae31e5 2862 return err;
a3437870
NA
2863
2864 spin_lock(&hugetlb_lock);
2865 h->nr_overcommit_huge_pages = input;
2866 spin_unlock(&hugetlb_lock);
2867
2868 return count;
2869}
2870HSTATE_ATTR(nr_overcommit_hugepages);
2871
2872static ssize_t free_hugepages_show(struct kobject *kobj,
2873 struct kobj_attribute *attr, char *buf)
2874{
9a305230
LS
2875 struct hstate *h;
2876 unsigned long free_huge_pages;
2877 int nid;
2878
2879 h = kobj_to_hstate(kobj, &nid);
2880 if (nid == NUMA_NO_NODE)
2881 free_huge_pages = h->free_huge_pages;
2882 else
2883 free_huge_pages = h->free_huge_pages_node[nid];
2884
ae7a927d 2885 return sysfs_emit(buf, "%lu\n", free_huge_pages);
a3437870
NA
2886}
2887HSTATE_ATTR_RO(free_hugepages);
2888
2889static ssize_t resv_hugepages_show(struct kobject *kobj,
2890 struct kobj_attribute *attr, char *buf)
2891{
9a305230 2892 struct hstate *h = kobj_to_hstate(kobj, NULL);
ae7a927d 2893 return sysfs_emit(buf, "%lu\n", h->resv_huge_pages);
a3437870
NA
2894}
2895HSTATE_ATTR_RO(resv_hugepages);
2896
2897static ssize_t surplus_hugepages_show(struct kobject *kobj,
2898 struct kobj_attribute *attr, char *buf)
2899{
9a305230
LS
2900 struct hstate *h;
2901 unsigned long surplus_huge_pages;
2902 int nid;
2903
2904 h = kobj_to_hstate(kobj, &nid);
2905 if (nid == NUMA_NO_NODE)
2906 surplus_huge_pages = h->surplus_huge_pages;
2907 else
2908 surplus_huge_pages = h->surplus_huge_pages_node[nid];
2909
ae7a927d 2910 return sysfs_emit(buf, "%lu\n", surplus_huge_pages);
a3437870
NA
2911}
2912HSTATE_ATTR_RO(surplus_hugepages);
2913
2914static struct attribute *hstate_attrs[] = {
2915 &nr_hugepages_attr.attr,
2916 &nr_overcommit_hugepages_attr.attr,
2917 &free_hugepages_attr.attr,
2918 &resv_hugepages_attr.attr,
2919 &surplus_hugepages_attr.attr,
06808b08
LS
2920#ifdef CONFIG_NUMA
2921 &nr_hugepages_mempolicy_attr.attr,
2922#endif
a3437870
NA
2923 NULL,
2924};
2925
67e5ed96 2926static const struct attribute_group hstate_attr_group = {
a3437870
NA
2927 .attrs = hstate_attrs,
2928};
2929
094e9539
JM
2930static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
2931 struct kobject **hstate_kobjs,
67e5ed96 2932 const struct attribute_group *hstate_attr_group)
a3437870
NA
2933{
2934 int retval;
972dc4de 2935 int hi = hstate_index(h);
a3437870 2936
9a305230
LS
2937 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
2938 if (!hstate_kobjs[hi])
a3437870
NA
2939 return -ENOMEM;
2940
9a305230 2941 retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
cc2205a6 2942 if (retval) {
9a305230 2943 kobject_put(hstate_kobjs[hi]);
cc2205a6
ML
2944 hstate_kobjs[hi] = NULL;
2945 }
a3437870
NA
2946
2947 return retval;
2948}
2949
2950static void __init hugetlb_sysfs_init(void)
2951{
2952 struct hstate *h;
2953 int err;
2954
2955 hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
2956 if (!hugepages_kobj)
2957 return;
2958
2959 for_each_hstate(h) {
9a305230
LS
2960 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
2961 hstate_kobjs, &hstate_attr_group);
a3437870 2962 if (err)
282f4214 2963 pr_err("HugeTLB: Unable to add hstate %s", h->name);
a3437870
NA
2964 }
2965}
2966
9a305230
LS
2967#ifdef CONFIG_NUMA
2968
2969/*
2970 * node_hstate/s - associate per node hstate attributes, via their kobjects,
10fbcf4c
KS
2971 * with node devices in node_devices[] using a parallel array. The array
2972 * index of a node device or _hstate == node id.
2973 * This is here to avoid any static dependency of the node device driver, in
9a305230
LS
2974 * the base kernel, on the hugetlb module.
2975 */
2976struct node_hstate {
2977 struct kobject *hugepages_kobj;
2978 struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2979};
b4e289a6 2980static struct node_hstate node_hstates[MAX_NUMNODES];
9a305230
LS
2981
2982/*
10fbcf4c 2983 * A subset of global hstate attributes for node devices
9a305230
LS
2984 */
2985static struct attribute *per_node_hstate_attrs[] = {
2986 &nr_hugepages_attr.attr,
2987 &free_hugepages_attr.attr,
2988 &surplus_hugepages_attr.attr,
2989 NULL,
2990};
2991
67e5ed96 2992static const struct attribute_group per_node_hstate_attr_group = {
9a305230
LS
2993 .attrs = per_node_hstate_attrs,
2994};
2995
2996/*
10fbcf4c 2997 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
9a305230
LS
2998 * Returns node id via non-NULL nidp.
2999 */
3000static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
3001{
3002 int nid;
3003
3004 for (nid = 0; nid < nr_node_ids; nid++) {
3005 struct node_hstate *nhs = &node_hstates[nid];
3006 int i;
3007 for (i = 0; i < HUGE_MAX_HSTATE; i++)
3008 if (nhs->hstate_kobjs[i] == kobj) {
3009 if (nidp)
3010 *nidp = nid;
3011 return &hstates[i];
3012 }
3013 }
3014
3015 BUG();
3016 return NULL;
3017}
3018
3019/*
10fbcf4c 3020 * Unregister hstate attributes from a single node device.
9a305230
LS
3021 * No-op if no hstate attributes attached.
3022 */
3cd8b44f 3023static void hugetlb_unregister_node(struct node *node)
9a305230
LS
3024{
3025 struct hstate *h;
10fbcf4c 3026 struct node_hstate *nhs = &node_hstates[node->dev.id];
9a305230
LS
3027
3028 if (!nhs->hugepages_kobj)
9b5e5d0f 3029 return; /* no hstate attributes */
9a305230 3030
972dc4de
AK
3031 for_each_hstate(h) {
3032 int idx = hstate_index(h);
3033 if (nhs->hstate_kobjs[idx]) {
3034 kobject_put(nhs->hstate_kobjs[idx]);
3035 nhs->hstate_kobjs[idx] = NULL;
9a305230 3036 }
972dc4de 3037 }
9a305230
LS
3038
3039 kobject_put(nhs->hugepages_kobj);
3040 nhs->hugepages_kobj = NULL;
3041}
3042
9a305230
LS
3043
3044/*
10fbcf4c 3045 * Register hstate attributes for a single node device.
9a305230
LS
3046 * No-op if attributes already registered.
3047 */
3cd8b44f 3048static void hugetlb_register_node(struct node *node)
9a305230
LS
3049{
3050 struct hstate *h;
10fbcf4c 3051 struct node_hstate *nhs = &node_hstates[node->dev.id];
9a305230
LS
3052 int err;
3053
3054 if (nhs->hugepages_kobj)
3055 return; /* already allocated */
3056
3057 nhs->hugepages_kobj = kobject_create_and_add("hugepages",
10fbcf4c 3058 &node->dev.kobj);
9a305230
LS
3059 if (!nhs->hugepages_kobj)
3060 return;
3061
3062 for_each_hstate(h) {
3063 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
3064 nhs->hstate_kobjs,
3065 &per_node_hstate_attr_group);
3066 if (err) {
282f4214 3067 pr_err("HugeTLB: Unable to add hstate %s for node %d\n",
ffb22af5 3068 h->name, node->dev.id);
9a305230
LS
3069 hugetlb_unregister_node(node);
3070 break;
3071 }
3072 }
3073}
3074
3075/*
9b5e5d0f 3076 * hugetlb init time: register hstate attributes for all registered node
10fbcf4c
KS
3077 * devices of nodes that have memory. All on-line nodes should have
3078 * registered their associated device by this time.
9a305230 3079 */
7d9ca000 3080static void __init hugetlb_register_all_nodes(void)
9a305230
LS
3081{
3082 int nid;
3083
8cebfcd0 3084 for_each_node_state(nid, N_MEMORY) {
8732794b 3085 struct node *node = node_devices[nid];
10fbcf4c 3086 if (node->dev.id == nid)
9a305230
LS
3087 hugetlb_register_node(node);
3088 }
3089
3090 /*
10fbcf4c 3091 * Let the node device driver know we're here so it can
9a305230
LS
3092 * [un]register hstate attributes on node hotplug.
3093 */
3094 register_hugetlbfs_with_node(hugetlb_register_node,
3095 hugetlb_unregister_node);
3096}
3097#else /* !CONFIG_NUMA */
3098
3099static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
3100{
3101 BUG();
3102 if (nidp)
3103 *nidp = -1;
3104 return NULL;
3105}
3106
9a305230
LS
3107static void hugetlb_register_all_nodes(void) { }
3108
3109#endif
3110
a3437870
NA
3111static int __init hugetlb_init(void)
3112{
8382d914
DB
3113 int i;
3114
d6995da3
MK
3115 BUILD_BUG_ON(sizeof_field(struct page, private) * BITS_PER_BYTE <
3116 __NR_HPAGEFLAGS);
3117
c2833a5b
MK
3118 if (!hugepages_supported()) {
3119 if (hugetlb_max_hstate || default_hstate_max_huge_pages)
3120 pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n");
0ef89d25 3121 return 0;
c2833a5b 3122 }
a3437870 3123
282f4214
MK
3124 /*
3125 * Make sure HPAGE_SIZE (HUGETLB_PAGE_ORDER) hstate exists. Some
3126 * architectures depend on setup being done here.
3127 */
3128 hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
3129 if (!parsed_default_hugepagesz) {
3130 /*
3131 * If we did not parse a default huge page size, set
3132 * default_hstate_idx to HPAGE_SIZE hstate. And, if the
3133 * number of huge pages for this default size was implicitly
3134 * specified, set that here as well.
3135 * Note that the implicit setting will overwrite an explicit
3136 * setting. A warning will be printed in this case.
3137 */
3138 default_hstate_idx = hstate_index(size_to_hstate(HPAGE_SIZE));
3139 if (default_hstate_max_huge_pages) {
3140 if (default_hstate.max_huge_pages) {
3141 char buf[32];
3142
3143 string_get_size(huge_page_size(&default_hstate),
3144 1, STRING_UNITS_2, buf, 32);
3145 pr_warn("HugeTLB: Ignoring hugepages=%lu associated with %s page size\n",
3146 default_hstate.max_huge_pages, buf);
3147 pr_warn("HugeTLB: Using hugepages=%lu for number of default huge pages\n",
3148 default_hstate_max_huge_pages);
3149 }
3150 default_hstate.max_huge_pages =
3151 default_hstate_max_huge_pages;
d715cf80 3152 }
f8b74815 3153 }
a3437870 3154
cf11e85f 3155 hugetlb_cma_check();
a3437870 3156 hugetlb_init_hstates();
aa888a74 3157 gather_bootmem_prealloc();
a3437870
NA
3158 report_hugepages();
3159
3160 hugetlb_sysfs_init();
9a305230 3161 hugetlb_register_all_nodes();
7179e7bf 3162 hugetlb_cgroup_file_init();
9a305230 3163
8382d914
DB
3164#ifdef CONFIG_SMP
3165 num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
3166#else
3167 num_fault_mutexes = 1;
3168#endif
c672c7f2 3169 hugetlb_fault_mutex_table =
6da2ec56
KC
3170 kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
3171 GFP_KERNEL);
c672c7f2 3172 BUG_ON(!hugetlb_fault_mutex_table);
8382d914
DB
3173
3174 for (i = 0; i < num_fault_mutexes; i++)
c672c7f2 3175 mutex_init(&hugetlb_fault_mutex_table[i]);
a3437870
NA
3176 return 0;
3177}
3e89e1c5 3178subsys_initcall(hugetlb_init);
a3437870 3179
ae94da89
MK
3180/* Overwritten by architectures with more huge page sizes */
3181bool __init __attribute((weak)) arch_hugetlb_valid_size(unsigned long size)
9fee021d 3182{
ae94da89 3183 return size == HPAGE_SIZE;
9fee021d
VT
3184}
3185
d00181b9 3186void __init hugetlb_add_hstate(unsigned int order)
a3437870
NA
3187{
3188 struct hstate *h;
8faa8b07
AK
3189 unsigned long i;
3190
a3437870 3191 if (size_to_hstate(PAGE_SIZE << order)) {
a3437870
NA
3192 return;
3193 }
47d38344 3194 BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
a3437870 3195 BUG_ON(order == 0);
47d38344 3196 h = &hstates[hugetlb_max_hstate++];
a3437870 3197 h->order = order;
aca78307 3198 h->mask = ~(huge_page_size(h) - 1);
8faa8b07
AK
3199 for (i = 0; i < MAX_NUMNODES; ++i)
3200 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
0edaecfa 3201 INIT_LIST_HEAD(&h->hugepage_activelist);
54f18d35
AM
3202 h->next_nid_to_alloc = first_memory_node;
3203 h->next_nid_to_free = first_memory_node;
a3437870
NA
3204 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
3205 huge_page_size(h)/1024);
8faa8b07 3206
a3437870
NA
3207 parsed_hstate = h;
3208}
3209
282f4214
MK
3210/*
3211 * hugepages command line processing
3212 * hugepages normally follows a valid hugepagsz or default_hugepagsz
3213 * specification. If not, ignore the hugepages value. hugepages can also
3214 * be the first huge page command line option in which case it implicitly
3215 * specifies the number of huge pages for the default size.
3216 */
3217static int __init hugepages_setup(char *s)
a3437870
NA
3218{
3219 unsigned long *mhp;
8faa8b07 3220 static unsigned long *last_mhp;
a3437870 3221
9fee021d 3222 if (!parsed_valid_hugepagesz) {
282f4214 3223 pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s);
9fee021d 3224 parsed_valid_hugepagesz = true;
282f4214 3225 return 0;
9fee021d 3226 }
282f4214 3227
a3437870 3228 /*
282f4214
MK
3229 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter
3230 * yet, so this hugepages= parameter goes to the "default hstate".
3231 * Otherwise, it goes with the previously parsed hugepagesz or
3232 * default_hugepagesz.
a3437870 3233 */
9fee021d 3234 else if (!hugetlb_max_hstate)
a3437870
NA
3235 mhp = &default_hstate_max_huge_pages;
3236 else
3237 mhp = &parsed_hstate->max_huge_pages;
3238
8faa8b07 3239 if (mhp == last_mhp) {
282f4214
MK
3240 pr_warn("HugeTLB: hugepages= specified twice without interleaving hugepagesz=, ignoring hugepages=%s\n", s);
3241 return 0;
8faa8b07
AK
3242 }
3243
a3437870
NA
3244 if (sscanf(s, "%lu", mhp) <= 0)
3245 *mhp = 0;
3246
8faa8b07
AK
3247 /*
3248 * Global state is always initialized later in hugetlb_init.
3249 * But we need to allocate >= MAX_ORDER hstates here early to still
3250 * use the bootmem allocator.
3251 */
47d38344 3252 if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
8faa8b07
AK
3253 hugetlb_hstate_alloc_pages(parsed_hstate);
3254
3255 last_mhp = mhp;
3256
a3437870
NA
3257 return 1;
3258}
282f4214 3259__setup("hugepages=", hugepages_setup);
e11bfbfc 3260
282f4214
MK
3261/*
3262 * hugepagesz command line processing
3263 * A specific huge page size can only be specified once with hugepagesz.
3264 * hugepagesz is followed by hugepages on the command line. The global
3265 * variable 'parsed_valid_hugepagesz' is used to determine if prior
3266 * hugepagesz argument was valid.
3267 */
359f2544 3268static int __init hugepagesz_setup(char *s)
e11bfbfc 3269{
359f2544 3270 unsigned long size;
282f4214
MK
3271 struct hstate *h;
3272
3273 parsed_valid_hugepagesz = false;
359f2544
MK
3274 size = (unsigned long)memparse(s, NULL);
3275
3276 if (!arch_hugetlb_valid_size(size)) {
282f4214 3277 pr_err("HugeTLB: unsupported hugepagesz=%s\n", s);
359f2544
MK
3278 return 0;
3279 }
3280
282f4214
MK
3281 h = size_to_hstate(size);
3282 if (h) {
3283 /*
3284 * hstate for this size already exists. This is normally
3285 * an error, but is allowed if the existing hstate is the
3286 * default hstate. More specifically, it is only allowed if
3287 * the number of huge pages for the default hstate was not
3288 * previously specified.
3289 */
3290 if (!parsed_default_hugepagesz || h != &default_hstate ||
3291 default_hstate.max_huge_pages) {
3292 pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s);
3293 return 0;
3294 }
3295
3296 /*
3297 * No need to call hugetlb_add_hstate() as hstate already
3298 * exists. But, do set parsed_hstate so that a following
3299 * hugepages= parameter will be applied to this hstate.
3300 */
3301 parsed_hstate = h;
3302 parsed_valid_hugepagesz = true;
3303 return 1;
38237830
MK
3304 }
3305
359f2544 3306 hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
282f4214 3307 parsed_valid_hugepagesz = true;
e11bfbfc
NP
3308 return 1;
3309}
359f2544
MK
3310__setup("hugepagesz=", hugepagesz_setup);
3311
282f4214
MK
3312/*
3313 * default_hugepagesz command line input
3314 * Only one instance of default_hugepagesz allowed on command line.
3315 */
ae94da89 3316static int __init default_hugepagesz_setup(char *s)
e11bfbfc 3317{
ae94da89
MK
3318 unsigned long size;
3319
282f4214 3320 parsed_valid_hugepagesz = false;
282f4214
MK
3321 if (parsed_default_hugepagesz) {
3322 pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s);
3323 return 0;
3324 }
3325
ae94da89
MK
3326 size = (unsigned long)memparse(s, NULL);
3327
3328 if (!arch_hugetlb_valid_size(size)) {
282f4214 3329 pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s);
ae94da89
MK
3330 return 0;
3331 }
3332
282f4214
MK
3333 hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
3334 parsed_valid_hugepagesz = true;
3335 parsed_default_hugepagesz = true;
3336 default_hstate_idx = hstate_index(size_to_hstate(size));
3337
3338 /*
3339 * The number of default huge pages (for this size) could have been
3340 * specified as the first hugetlb parameter: hugepages=X. If so,
3341 * then default_hstate_max_huge_pages is set. If the default huge
3342 * page size is gigantic (>= MAX_ORDER), then the pages must be
3343 * allocated here from bootmem allocator.
3344 */
3345 if (default_hstate_max_huge_pages) {
3346 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
3347 if (hstate_is_gigantic(&default_hstate))
3348 hugetlb_hstate_alloc_pages(&default_hstate);
3349 default_hstate_max_huge_pages = 0;
3350 }
3351
e11bfbfc
NP
3352 return 1;
3353}
ae94da89 3354__setup("default_hugepagesz=", default_hugepagesz_setup);
a3437870 3355
8ca39e68 3356static unsigned int allowed_mems_nr(struct hstate *h)
8a213460
NA
3357{
3358 int node;
3359 unsigned int nr = 0;
8ca39e68
MS
3360 nodemask_t *mpol_allowed;
3361 unsigned int *array = h->free_huge_pages_node;
3362 gfp_t gfp_mask = htlb_alloc_mask(h);
3363
3364 mpol_allowed = policy_nodemask_current(gfp_mask);
8a213460 3365
8ca39e68 3366 for_each_node_mask(node, cpuset_current_mems_allowed) {
c93b0a99 3367 if (!mpol_allowed || node_isset(node, *mpol_allowed))
8ca39e68
MS
3368 nr += array[node];
3369 }
8a213460
NA
3370
3371 return nr;
3372}
3373
3374#ifdef CONFIG_SYSCTL
17743798
MS
3375static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write,
3376 void *buffer, size_t *length,
3377 loff_t *ppos, unsigned long *out)
3378{
3379 struct ctl_table dup_table;
3380
3381 /*
3382 * In order to avoid races with __do_proc_doulongvec_minmax(), we
3383 * can duplicate the @table and alter the duplicate of it.
3384 */
3385 dup_table = *table;
3386 dup_table.data = out;
3387
3388 return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos);
3389}
3390
06808b08
LS
3391static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
3392 struct ctl_table *table, int write,
32927393 3393 void *buffer, size_t *length, loff_t *ppos)
1da177e4 3394{
e5ff2159 3395 struct hstate *h = &default_hstate;
238d3c13 3396 unsigned long tmp = h->max_huge_pages;
08d4a246 3397 int ret;
e5ff2159 3398
457c1b27 3399 if (!hugepages_supported())
86613628 3400 return -EOPNOTSUPP;
457c1b27 3401
17743798
MS
3402 ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
3403 &tmp);
08d4a246
MH
3404 if (ret)
3405 goto out;
e5ff2159 3406
238d3c13
DR
3407 if (write)
3408 ret = __nr_hugepages_store_common(obey_mempolicy, h,
3409 NUMA_NO_NODE, tmp, *length);
08d4a246
MH
3410out:
3411 return ret;
1da177e4 3412}
396faf03 3413
06808b08 3414int hugetlb_sysctl_handler(struct ctl_table *table, int write,
32927393 3415 void *buffer, size_t *length, loff_t *ppos)
06808b08
LS
3416{
3417
3418 return hugetlb_sysctl_handler_common(false, table, write,
3419 buffer, length, ppos);
3420}
3421
3422#ifdef CONFIG_NUMA
3423int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
32927393 3424 void *buffer, size_t *length, loff_t *ppos)
06808b08
LS
3425{
3426 return hugetlb_sysctl_handler_common(true, table, write,
3427 buffer, length, ppos);
3428}
3429#endif /* CONFIG_NUMA */
3430
a3d0c6aa 3431int hugetlb_overcommit_handler(struct ctl_table *table, int write,
32927393 3432 void *buffer, size_t *length, loff_t *ppos)
a3d0c6aa 3433{
a5516438 3434 struct hstate *h = &default_hstate;
e5ff2159 3435 unsigned long tmp;
08d4a246 3436 int ret;
e5ff2159 3437
457c1b27 3438 if (!hugepages_supported())
86613628 3439 return -EOPNOTSUPP;
457c1b27 3440
c033a93c 3441 tmp = h->nr_overcommit_huge_pages;
e5ff2159 3442
bae7f4ae 3443 if (write && hstate_is_gigantic(h))
adbe8726
EM
3444 return -EINVAL;
3445
17743798
MS
3446 ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
3447 &tmp);
08d4a246
MH
3448 if (ret)
3449 goto out;
e5ff2159
AK
3450
3451 if (write) {
3452 spin_lock(&hugetlb_lock);
3453 h->nr_overcommit_huge_pages = tmp;
3454 spin_unlock(&hugetlb_lock);
3455 }
08d4a246
MH
3456out:
3457 return ret;
a3d0c6aa
NA
3458}
3459
1da177e4
LT
3460#endif /* CONFIG_SYSCTL */
3461
e1759c21 3462void hugetlb_report_meminfo(struct seq_file *m)
1da177e4 3463{
fcb2b0c5
RG
3464 struct hstate *h;
3465 unsigned long total = 0;
3466
457c1b27
NA
3467 if (!hugepages_supported())
3468 return;
fcb2b0c5
RG
3469
3470 for_each_hstate(h) {
3471 unsigned long count = h->nr_huge_pages;
3472
aca78307 3473 total += huge_page_size(h) * count;
fcb2b0c5
RG
3474
3475 if (h == &default_hstate)
3476 seq_printf(m,
3477 "HugePages_Total: %5lu\n"
3478 "HugePages_Free: %5lu\n"
3479 "HugePages_Rsvd: %5lu\n"
3480 "HugePages_Surp: %5lu\n"
3481 "Hugepagesize: %8lu kB\n",
3482 count,
3483 h->free_huge_pages,
3484 h->resv_huge_pages,
3485 h->surplus_huge_pages,
aca78307 3486 huge_page_size(h) / SZ_1K);
fcb2b0c5
RG
3487 }
3488
aca78307 3489 seq_printf(m, "Hugetlb: %8lu kB\n", total / SZ_1K);
1da177e4
LT
3490}
3491
7981593b 3492int hugetlb_report_node_meminfo(char *buf, int len, int nid)
1da177e4 3493{
a5516438 3494 struct hstate *h = &default_hstate;
7981593b 3495
457c1b27
NA
3496 if (!hugepages_supported())
3497 return 0;
7981593b
JP
3498
3499 return sysfs_emit_at(buf, len,
3500 "Node %d HugePages_Total: %5u\n"
3501 "Node %d HugePages_Free: %5u\n"
3502 "Node %d HugePages_Surp: %5u\n",
3503 nid, h->nr_huge_pages_node[nid],
3504 nid, h->free_huge_pages_node[nid],
3505 nid, h->surplus_huge_pages_node[nid]);
1da177e4
LT
3506}
3507
949f7ec5
DR
3508void hugetlb_show_meminfo(void)
3509{
3510 struct hstate *h;
3511 int nid;
3512
457c1b27
NA
3513 if (!hugepages_supported())
3514 return;
3515
949f7ec5
DR
3516 for_each_node_state(nid, N_MEMORY)
3517 for_each_hstate(h)
3518 pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
3519 nid,
3520 h->nr_huge_pages_node[nid],
3521 h->free_huge_pages_node[nid],
3522 h->surplus_huge_pages_node[nid],
aca78307 3523 huge_page_size(h) / SZ_1K);
949f7ec5
DR
3524}
3525
5d317b2b
NH
3526void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
3527{
3528 seq_printf(m, "HugetlbPages:\t%8lu kB\n",
3529 atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
3530}
3531
1da177e4
LT
3532/* Return the number pages of memory we physically have, in PAGE_SIZE units. */
3533unsigned long hugetlb_total_pages(void)
3534{
d0028588
WL
3535 struct hstate *h;
3536 unsigned long nr_total_pages = 0;
3537
3538 for_each_hstate(h)
3539 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
3540 return nr_total_pages;
1da177e4 3541}
1da177e4 3542
a5516438 3543static int hugetlb_acct_memory(struct hstate *h, long delta)
fc1b8a73
MG
3544{
3545 int ret = -ENOMEM;
3546
0aa7f354
ML
3547 if (!delta)
3548 return 0;
3549
fc1b8a73
MG
3550 spin_lock(&hugetlb_lock);
3551 /*
3552 * When cpuset is configured, it breaks the strict hugetlb page
3553 * reservation as the accounting is done on a global variable. Such
3554 * reservation is completely rubbish in the presence of cpuset because
3555 * the reservation is not checked against page availability for the
3556 * current cpuset. Application can still potentially OOM'ed by kernel
3557 * with lack of free htlb page in cpuset that the task is in.
3558 * Attempt to enforce strict accounting with cpuset is almost
3559 * impossible (or too ugly) because cpuset is too fluid that
3560 * task or memory node can be dynamically moved between cpusets.
3561 *
3562 * The change of semantics for shared hugetlb mapping with cpuset is
3563 * undesirable. However, in order to preserve some of the semantics,
3564 * we fall back to check against current free page availability as
3565 * a best attempt and hopefully to minimize the impact of changing
3566 * semantics that cpuset has.
8ca39e68
MS
3567 *
3568 * Apart from cpuset, we also have memory policy mechanism that
3569 * also determines from which node the kernel will allocate memory
3570 * in a NUMA system. So similar to cpuset, we also should consider
3571 * the memory policy of the current task. Similar to the description
3572 * above.
fc1b8a73
MG
3573 */
3574 if (delta > 0) {
a5516438 3575 if (gather_surplus_pages(h, delta) < 0)
fc1b8a73
MG
3576 goto out;
3577
8ca39e68 3578 if (delta > allowed_mems_nr(h)) {
a5516438 3579 return_unused_surplus_pages(h, delta);
fc1b8a73
MG
3580 goto out;
3581 }
3582 }
3583
3584 ret = 0;
3585 if (delta < 0)
a5516438 3586 return_unused_surplus_pages(h, (unsigned long) -delta);
fc1b8a73
MG
3587
3588out:
3589 spin_unlock(&hugetlb_lock);
3590 return ret;
3591}
3592
84afd99b
AW
3593static void hugetlb_vm_op_open(struct vm_area_struct *vma)
3594{
f522c3ac 3595 struct resv_map *resv = vma_resv_map(vma);
84afd99b
AW
3596
3597 /*
3598 * This new VMA should share its siblings reservation map if present.
3599 * The VMA will only ever have a valid reservation map pointer where
3600 * it is being copied for another still existing VMA. As that VMA
25985edc 3601 * has a reference to the reservation map it cannot disappear until
84afd99b
AW
3602 * after this open call completes. It is therefore safe to take a
3603 * new reference here without additional locking.
3604 */
4e35f483 3605 if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
f522c3ac 3606 kref_get(&resv->refs);
84afd99b
AW
3607}
3608
a1e78772
MG
3609static void hugetlb_vm_op_close(struct vm_area_struct *vma)
3610{
a5516438 3611 struct hstate *h = hstate_vma(vma);
f522c3ac 3612 struct resv_map *resv = vma_resv_map(vma);
90481622 3613 struct hugepage_subpool *spool = subpool_vma(vma);
4e35f483 3614 unsigned long reserve, start, end;
1c5ecae3 3615 long gbl_reserve;
84afd99b 3616
4e35f483
JK
3617 if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3618 return;
84afd99b 3619
4e35f483
JK
3620 start = vma_hugecache_offset(h, vma, vma->vm_start);
3621 end = vma_hugecache_offset(h, vma, vma->vm_end);
84afd99b 3622
4e35f483 3623 reserve = (end - start) - region_count(resv, start, end);
e9fe92ae 3624 hugetlb_cgroup_uncharge_counter(resv, start, end);
4e35f483 3625 if (reserve) {
1c5ecae3
MK
3626 /*
3627 * Decrement reserve counts. The global reserve count may be
3628 * adjusted if the subpool has a minimum size.
3629 */
3630 gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
3631 hugetlb_acct_memory(h, -gbl_reserve);
84afd99b 3632 }
e9fe92ae
MA
3633
3634 kref_put(&resv->refs, resv_map_release);
a1e78772
MG
3635}
3636
31383c68
DW
3637static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
3638{
3639 if (addr & ~(huge_page_mask(hstate_vma(vma))))
3640 return -EINVAL;
3641 return 0;
3642}
3643
05ea8860
DW
3644static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
3645{
aca78307 3646 return huge_page_size(hstate_vma(vma));
05ea8860
DW
3647}
3648
1da177e4
LT
3649/*
3650 * We cannot handle pagefaults against hugetlb pages at all. They cause
3651 * handle_mm_fault() to try to instantiate regular-sized pages in the
6c26d310 3652 * hugepage VMA. do_page_fault() is supposed to trap this, so BUG is we get
1da177e4
LT
3653 * this far.
3654 */
b3ec9f33 3655static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
1da177e4
LT
3656{
3657 BUG();
d0217ac0 3658 return 0;
1da177e4
LT
3659}
3660
eec3636a
JC
3661/*
3662 * When a new function is introduced to vm_operations_struct and added
3663 * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
3664 * This is because under System V memory model, mappings created via
3665 * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
3666 * their original vm_ops are overwritten with shm_vm_ops.
3667 */
f0f37e2f 3668const struct vm_operations_struct hugetlb_vm_ops = {
d0217ac0 3669 .fault = hugetlb_vm_op_fault,
84afd99b 3670 .open = hugetlb_vm_op_open,
a1e78772 3671 .close = hugetlb_vm_op_close,
dd3b614f 3672 .may_split = hugetlb_vm_op_split,
05ea8860 3673 .pagesize = hugetlb_vm_op_pagesize,
1da177e4
LT
3674};
3675
1e8f889b
DG
3676static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
3677 int writable)
63551ae0
DG
3678{
3679 pte_t entry;
3680
1e8f889b 3681 if (writable) {
106c992a
GS
3682 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
3683 vma->vm_page_prot)));
63551ae0 3684 } else {
106c992a
GS
3685 entry = huge_pte_wrprotect(mk_huge_pte(page,
3686 vma->vm_page_prot));
63551ae0
DG
3687 }
3688 entry = pte_mkyoung(entry);
3689 entry = pte_mkhuge(entry);
d9ed9faa 3690 entry = arch_make_huge_pte(entry, vma, page, writable);
63551ae0
DG
3691
3692 return entry;
3693}
3694
1e8f889b
DG
3695static void set_huge_ptep_writable(struct vm_area_struct *vma,
3696 unsigned long address, pte_t *ptep)
3697{
3698 pte_t entry;
3699
106c992a 3700 entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
32f84528 3701 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
4b3073e1 3702 update_mmu_cache(vma, address, ptep);
1e8f889b
DG
3703}
3704
d5ed7444 3705bool is_hugetlb_entry_migration(pte_t pte)
4a705fef
NH
3706{
3707 swp_entry_t swp;
3708
3709 if (huge_pte_none(pte) || pte_present(pte))
d5ed7444 3710 return false;
4a705fef 3711 swp = pte_to_swp_entry(pte);
d79d176a 3712 if (is_migration_entry(swp))
d5ed7444 3713 return true;
4a705fef 3714 else
d5ed7444 3715 return false;
4a705fef
NH
3716}
3717
3e5c3600 3718static bool is_hugetlb_entry_hwpoisoned(pte_t pte)
4a705fef
NH
3719{
3720 swp_entry_t swp;
3721
3722 if (huge_pte_none(pte) || pte_present(pte))
3e5c3600 3723 return false;
4a705fef 3724 swp = pte_to_swp_entry(pte);
d79d176a 3725 if (is_hwpoison_entry(swp))
3e5c3600 3726 return true;
4a705fef 3727 else
3e5c3600 3728 return false;
4a705fef 3729}
1e8f889b 3730
4eae4efa
PX
3731static void
3732hugetlb_install_page(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr,
3733 struct page *new_page)
3734{
3735 __SetPageUptodate(new_page);
3736 set_huge_pte_at(vma->vm_mm, addr, ptep, make_huge_pte(vma, new_page, 1));
3737 hugepage_add_new_anon_rmap(new_page, vma, addr);
3738 hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm);
3739 ClearHPageRestoreReserve(new_page);
3740 SetHPageMigratable(new_page);
3741}
3742
63551ae0
DG
3743int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
3744 struct vm_area_struct *vma)
3745{
5e41540c 3746 pte_t *src_pte, *dst_pte, entry, dst_entry;
63551ae0 3747 struct page *ptepage;
1c59827d 3748 unsigned long addr;
ca6eb14d 3749 bool cow = is_cow_mapping(vma->vm_flags);
a5516438
AK
3750 struct hstate *h = hstate_vma(vma);
3751 unsigned long sz = huge_page_size(h);
4eae4efa 3752 unsigned long npages = pages_per_huge_page(h);
c0d0381a 3753 struct address_space *mapping = vma->vm_file->f_mapping;
ac46d4f3 3754 struct mmu_notifier_range range;
e8569dd2 3755 int ret = 0;
1e8f889b 3756
ac46d4f3 3757 if (cow) {
7269f999 3758 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, src,
6f4f13e8 3759 vma->vm_start,
ac46d4f3
JG
3760 vma->vm_end);
3761 mmu_notifier_invalidate_range_start(&range);
c0d0381a
MK
3762 } else {
3763 /*
3764 * For shared mappings i_mmap_rwsem must be held to call
3765 * huge_pte_alloc, otherwise the returned ptep could go
3766 * away if part of a shared pmd and another thread calls
3767 * huge_pmd_unshare.
3768 */
3769 i_mmap_lock_read(mapping);
ac46d4f3 3770 }
e8569dd2 3771
a5516438 3772 for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
cb900f41 3773 spinlock_t *src_ptl, *dst_ptl;
7868a208 3774 src_pte = huge_pte_offset(src, addr, sz);
c74df32c
HD
3775 if (!src_pte)
3776 continue;
a5516438 3777 dst_pte = huge_pte_alloc(dst, addr, sz);
e8569dd2
AS
3778 if (!dst_pte) {
3779 ret = -ENOMEM;
3780 break;
3781 }
c5c99429 3782
5e41540c
MK
3783 /*
3784 * If the pagetables are shared don't copy or take references.
3785 * dst_pte == src_pte is the common case of src/dest sharing.
3786 *
3787 * However, src could have 'unshared' and dst shares with
3788 * another vma. If dst_pte !none, this implies sharing.
3789 * Check here before taking page table lock, and once again
3790 * after taking the lock below.
3791 */
3792 dst_entry = huge_ptep_get(dst_pte);
3793 if ((dst_pte == src_pte) || !huge_pte_none(dst_entry))
c5c99429
LW
3794 continue;
3795
cb900f41
KS
3796 dst_ptl = huge_pte_lock(h, dst, dst_pte);
3797 src_ptl = huge_pte_lockptr(h, src, src_pte);
3798 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
4a705fef 3799 entry = huge_ptep_get(src_pte);
5e41540c 3800 dst_entry = huge_ptep_get(dst_pte);
4eae4efa 3801again:
5e41540c
MK
3802 if (huge_pte_none(entry) || !huge_pte_none(dst_entry)) {
3803 /*
3804 * Skip if src entry none. Also, skip in the
3805 * unlikely case dst entry !none as this implies
3806 * sharing with another vma.
3807 */
4a705fef
NH
3808 ;
3809 } else if (unlikely(is_hugetlb_entry_migration(entry) ||
3810 is_hugetlb_entry_hwpoisoned(entry))) {
3811 swp_entry_t swp_entry = pte_to_swp_entry(entry);
3812
3813 if (is_write_migration_entry(swp_entry) && cow) {
3814 /*
3815 * COW mappings require pages in both
3816 * parent and child to be set to read.
3817 */
3818 make_migration_entry_read(&swp_entry);
3819 entry = swp_entry_to_pte(swp_entry);
e5251fd4
PA
3820 set_huge_swap_pte_at(src, addr, src_pte,
3821 entry, sz);
4a705fef 3822 }
e5251fd4 3823 set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz);
4a705fef 3824 } else {
4eae4efa
PX
3825 entry = huge_ptep_get(src_pte);
3826 ptepage = pte_page(entry);
3827 get_page(ptepage);
3828
3829 /*
3830 * This is a rare case where we see pinned hugetlb
3831 * pages while they're prone to COW. We need to do the
3832 * COW earlier during fork.
3833 *
3834 * When pre-allocating the page or copying data, we
3835 * need to be without the pgtable locks since we could
3836 * sleep during the process.
3837 */
3838 if (unlikely(page_needs_cow_for_dma(vma, ptepage))) {
3839 pte_t src_pte_old = entry;
3840 struct page *new;
3841
3842 spin_unlock(src_ptl);
3843 spin_unlock(dst_ptl);
3844 /* Do not use reserve as it's private owned */
3845 new = alloc_huge_page(vma, addr, 1);
3846 if (IS_ERR(new)) {
3847 put_page(ptepage);
3848 ret = PTR_ERR(new);
3849 break;
3850 }
3851 copy_user_huge_page(new, ptepage, addr, vma,
3852 npages);
3853 put_page(ptepage);
3854
3855 /* Install the new huge page if src pte stable */
3856 dst_ptl = huge_pte_lock(h, dst, dst_pte);
3857 src_ptl = huge_pte_lockptr(h, src, src_pte);
3858 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
3859 entry = huge_ptep_get(src_pte);
3860 if (!pte_same(src_pte_old, entry)) {
3861 put_page(new);
3862 /* dst_entry won't change as in child */
3863 goto again;
3864 }
3865 hugetlb_install_page(vma, dst_pte, addr, new);
3866 spin_unlock(src_ptl);
3867 spin_unlock(dst_ptl);
3868 continue;
3869 }
3870
34ee645e 3871 if (cow) {
0f10851e
JG
3872 /*
3873 * No need to notify as we are downgrading page
3874 * table protection not changing it to point
3875 * to a new page.
3876 *
ad56b738 3877 * See Documentation/vm/mmu_notifier.rst
0f10851e 3878 */
7f2e9525 3879 huge_ptep_set_wrprotect(src, addr, src_pte);
34ee645e 3880 }
4eae4efa 3881
53f9263b 3882 page_dup_rmap(ptepage, true);
1c59827d 3883 set_huge_pte_at(dst, addr, dst_pte, entry);
4eae4efa 3884 hugetlb_count_add(npages, dst);
1c59827d 3885 }
cb900f41
KS
3886 spin_unlock(src_ptl);
3887 spin_unlock(dst_ptl);
63551ae0 3888 }
63551ae0 3889
e8569dd2 3890 if (cow)
ac46d4f3 3891 mmu_notifier_invalidate_range_end(&range);
c0d0381a
MK
3892 else
3893 i_mmap_unlock_read(mapping);
e8569dd2
AS
3894
3895 return ret;
63551ae0
DG
3896}
3897
24669e58
AK
3898void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
3899 unsigned long start, unsigned long end,
3900 struct page *ref_page)
63551ae0
DG
3901{
3902 struct mm_struct *mm = vma->vm_mm;
3903 unsigned long address;
c7546f8f 3904 pte_t *ptep;
63551ae0 3905 pte_t pte;
cb900f41 3906 spinlock_t *ptl;
63551ae0 3907 struct page *page;
a5516438
AK
3908 struct hstate *h = hstate_vma(vma);
3909 unsigned long sz = huge_page_size(h);
ac46d4f3 3910 struct mmu_notifier_range range;
a5516438 3911
63551ae0 3912 WARN_ON(!is_vm_hugetlb_page(vma));
a5516438
AK
3913 BUG_ON(start & ~huge_page_mask(h));
3914 BUG_ON(end & ~huge_page_mask(h));
63551ae0 3915
07e32661
AK
3916 /*
3917 * This is a hugetlb vma, all the pte entries should point
3918 * to huge page.
3919 */
ed6a7935 3920 tlb_change_page_size(tlb, sz);
24669e58 3921 tlb_start_vma(tlb, vma);
dff11abe
MK
3922
3923 /*
3924 * If sharing possible, alert mmu notifiers of worst case.
3925 */
6f4f13e8
JG
3926 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, start,
3927 end);
ac46d4f3
JG
3928 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
3929 mmu_notifier_invalidate_range_start(&range);
569f48b8 3930 address = start;
569f48b8 3931 for (; address < end; address += sz) {
7868a208 3932 ptep = huge_pte_offset(mm, address, sz);
4c887265 3933 if (!ptep)
c7546f8f
DG
3934 continue;
3935
cb900f41 3936 ptl = huge_pte_lock(h, mm, ptep);
34ae204f 3937 if (huge_pmd_unshare(mm, vma, &address, ptep)) {
31d49da5 3938 spin_unlock(ptl);
dff11abe
MK
3939 /*
3940 * We just unmapped a page of PMDs by clearing a PUD.
3941 * The caller's TLB flush range should cover this area.
3942 */
31d49da5
AK
3943 continue;
3944 }
39dde65c 3945
6629326b 3946 pte = huge_ptep_get(ptep);
31d49da5
AK
3947 if (huge_pte_none(pte)) {
3948 spin_unlock(ptl);
3949 continue;
3950 }
6629326b
HD
3951
3952 /*
9fbc1f63
NH
3953 * Migrating hugepage or HWPoisoned hugepage is already
3954 * unmapped and its refcount is dropped, so just clear pte here.
6629326b 3955 */
9fbc1f63 3956 if (unlikely(!pte_present(pte))) {
9386fac3 3957 huge_pte_clear(mm, address, ptep, sz);
31d49da5
AK
3958 spin_unlock(ptl);
3959 continue;
8c4894c6 3960 }
6629326b
HD
3961
3962 page = pte_page(pte);
04f2cbe3
MG
3963 /*
3964 * If a reference page is supplied, it is because a specific
3965 * page is being unmapped, not a range. Ensure the page we
3966 * are about to unmap is the actual page of interest.
3967 */
3968 if (ref_page) {
31d49da5
AK
3969 if (page != ref_page) {
3970 spin_unlock(ptl);
3971 continue;
3972 }
04f2cbe3
MG
3973 /*
3974 * Mark the VMA as having unmapped its page so that
3975 * future faults in this VMA will fail rather than
3976 * looking like data was lost
3977 */
3978 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
3979 }
3980
c7546f8f 3981 pte = huge_ptep_get_and_clear(mm, address, ptep);
b528e4b6 3982 tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
106c992a 3983 if (huge_pte_dirty(pte))
6649a386 3984 set_page_dirty(page);
9e81130b 3985
5d317b2b 3986 hugetlb_count_sub(pages_per_huge_page(h), mm);
d281ee61 3987 page_remove_rmap(page, true);
31d49da5 3988
cb900f41 3989 spin_unlock(ptl);
e77b0852 3990 tlb_remove_page_size(tlb, page, huge_page_size(h));
31d49da5
AK
3991 /*
3992 * Bail out after unmapping reference page if supplied
3993 */
3994 if (ref_page)
3995 break;
fe1668ae 3996 }
ac46d4f3 3997 mmu_notifier_invalidate_range_end(&range);
24669e58 3998 tlb_end_vma(tlb, vma);
1da177e4 3999}
63551ae0 4000
d833352a
MG
4001void __unmap_hugepage_range_final(struct mmu_gather *tlb,
4002 struct vm_area_struct *vma, unsigned long start,
4003 unsigned long end, struct page *ref_page)
4004{
4005 __unmap_hugepage_range(tlb, vma, start, end, ref_page);
4006
4007 /*
4008 * Clear this flag so that x86's huge_pmd_share page_table_shareable
4009 * test will fail on a vma being torn down, and not grab a page table
4010 * on its way out. We're lucky that the flag has such an appropriate
4011 * name, and can in fact be safely cleared here. We could clear it
4012 * before the __unmap_hugepage_range above, but all that's necessary
c8c06efa 4013 * is to clear it before releasing the i_mmap_rwsem. This works
d833352a 4014 * because in the context this is called, the VMA is about to be
c8c06efa 4015 * destroyed and the i_mmap_rwsem is held.
d833352a
MG
4016 */
4017 vma->vm_flags &= ~VM_MAYSHARE;
4018}
4019
502717f4 4020void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
04f2cbe3 4021 unsigned long end, struct page *ref_page)
502717f4 4022{
24669e58 4023 struct mmu_gather tlb;
dff11abe 4024
a72afd87 4025 tlb_gather_mmu(&tlb, vma->vm_mm);
24669e58 4026 __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
ae8eba8b 4027 tlb_finish_mmu(&tlb);
502717f4
CK
4028}
4029
04f2cbe3
MG
4030/*
4031 * This is called when the original mapper is failing to COW a MAP_PRIVATE
578b7725 4032 * mapping it owns the reserve page for. The intention is to unmap the page
04f2cbe3
MG
4033 * from other VMAs and let the children be SIGKILLed if they are faulting the
4034 * same region.
4035 */
2f4612af
DB
4036static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
4037 struct page *page, unsigned long address)
04f2cbe3 4038{
7526674d 4039 struct hstate *h = hstate_vma(vma);
04f2cbe3
MG
4040 struct vm_area_struct *iter_vma;
4041 struct address_space *mapping;
04f2cbe3
MG
4042 pgoff_t pgoff;
4043
4044 /*
4045 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
4046 * from page cache lookup which is in HPAGE_SIZE units.
4047 */
7526674d 4048 address = address & huge_page_mask(h);
36e4f20a
MH
4049 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
4050 vma->vm_pgoff;
93c76a3d 4051 mapping = vma->vm_file->f_mapping;
04f2cbe3 4052
4eb2b1dc
MG
4053 /*
4054 * Take the mapping lock for the duration of the table walk. As
4055 * this mapping should be shared between all the VMAs,
4056 * __unmap_hugepage_range() is called as the lock is already held
4057 */
83cde9e8 4058 i_mmap_lock_write(mapping);
6b2dbba8 4059 vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
04f2cbe3
MG
4060 /* Do not unmap the current VMA */
4061 if (iter_vma == vma)
4062 continue;
4063
2f84a899
MG
4064 /*
4065 * Shared VMAs have their own reserves and do not affect
4066 * MAP_PRIVATE accounting but it is possible that a shared
4067 * VMA is using the same page so check and skip such VMAs.
4068 */
4069 if (iter_vma->vm_flags & VM_MAYSHARE)
4070 continue;
4071
04f2cbe3
MG
4072 /*
4073 * Unmap the page from other VMAs without their own reserves.
4074 * They get marked to be SIGKILLed if they fault in these
4075 * areas. This is because a future no-page fault on this VMA
4076 * could insert a zeroed page instead of the data existing
4077 * from the time of fork. This would look like data corruption
4078 */
4079 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
24669e58
AK
4080 unmap_hugepage_range(iter_vma, address,
4081 address + huge_page_size(h), page);
04f2cbe3 4082 }
83cde9e8 4083 i_mmap_unlock_write(mapping);
04f2cbe3
MG
4084}
4085
0fe6e20b
NH
4086/*
4087 * Hugetlb_cow() should be called with page lock of the original hugepage held.
ef009b25
MH
4088 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
4089 * cannot race with other handlers or page migration.
4090 * Keep the pte_same checks anyway to make transition from the mutex easier.
0fe6e20b 4091 */
2b740303 4092static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
974e6d66 4093 unsigned long address, pte_t *ptep,
3999f52e 4094 struct page *pagecache_page, spinlock_t *ptl)
1e8f889b 4095{
3999f52e 4096 pte_t pte;
a5516438 4097 struct hstate *h = hstate_vma(vma);
1e8f889b 4098 struct page *old_page, *new_page;
2b740303
SJ
4099 int outside_reserve = 0;
4100 vm_fault_t ret = 0;
974e6d66 4101 unsigned long haddr = address & huge_page_mask(h);
ac46d4f3 4102 struct mmu_notifier_range range;
1e8f889b 4103
3999f52e 4104 pte = huge_ptep_get(ptep);
1e8f889b
DG
4105 old_page = pte_page(pte);
4106
04f2cbe3 4107retry_avoidcopy:
1e8f889b
DG
4108 /* If no-one else is actually using this page, avoid the copy
4109 * and just make the page writable */
37a2140d 4110 if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
5a49973d 4111 page_move_anon_rmap(old_page, vma);
5b7a1d40 4112 set_huge_ptep_writable(vma, haddr, ptep);
83c54070 4113 return 0;
1e8f889b
DG
4114 }
4115
04f2cbe3
MG
4116 /*
4117 * If the process that created a MAP_PRIVATE mapping is about to
4118 * perform a COW due to a shared page count, attempt to satisfy
4119 * the allocation without using the existing reserves. The pagecache
4120 * page is used to determine if the reserve at this address was
4121 * consumed or not. If reserves were used, a partial faulted mapping
4122 * at the time of fork() could consume its reserves on COW instead
4123 * of the full address range.
4124 */
5944d011 4125 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
04f2cbe3
MG
4126 old_page != pagecache_page)
4127 outside_reserve = 1;
4128
09cbfeaf 4129 get_page(old_page);
b76c8cfb 4130
ad4404a2
DB
4131 /*
4132 * Drop page table lock as buddy allocator may be called. It will
4133 * be acquired again before returning to the caller, as expected.
4134 */
cb900f41 4135 spin_unlock(ptl);
5b7a1d40 4136 new_page = alloc_huge_page(vma, haddr, outside_reserve);
1e8f889b 4137
2fc39cec 4138 if (IS_ERR(new_page)) {
04f2cbe3
MG
4139 /*
4140 * If a process owning a MAP_PRIVATE mapping fails to COW,
4141 * it is due to references held by a child and an insufficient
4142 * huge page pool. To guarantee the original mappers
4143 * reliability, unmap the page from child processes. The child
4144 * may get SIGKILLed if it later faults.
4145 */
4146 if (outside_reserve) {
e7dd91c4
MK
4147 struct address_space *mapping = vma->vm_file->f_mapping;
4148 pgoff_t idx;
4149 u32 hash;
4150
09cbfeaf 4151 put_page(old_page);
04f2cbe3 4152 BUG_ON(huge_pte_none(pte));
e7dd91c4
MK
4153 /*
4154 * Drop hugetlb_fault_mutex and i_mmap_rwsem before
4155 * unmapping. unmapping needs to hold i_mmap_rwsem
4156 * in write mode. Dropping i_mmap_rwsem in read mode
4157 * here is OK as COW mappings do not interact with
4158 * PMD sharing.
4159 *
4160 * Reacquire both after unmap operation.
4161 */
4162 idx = vma_hugecache_offset(h, vma, haddr);
4163 hash = hugetlb_fault_mutex_hash(mapping, idx);
4164 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
4165 i_mmap_unlock_read(mapping);
4166
5b7a1d40 4167 unmap_ref_private(mm, vma, old_page, haddr);
e7dd91c4
MK
4168
4169 i_mmap_lock_read(mapping);
4170 mutex_lock(&hugetlb_fault_mutex_table[hash]);
2f4612af 4171 spin_lock(ptl);
5b7a1d40 4172 ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
2f4612af
DB
4173 if (likely(ptep &&
4174 pte_same(huge_ptep_get(ptep), pte)))
4175 goto retry_avoidcopy;
4176 /*
4177 * race occurs while re-acquiring page table
4178 * lock, and our job is done.
4179 */
4180 return 0;
04f2cbe3
MG
4181 }
4182
2b740303 4183 ret = vmf_error(PTR_ERR(new_page));
ad4404a2 4184 goto out_release_old;
1e8f889b
DG
4185 }
4186
0fe6e20b
NH
4187 /*
4188 * When the original hugepage is shared one, it does not have
4189 * anon_vma prepared.
4190 */
44e2aa93 4191 if (unlikely(anon_vma_prepare(vma))) {
ad4404a2
DB
4192 ret = VM_FAULT_OOM;
4193 goto out_release_all;
44e2aa93 4194 }
0fe6e20b 4195
974e6d66 4196 copy_user_huge_page(new_page, old_page, address, vma,
47ad8475 4197 pages_per_huge_page(h));
0ed361de 4198 __SetPageUptodate(new_page);
1e8f889b 4199
7269f999 4200 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, haddr,
6f4f13e8 4201 haddr + huge_page_size(h));
ac46d4f3 4202 mmu_notifier_invalidate_range_start(&range);
ad4404a2 4203
b76c8cfb 4204 /*
cb900f41 4205 * Retake the page table lock to check for racing updates
b76c8cfb
LW
4206 * before the page tables are altered
4207 */
cb900f41 4208 spin_lock(ptl);
5b7a1d40 4209 ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
a9af0c5d 4210 if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
d6995da3 4211 ClearHPageRestoreReserve(new_page);
07443a85 4212
1e8f889b 4213 /* Break COW */
5b7a1d40 4214 huge_ptep_clear_flush(vma, haddr, ptep);
ac46d4f3 4215 mmu_notifier_invalidate_range(mm, range.start, range.end);
5b7a1d40 4216 set_huge_pte_at(mm, haddr, ptep,
1e8f889b 4217 make_huge_pte(vma, new_page, 1));
d281ee61 4218 page_remove_rmap(old_page, true);
5b7a1d40 4219 hugepage_add_new_anon_rmap(new_page, vma, haddr);
8f251a3d 4220 SetHPageMigratable(new_page);
1e8f889b
DG
4221 /* Make the old page be freed below */
4222 new_page = old_page;
4223 }
cb900f41 4224 spin_unlock(ptl);
ac46d4f3 4225 mmu_notifier_invalidate_range_end(&range);
ad4404a2 4226out_release_all:
5b7a1d40 4227 restore_reserve_on_error(h, vma, haddr, new_page);
09cbfeaf 4228 put_page(new_page);
ad4404a2 4229out_release_old:
09cbfeaf 4230 put_page(old_page);
8312034f 4231
ad4404a2
DB
4232 spin_lock(ptl); /* Caller expects lock to be held */
4233 return ret;
1e8f889b
DG
4234}
4235
04f2cbe3 4236/* Return the pagecache page at a given address within a VMA */
a5516438
AK
4237static struct page *hugetlbfs_pagecache_page(struct hstate *h,
4238 struct vm_area_struct *vma, unsigned long address)
04f2cbe3
MG
4239{
4240 struct address_space *mapping;
e7c4b0bf 4241 pgoff_t idx;
04f2cbe3
MG
4242
4243 mapping = vma->vm_file->f_mapping;
a5516438 4244 idx = vma_hugecache_offset(h, vma, address);
04f2cbe3
MG
4245
4246 return find_lock_page(mapping, idx);
4247}
4248
3ae77f43
HD
4249/*
4250 * Return whether there is a pagecache page to back given address within VMA.
4251 * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
4252 */
4253static bool hugetlbfs_pagecache_present(struct hstate *h,
2a15efc9
HD
4254 struct vm_area_struct *vma, unsigned long address)
4255{
4256 struct address_space *mapping;
4257 pgoff_t idx;
4258 struct page *page;
4259
4260 mapping = vma->vm_file->f_mapping;
4261 idx = vma_hugecache_offset(h, vma, address);
4262
4263 page = find_get_page(mapping, idx);
4264 if (page)
4265 put_page(page);
4266 return page != NULL;
4267}
4268
ab76ad54
MK
4269int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
4270 pgoff_t idx)
4271{
4272 struct inode *inode = mapping->host;
4273 struct hstate *h = hstate_inode(inode);
4274 int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
4275
4276 if (err)
4277 return err;
d6995da3 4278 ClearHPageRestoreReserve(page);
ab76ad54 4279
22146c3c
MK
4280 /*
4281 * set page dirty so that it will not be removed from cache/file
4282 * by non-hugetlbfs specific code paths.
4283 */
4284 set_page_dirty(page);
4285
ab76ad54
MK
4286 spin_lock(&inode->i_lock);
4287 inode->i_blocks += blocks_per_huge_page(h);
4288 spin_unlock(&inode->i_lock);
4289 return 0;
4290}
4291
2b740303
SJ
4292static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
4293 struct vm_area_struct *vma,
4294 struct address_space *mapping, pgoff_t idx,
4295 unsigned long address, pte_t *ptep, unsigned int flags)
ac9b9c66 4296{
a5516438 4297 struct hstate *h = hstate_vma(vma);
2b740303 4298 vm_fault_t ret = VM_FAULT_SIGBUS;
409eb8c2 4299 int anon_rmap = 0;
4c887265 4300 unsigned long size;
4c887265 4301 struct page *page;
1e8f889b 4302 pte_t new_pte;
cb900f41 4303 spinlock_t *ptl;
285b8dca 4304 unsigned long haddr = address & huge_page_mask(h);
cb6acd01 4305 bool new_page = false;
4c887265 4306
04f2cbe3
MG
4307 /*
4308 * Currently, we are forced to kill the process in the event the
4309 * original mapper has unmapped pages from the child due to a failed
25985edc 4310 * COW. Warn that such a situation has occurred as it may not be obvious
04f2cbe3
MG
4311 */
4312 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
910154d5 4313 pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
ffb22af5 4314 current->pid);
04f2cbe3
MG
4315 return ret;
4316 }
4317
4c887265 4318 /*
87bf91d3
MK
4319 * We can not race with truncation due to holding i_mmap_rwsem.
4320 * i_size is modified when holding i_mmap_rwsem, so check here
4321 * once for faults beyond end of file.
4c887265 4322 */
87bf91d3
MK
4323 size = i_size_read(mapping->host) >> huge_page_shift(h);
4324 if (idx >= size)
4325 goto out;
4326
6bda666a
CL
4327retry:
4328 page = find_lock_page(mapping, idx);
4329 if (!page) {
1a1aad8a
MK
4330 /*
4331 * Check for page in userfault range
4332 */
4333 if (userfaultfd_missing(vma)) {
4334 u32 hash;
4335 struct vm_fault vmf = {
4336 .vma = vma,
285b8dca 4337 .address = haddr,
1a1aad8a
MK
4338 .flags = flags,
4339 /*
4340 * Hard to debug if it ends up being
4341 * used by a callee that assumes
4342 * something about the other
4343 * uninitialized fields... same as in
4344 * memory.c
4345 */
4346 };
4347
4348 /*
c0d0381a
MK
4349 * hugetlb_fault_mutex and i_mmap_rwsem must be
4350 * dropped before handling userfault. Reacquire
4351 * after handling fault to make calling code simpler.
1a1aad8a 4352 */
188b04a7 4353 hash = hugetlb_fault_mutex_hash(mapping, idx);
1a1aad8a 4354 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
c0d0381a 4355 i_mmap_unlock_read(mapping);
1a1aad8a 4356 ret = handle_userfault(&vmf, VM_UFFD_MISSING);
c0d0381a 4357 i_mmap_lock_read(mapping);
1a1aad8a
MK
4358 mutex_lock(&hugetlb_fault_mutex_table[hash]);
4359 goto out;
4360 }
4361
285b8dca 4362 page = alloc_huge_page(vma, haddr, 0);
2fc39cec 4363 if (IS_ERR(page)) {
4643d67e
MK
4364 /*
4365 * Returning error will result in faulting task being
4366 * sent SIGBUS. The hugetlb fault mutex prevents two
4367 * tasks from racing to fault in the same page which
4368 * could result in false unable to allocate errors.
4369 * Page migration does not take the fault mutex, but
4370 * does a clear then write of pte's under page table
4371 * lock. Page fault code could race with migration,
4372 * notice the clear pte and try to allocate a page
4373 * here. Before returning error, get ptl and make
4374 * sure there really is no pte entry.
4375 */
4376 ptl = huge_pte_lock(h, mm, ptep);
4377 if (!huge_pte_none(huge_ptep_get(ptep))) {
4378 ret = 0;
4379 spin_unlock(ptl);
4380 goto out;
4381 }
4382 spin_unlock(ptl);
2b740303 4383 ret = vmf_error(PTR_ERR(page));
6bda666a
CL
4384 goto out;
4385 }
47ad8475 4386 clear_huge_page(page, address, pages_per_huge_page(h));
0ed361de 4387 __SetPageUptodate(page);
cb6acd01 4388 new_page = true;
ac9b9c66 4389
f83a275d 4390 if (vma->vm_flags & VM_MAYSHARE) {
ab76ad54 4391 int err = huge_add_to_page_cache(page, mapping, idx);
6bda666a
CL
4392 if (err) {
4393 put_page(page);
6bda666a
CL
4394 if (err == -EEXIST)
4395 goto retry;
4396 goto out;
4397 }
23be7468 4398 } else {
6bda666a 4399 lock_page(page);
0fe6e20b
NH
4400 if (unlikely(anon_vma_prepare(vma))) {
4401 ret = VM_FAULT_OOM;
4402 goto backout_unlocked;
4403 }
409eb8c2 4404 anon_rmap = 1;
23be7468 4405 }
0fe6e20b 4406 } else {
998b4382
NH
4407 /*
4408 * If memory error occurs between mmap() and fault, some process
4409 * don't have hwpoisoned swap entry for errored virtual address.
4410 * So we need to block hugepage fault by PG_hwpoison bit check.
4411 */
4412 if (unlikely(PageHWPoison(page))) {
0eb98f15 4413 ret = VM_FAULT_HWPOISON_LARGE |
972dc4de 4414 VM_FAULT_SET_HINDEX(hstate_index(h));
998b4382
NH
4415 goto backout_unlocked;
4416 }
6bda666a 4417 }
1e8f889b 4418
57303d80
AW
4419 /*
4420 * If we are going to COW a private mapping later, we examine the
4421 * pending reservations for this page now. This will ensure that
4422 * any allocations necessary to record that reservation occur outside
4423 * the spinlock.
4424 */
5e911373 4425 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
285b8dca 4426 if (vma_needs_reservation(h, vma, haddr) < 0) {
2b26736c
AW
4427 ret = VM_FAULT_OOM;
4428 goto backout_unlocked;
4429 }
5e911373 4430 /* Just decrements count, does not deallocate */
285b8dca 4431 vma_end_reservation(h, vma, haddr);
5e911373 4432 }
57303d80 4433
8bea8052 4434 ptl = huge_pte_lock(h, mm, ptep);
83c54070 4435 ret = 0;
7f2e9525 4436 if (!huge_pte_none(huge_ptep_get(ptep)))
4c887265
AL
4437 goto backout;
4438
07443a85 4439 if (anon_rmap) {
d6995da3 4440 ClearHPageRestoreReserve(page);
285b8dca 4441 hugepage_add_new_anon_rmap(page, vma, haddr);
ac714904 4442 } else
53f9263b 4443 page_dup_rmap(page, true);
1e8f889b
DG
4444 new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
4445 && (vma->vm_flags & VM_SHARED)));
285b8dca 4446 set_huge_pte_at(mm, haddr, ptep, new_pte);
1e8f889b 4447
5d317b2b 4448 hugetlb_count_add(pages_per_huge_page(h), mm);
788c7df4 4449 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
1e8f889b 4450 /* Optimization, do the COW without a second fault */
974e6d66 4451 ret = hugetlb_cow(mm, vma, address, ptep, page, ptl);
1e8f889b
DG
4452 }
4453
cb900f41 4454 spin_unlock(ptl);
cb6acd01
MK
4455
4456 /*
8f251a3d
MK
4457 * Only set HPageMigratable in newly allocated pages. Existing pages
4458 * found in the pagecache may not have HPageMigratableset if they have
4459 * been isolated for migration.
cb6acd01
MK
4460 */
4461 if (new_page)
8f251a3d 4462 SetHPageMigratable(page);
cb6acd01 4463
4c887265
AL
4464 unlock_page(page);
4465out:
ac9b9c66 4466 return ret;
4c887265
AL
4467
4468backout:
cb900f41 4469 spin_unlock(ptl);
2b26736c 4470backout_unlocked:
4c887265 4471 unlock_page(page);
285b8dca 4472 restore_reserve_on_error(h, vma, haddr, page);
4c887265
AL
4473 put_page(page);
4474 goto out;
ac9b9c66
HD
4475}
4476
8382d914 4477#ifdef CONFIG_SMP
188b04a7 4478u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
8382d914
DB
4479{
4480 unsigned long key[2];
4481 u32 hash;
4482
1b426bac
MK
4483 key[0] = (unsigned long) mapping;
4484 key[1] = idx;
8382d914 4485
55254636 4486 hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0);
8382d914
DB
4487
4488 return hash & (num_fault_mutexes - 1);
4489}
4490#else
4491/*
6c26d310 4492 * For uniprocessor systems we always use a single mutex, so just
8382d914
DB
4493 * return 0 and avoid the hashing overhead.
4494 */
188b04a7 4495u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
8382d914
DB
4496{
4497 return 0;
4498}
4499#endif
4500
2b740303 4501vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
788c7df4 4502 unsigned long address, unsigned int flags)
86e5216f 4503{
8382d914 4504 pte_t *ptep, entry;
cb900f41 4505 spinlock_t *ptl;
2b740303 4506 vm_fault_t ret;
8382d914
DB
4507 u32 hash;
4508 pgoff_t idx;
0fe6e20b 4509 struct page *page = NULL;
57303d80 4510 struct page *pagecache_page = NULL;
a5516438 4511 struct hstate *h = hstate_vma(vma);
8382d914 4512 struct address_space *mapping;
0f792cf9 4513 int need_wait_lock = 0;
285b8dca 4514 unsigned long haddr = address & huge_page_mask(h);
86e5216f 4515
285b8dca 4516 ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
fd6a03ed 4517 if (ptep) {
c0d0381a
MK
4518 /*
4519 * Since we hold no locks, ptep could be stale. That is
4520 * OK as we are only making decisions based on content and
4521 * not actually modifying content here.
4522 */
fd6a03ed 4523 entry = huge_ptep_get(ptep);
290408d4 4524 if (unlikely(is_hugetlb_entry_migration(entry))) {
cb900f41 4525 migration_entry_wait_huge(vma, mm, ptep);
290408d4
NH
4526 return 0;
4527 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
32f84528 4528 return VM_FAULT_HWPOISON_LARGE |
972dc4de 4529 VM_FAULT_SET_HINDEX(hstate_index(h));
fd6a03ed
NH
4530 }
4531
c0d0381a
MK
4532 /*
4533 * Acquire i_mmap_rwsem before calling huge_pte_alloc and hold
87bf91d3
MK
4534 * until finished with ptep. This serves two purposes:
4535 * 1) It prevents huge_pmd_unshare from being called elsewhere
4536 * and making the ptep no longer valid.
4537 * 2) It synchronizes us with i_size modifications during truncation.
c0d0381a
MK
4538 *
4539 * ptep could have already be assigned via huge_pte_offset. That
4540 * is OK, as huge_pte_alloc will return the same value unless
4541 * something has changed.
4542 */
8382d914 4543 mapping = vma->vm_file->f_mapping;
c0d0381a
MK
4544 i_mmap_lock_read(mapping);
4545 ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
4546 if (!ptep) {
4547 i_mmap_unlock_read(mapping);
4548 return VM_FAULT_OOM;
4549 }
8382d914 4550
3935baa9
DG
4551 /*
4552 * Serialize hugepage allocation and instantiation, so that we don't
4553 * get spurious allocation failures if two CPUs race to instantiate
4554 * the same page in the page cache.
4555 */
c0d0381a 4556 idx = vma_hugecache_offset(h, vma, haddr);
188b04a7 4557 hash = hugetlb_fault_mutex_hash(mapping, idx);
c672c7f2 4558 mutex_lock(&hugetlb_fault_mutex_table[hash]);
8382d914 4559
7f2e9525
GS
4560 entry = huge_ptep_get(ptep);
4561 if (huge_pte_none(entry)) {
8382d914 4562 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
b4d1d99f 4563 goto out_mutex;
3935baa9 4564 }
86e5216f 4565
83c54070 4566 ret = 0;
1e8f889b 4567
0f792cf9
NH
4568 /*
4569 * entry could be a migration/hwpoison entry at this point, so this
4570 * check prevents the kernel from going below assuming that we have
7c8de358
EP
4571 * an active hugepage in pagecache. This goto expects the 2nd page
4572 * fault, and is_hugetlb_entry_(migration|hwpoisoned) check will
4573 * properly handle it.
0f792cf9
NH
4574 */
4575 if (!pte_present(entry))
4576 goto out_mutex;
4577
57303d80
AW
4578 /*
4579 * If we are going to COW the mapping later, we examine the pending
4580 * reservations for this page now. This will ensure that any
4581 * allocations necessary to record that reservation occur outside the
4582 * spinlock. For private mappings, we also lookup the pagecache
4583 * page now as it is used to determine if a reservation has been
4584 * consumed.
4585 */
106c992a 4586 if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
285b8dca 4587 if (vma_needs_reservation(h, vma, haddr) < 0) {
2b26736c 4588 ret = VM_FAULT_OOM;
b4d1d99f 4589 goto out_mutex;
2b26736c 4590 }
5e911373 4591 /* Just decrements count, does not deallocate */
285b8dca 4592 vma_end_reservation(h, vma, haddr);
57303d80 4593
f83a275d 4594 if (!(vma->vm_flags & VM_MAYSHARE))
57303d80 4595 pagecache_page = hugetlbfs_pagecache_page(h,
285b8dca 4596 vma, haddr);
57303d80
AW
4597 }
4598
0f792cf9
NH
4599 ptl = huge_pte_lock(h, mm, ptep);
4600
4601 /* Check for a racing update before calling hugetlb_cow */
4602 if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
4603 goto out_ptl;
4604
56c9cfb1
NH
4605 /*
4606 * hugetlb_cow() requires page locks of pte_page(entry) and
4607 * pagecache_page, so here we need take the former one
4608 * when page != pagecache_page or !pagecache_page.
56c9cfb1
NH
4609 */
4610 page = pte_page(entry);
4611 if (page != pagecache_page)
0f792cf9
NH
4612 if (!trylock_page(page)) {
4613 need_wait_lock = 1;
4614 goto out_ptl;
4615 }
b4d1d99f 4616
0f792cf9 4617 get_page(page);
b4d1d99f 4618
788c7df4 4619 if (flags & FAULT_FLAG_WRITE) {
106c992a 4620 if (!huge_pte_write(entry)) {
974e6d66 4621 ret = hugetlb_cow(mm, vma, address, ptep,
3999f52e 4622 pagecache_page, ptl);
0f792cf9 4623 goto out_put_page;
b4d1d99f 4624 }
106c992a 4625 entry = huge_pte_mkdirty(entry);
b4d1d99f
DG
4626 }
4627 entry = pte_mkyoung(entry);
285b8dca 4628 if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
788c7df4 4629 flags & FAULT_FLAG_WRITE))
285b8dca 4630 update_mmu_cache(vma, haddr, ptep);
0f792cf9
NH
4631out_put_page:
4632 if (page != pagecache_page)
4633 unlock_page(page);
4634 put_page(page);
cb900f41
KS
4635out_ptl:
4636 spin_unlock(ptl);
57303d80
AW
4637
4638 if (pagecache_page) {
4639 unlock_page(pagecache_page);
4640 put_page(pagecache_page);
4641 }
b4d1d99f 4642out_mutex:
c672c7f2 4643 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
c0d0381a 4644 i_mmap_unlock_read(mapping);
0f792cf9
NH
4645 /*
4646 * Generally it's safe to hold refcount during waiting page lock. But
4647 * here we just wait to defer the next page fault to avoid busy loop and
4648 * the page is not used after unlocked before returning from the current
4649 * page fault. So we are safe from accessing freed page, even if we wait
4650 * here without taking refcount.
4651 */
4652 if (need_wait_lock)
4653 wait_on_page_locked(page);
1e8f889b 4654 return ret;
86e5216f
AL
4655}
4656
8fb5debc
MK
4657/*
4658 * Used by userfaultfd UFFDIO_COPY. Based on mcopy_atomic_pte with
4659 * modifications for huge pages.
4660 */
4661int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
4662 pte_t *dst_pte,
4663 struct vm_area_struct *dst_vma,
4664 unsigned long dst_addr,
4665 unsigned long src_addr,
4666 struct page **pagep)
4667{
1e392147
AA
4668 struct address_space *mapping;
4669 pgoff_t idx;
4670 unsigned long size;
1c9e8def 4671 int vm_shared = dst_vma->vm_flags & VM_SHARED;
8fb5debc
MK
4672 struct hstate *h = hstate_vma(dst_vma);
4673 pte_t _dst_pte;
4674 spinlock_t *ptl;
4675 int ret;
4676 struct page *page;
4677
4678 if (!*pagep) {
4679 ret = -ENOMEM;
4680 page = alloc_huge_page(dst_vma, dst_addr, 0);
4681 if (IS_ERR(page))
4682 goto out;
4683
4684 ret = copy_huge_page_from_user(page,
4685 (const void __user *) src_addr,
810a56b9 4686 pages_per_huge_page(h), false);
8fb5debc 4687
c1e8d7c6 4688 /* fallback to copy_from_user outside mmap_lock */
8fb5debc 4689 if (unlikely(ret)) {
9e368259 4690 ret = -ENOENT;
8fb5debc
MK
4691 *pagep = page;
4692 /* don't free the page */
4693 goto out;
4694 }
4695 } else {
4696 page = *pagep;
4697 *pagep = NULL;
4698 }
4699
4700 /*
4701 * The memory barrier inside __SetPageUptodate makes sure that
4702 * preceding stores to the page contents become visible before
4703 * the set_pte_at() write.
4704 */
4705 __SetPageUptodate(page);
8fb5debc 4706
1e392147
AA
4707 mapping = dst_vma->vm_file->f_mapping;
4708 idx = vma_hugecache_offset(h, dst_vma, dst_addr);
4709
1c9e8def
MK
4710 /*
4711 * If shared, add to page cache
4712 */
4713 if (vm_shared) {
1e392147
AA
4714 size = i_size_read(mapping->host) >> huge_page_shift(h);
4715 ret = -EFAULT;
4716 if (idx >= size)
4717 goto out_release_nounlock;
1c9e8def 4718
1e392147
AA
4719 /*
4720 * Serialization between remove_inode_hugepages() and
4721 * huge_add_to_page_cache() below happens through the
4722 * hugetlb_fault_mutex_table that here must be hold by
4723 * the caller.
4724 */
1c9e8def
MK
4725 ret = huge_add_to_page_cache(page, mapping, idx);
4726 if (ret)
4727 goto out_release_nounlock;
4728 }
4729
8fb5debc
MK
4730 ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
4731 spin_lock(ptl);
4732
1e392147
AA
4733 /*
4734 * Recheck the i_size after holding PT lock to make sure not
4735 * to leave any page mapped (as page_mapped()) beyond the end
4736 * of the i_size (remove_inode_hugepages() is strict about
4737 * enforcing that). If we bail out here, we'll also leave a
4738 * page in the radix tree in the vm_shared case beyond the end
4739 * of the i_size, but remove_inode_hugepages() will take care
4740 * of it as soon as we drop the hugetlb_fault_mutex_table.
4741 */
4742 size = i_size_read(mapping->host) >> huge_page_shift(h);
4743 ret = -EFAULT;
4744 if (idx >= size)
4745 goto out_release_unlock;
4746
8fb5debc
MK
4747 ret = -EEXIST;
4748 if (!huge_pte_none(huge_ptep_get(dst_pte)))
4749 goto out_release_unlock;
4750
1c9e8def
MK
4751 if (vm_shared) {
4752 page_dup_rmap(page, true);
4753 } else {
d6995da3 4754 ClearHPageRestoreReserve(page);
1c9e8def
MK
4755 hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
4756 }
8fb5debc
MK
4757
4758 _dst_pte = make_huge_pte(dst_vma, page, dst_vma->vm_flags & VM_WRITE);
4759 if (dst_vma->vm_flags & VM_WRITE)
4760 _dst_pte = huge_pte_mkdirty(_dst_pte);
4761 _dst_pte = pte_mkyoung(_dst_pte);
4762
4763 set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
4764
4765 (void)huge_ptep_set_access_flags(dst_vma, dst_addr, dst_pte, _dst_pte,
4766 dst_vma->vm_flags & VM_WRITE);
4767 hugetlb_count_add(pages_per_huge_page(h), dst_mm);
4768
4769 /* No need to invalidate - it was non-present before */
4770 update_mmu_cache(dst_vma, dst_addr, dst_pte);
4771
4772 spin_unlock(ptl);
8f251a3d 4773 SetHPageMigratable(page);
1c9e8def
MK
4774 if (vm_shared)
4775 unlock_page(page);
8fb5debc
MK
4776 ret = 0;
4777out:
4778 return ret;
4779out_release_unlock:
4780 spin_unlock(ptl);
1c9e8def
MK
4781 if (vm_shared)
4782 unlock_page(page);
5af10dfd 4783out_release_nounlock:
8fb5debc
MK
4784 put_page(page);
4785 goto out;
4786}
4787
82e5d378
JM
4788static void record_subpages_vmas(struct page *page, struct vm_area_struct *vma,
4789 int refs, struct page **pages,
4790 struct vm_area_struct **vmas)
4791{
4792 int nr;
4793
4794 for (nr = 0; nr < refs; nr++) {
4795 if (likely(pages))
4796 pages[nr] = mem_map_offset(page, nr);
4797 if (vmas)
4798 vmas[nr] = vma;
4799 }
4800}
4801
28a35716
ML
4802long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
4803 struct page **pages, struct vm_area_struct **vmas,
4804 unsigned long *position, unsigned long *nr_pages,
4f6da934 4805 long i, unsigned int flags, int *locked)
63551ae0 4806{
d5d4b0aa
CK
4807 unsigned long pfn_offset;
4808 unsigned long vaddr = *position;
28a35716 4809 unsigned long remainder = *nr_pages;
a5516438 4810 struct hstate *h = hstate_vma(vma);
0fa5bc40 4811 int err = -EFAULT, refs;
63551ae0 4812
63551ae0 4813 while (vaddr < vma->vm_end && remainder) {
4c887265 4814 pte_t *pte;
cb900f41 4815 spinlock_t *ptl = NULL;
2a15efc9 4816 int absent;
4c887265 4817 struct page *page;
63551ae0 4818
02057967
DR
4819 /*
4820 * If we have a pending SIGKILL, don't keep faulting pages and
4821 * potentially allocating memory.
4822 */
fa45f116 4823 if (fatal_signal_pending(current)) {
02057967
DR
4824 remainder = 0;
4825 break;
4826 }
4827
4c887265
AL
4828 /*
4829 * Some archs (sparc64, sh*) have multiple pte_ts to
2a15efc9 4830 * each hugepage. We have to make sure we get the
4c887265 4831 * first, for the page indexing below to work.
cb900f41
KS
4832 *
4833 * Note that page table lock is not held when pte is null.
4c887265 4834 */
7868a208
PA
4835 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h),
4836 huge_page_size(h));
cb900f41
KS
4837 if (pte)
4838 ptl = huge_pte_lock(h, mm, pte);
2a15efc9
HD
4839 absent = !pte || huge_pte_none(huge_ptep_get(pte));
4840
4841 /*
4842 * When coredumping, it suits get_dump_page if we just return
3ae77f43
HD
4843 * an error where there's an empty slot with no huge pagecache
4844 * to back it. This way, we avoid allocating a hugepage, and
4845 * the sparse dumpfile avoids allocating disk blocks, but its
4846 * huge holes still show up with zeroes where they need to be.
2a15efc9 4847 */
3ae77f43
HD
4848 if (absent && (flags & FOLL_DUMP) &&
4849 !hugetlbfs_pagecache_present(h, vma, vaddr)) {
cb900f41
KS
4850 if (pte)
4851 spin_unlock(ptl);
2a15efc9
HD
4852 remainder = 0;
4853 break;
4854 }
63551ae0 4855
9cc3a5bd
NH
4856 /*
4857 * We need call hugetlb_fault for both hugepages under migration
4858 * (in which case hugetlb_fault waits for the migration,) and
4859 * hwpoisoned hugepages (in which case we need to prevent the
4860 * caller from accessing to them.) In order to do this, we use
4861 * here is_swap_pte instead of is_hugetlb_entry_migration and
4862 * is_hugetlb_entry_hwpoisoned. This is because it simply covers
4863 * both cases, and because we can't follow correct pages
4864 * directly from any kind of swap entries.
4865 */
4866 if (absent || is_swap_pte(huge_ptep_get(pte)) ||
106c992a
GS
4867 ((flags & FOLL_WRITE) &&
4868 !huge_pte_write(huge_ptep_get(pte)))) {
2b740303 4869 vm_fault_t ret;
87ffc118 4870 unsigned int fault_flags = 0;
63551ae0 4871
cb900f41
KS
4872 if (pte)
4873 spin_unlock(ptl);
87ffc118
AA
4874 if (flags & FOLL_WRITE)
4875 fault_flags |= FAULT_FLAG_WRITE;
4f6da934 4876 if (locked)
71335f37
PX
4877 fault_flags |= FAULT_FLAG_ALLOW_RETRY |
4878 FAULT_FLAG_KILLABLE;
87ffc118
AA
4879 if (flags & FOLL_NOWAIT)
4880 fault_flags |= FAULT_FLAG_ALLOW_RETRY |
4881 FAULT_FLAG_RETRY_NOWAIT;
4882 if (flags & FOLL_TRIED) {
4426e945
PX
4883 /*
4884 * Note: FAULT_FLAG_ALLOW_RETRY and
4885 * FAULT_FLAG_TRIED can co-exist
4886 */
87ffc118
AA
4887 fault_flags |= FAULT_FLAG_TRIED;
4888 }
4889 ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
4890 if (ret & VM_FAULT_ERROR) {
2be7cfed 4891 err = vm_fault_to_errno(ret, flags);
87ffc118
AA
4892 remainder = 0;
4893 break;
4894 }
4895 if (ret & VM_FAULT_RETRY) {
4f6da934 4896 if (locked &&
1ac25013 4897 !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
4f6da934 4898 *locked = 0;
87ffc118
AA
4899 *nr_pages = 0;
4900 /*
4901 * VM_FAULT_RETRY must not return an
4902 * error, it will return zero
4903 * instead.
4904 *
4905 * No need to update "position" as the
4906 * caller will not check it after
4907 * *nr_pages is set to 0.
4908 */
4909 return i;
4910 }
4911 continue;
4c887265
AL
4912 }
4913
a5516438 4914 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
7f2e9525 4915 page = pte_page(huge_ptep_get(pte));
8fde12ca 4916
acbfb087
ZL
4917 /*
4918 * If subpage information not requested, update counters
4919 * and skip the same_page loop below.
4920 */
4921 if (!pages && !vmas && !pfn_offset &&
4922 (vaddr + huge_page_size(h) < vma->vm_end) &&
4923 (remainder >= pages_per_huge_page(h))) {
4924 vaddr += huge_page_size(h);
4925 remainder -= pages_per_huge_page(h);
4926 i += pages_per_huge_page(h);
4927 spin_unlock(ptl);
4928 continue;
4929 }
4930
82e5d378
JM
4931 refs = min3(pages_per_huge_page(h) - pfn_offset,
4932 (vma->vm_end - vaddr) >> PAGE_SHIFT, remainder);
0fa5bc40 4933
82e5d378
JM
4934 if (pages || vmas)
4935 record_subpages_vmas(mem_map_offset(page, pfn_offset),
4936 vma, refs,
4937 likely(pages) ? pages + i : NULL,
4938 vmas ? vmas + i : NULL);
63551ae0 4939
82e5d378 4940 if (pages) {
0fa5bc40
JM
4941 /*
4942 * try_grab_compound_head() should always succeed here,
4943 * because: a) we hold the ptl lock, and b) we've just
4944 * checked that the huge page is present in the page
4945 * tables. If the huge page is present, then the tail
4946 * pages must also be present. The ptl prevents the
4947 * head page and tail pages from being rearranged in
4948 * any way. So this page must be available at this
4949 * point, unless the page refcount overflowed:
4950 */
82e5d378 4951 if (WARN_ON_ONCE(!try_grab_compound_head(pages[i],
0fa5bc40
JM
4952 refs,
4953 flags))) {
4954 spin_unlock(ptl);
4955 remainder = 0;
4956 err = -ENOMEM;
4957 break;
4958 }
d5d4b0aa 4959 }
82e5d378
JM
4960
4961 vaddr += (refs << PAGE_SHIFT);
4962 remainder -= refs;
4963 i += refs;
4964
cb900f41 4965 spin_unlock(ptl);
63551ae0 4966 }
28a35716 4967 *nr_pages = remainder;
87ffc118
AA
4968 /*
4969 * setting position is actually required only if remainder is
4970 * not zero but it's faster not to add a "if (remainder)"
4971 * branch.
4972 */
63551ae0
DG
4973 *position = vaddr;
4974
2be7cfed 4975 return i ? i : err;
63551ae0 4976}
8f860591 4977
5491ae7b
AK
4978#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
4979/*
4980 * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
4981 * implement this.
4982 */
4983#define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
4984#endif
4985
7da4d641 4986unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
8f860591
ZY
4987 unsigned long address, unsigned long end, pgprot_t newprot)
4988{
4989 struct mm_struct *mm = vma->vm_mm;
4990 unsigned long start = address;
4991 pte_t *ptep;
4992 pte_t pte;
a5516438 4993 struct hstate *h = hstate_vma(vma);
7da4d641 4994 unsigned long pages = 0;
dff11abe 4995 bool shared_pmd = false;
ac46d4f3 4996 struct mmu_notifier_range range;
dff11abe
MK
4997
4998 /*
4999 * In the case of shared PMDs, the area to flush could be beyond
ac46d4f3 5000 * start/end. Set range.start/range.end to cover the maximum possible
dff11abe
MK
5001 * range if PMD sharing is possible.
5002 */
7269f999
JG
5003 mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA,
5004 0, vma, mm, start, end);
ac46d4f3 5005 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
8f860591
ZY
5006
5007 BUG_ON(address >= end);
ac46d4f3 5008 flush_cache_range(vma, range.start, range.end);
8f860591 5009
ac46d4f3 5010 mmu_notifier_invalidate_range_start(&range);
83cde9e8 5011 i_mmap_lock_write(vma->vm_file->f_mapping);
a5516438 5012 for (; address < end; address += huge_page_size(h)) {
cb900f41 5013 spinlock_t *ptl;
7868a208 5014 ptep = huge_pte_offset(mm, address, huge_page_size(h));
8f860591
ZY
5015 if (!ptep)
5016 continue;
cb900f41 5017 ptl = huge_pte_lock(h, mm, ptep);
34ae204f 5018 if (huge_pmd_unshare(mm, vma, &address, ptep)) {
7da4d641 5019 pages++;
cb900f41 5020 spin_unlock(ptl);
dff11abe 5021 shared_pmd = true;
39dde65c 5022 continue;
7da4d641 5023 }
a8bda28d
NH
5024 pte = huge_ptep_get(ptep);
5025 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
5026 spin_unlock(ptl);
5027 continue;
5028 }
5029 if (unlikely(is_hugetlb_entry_migration(pte))) {
5030 swp_entry_t entry = pte_to_swp_entry(pte);
5031
5032 if (is_write_migration_entry(entry)) {
5033 pte_t newpte;
5034
5035 make_migration_entry_read(&entry);
5036 newpte = swp_entry_to_pte(entry);
e5251fd4
PA
5037 set_huge_swap_pte_at(mm, address, ptep,
5038 newpte, huge_page_size(h));
a8bda28d
NH
5039 pages++;
5040 }
5041 spin_unlock(ptl);
5042 continue;
5043 }
5044 if (!huge_pte_none(pte)) {
023bdd00
AK
5045 pte_t old_pte;
5046
5047 old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
5048 pte = pte_mkhuge(huge_pte_modify(old_pte, newprot));
be7517d6 5049 pte = arch_make_huge_pte(pte, vma, NULL, 0);
023bdd00 5050 huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
7da4d641 5051 pages++;
8f860591 5052 }
cb900f41 5053 spin_unlock(ptl);
8f860591 5054 }
d833352a 5055 /*
c8c06efa 5056 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
d833352a 5057 * may have cleared our pud entry and done put_page on the page table:
c8c06efa 5058 * once we release i_mmap_rwsem, another task can do the final put_page
dff11abe
MK
5059 * and that page table be reused and filled with junk. If we actually
5060 * did unshare a page of pmds, flush the range corresponding to the pud.
d833352a 5061 */
dff11abe 5062 if (shared_pmd)
ac46d4f3 5063 flush_hugetlb_tlb_range(vma, range.start, range.end);
dff11abe
MK
5064 else
5065 flush_hugetlb_tlb_range(vma, start, end);
0f10851e
JG
5066 /*
5067 * No need to call mmu_notifier_invalidate_range() we are downgrading
5068 * page table protection not changing it to point to a new page.
5069 *
ad56b738 5070 * See Documentation/vm/mmu_notifier.rst
0f10851e 5071 */
83cde9e8 5072 i_mmap_unlock_write(vma->vm_file->f_mapping);
ac46d4f3 5073 mmu_notifier_invalidate_range_end(&range);
7da4d641
PZ
5074
5075 return pages << h->order;
8f860591
ZY
5076}
5077
33b8f84a
MK
5078/* Return true if reservation was successful, false otherwise. */
5079bool hugetlb_reserve_pages(struct inode *inode,
a1e78772 5080 long from, long to,
5a6fe125 5081 struct vm_area_struct *vma,
ca16d140 5082 vm_flags_t vm_flags)
e4e574b7 5083{
33b8f84a 5084 long chg, add = -1;
a5516438 5085 struct hstate *h = hstate_inode(inode);
90481622 5086 struct hugepage_subpool *spool = subpool_inode(inode);
9119a41e 5087 struct resv_map *resv_map;
075a61d0 5088 struct hugetlb_cgroup *h_cg = NULL;
0db9d74e 5089 long gbl_reserve, regions_needed = 0;
e4e574b7 5090
63489f8e
MK
5091 /* This should never happen */
5092 if (from > to) {
5093 VM_WARN(1, "%s called with a negative range\n", __func__);
33b8f84a 5094 return false;
63489f8e
MK
5095 }
5096
17c9d12e
MG
5097 /*
5098 * Only apply hugepage reservation if asked. At fault time, an
5099 * attempt will be made for VM_NORESERVE to allocate a page
90481622 5100 * without using reserves
17c9d12e 5101 */
ca16d140 5102 if (vm_flags & VM_NORESERVE)
33b8f84a 5103 return true;
17c9d12e 5104
a1e78772
MG
5105 /*
5106 * Shared mappings base their reservation on the number of pages that
5107 * are already allocated on behalf of the file. Private mappings need
5108 * to reserve the full area even if read-only as mprotect() may be
5109 * called to make the mapping read-write. Assume !vma is a shm mapping
5110 */
9119a41e 5111 if (!vma || vma->vm_flags & VM_MAYSHARE) {
f27a5136
MK
5112 /*
5113 * resv_map can not be NULL as hugetlb_reserve_pages is only
5114 * called for inodes for which resv_maps were created (see
5115 * hugetlbfs_get_inode).
5116 */
4e35f483 5117 resv_map = inode_resv_map(inode);
9119a41e 5118
0db9d74e 5119 chg = region_chg(resv_map, from, to, &regions_needed);
9119a41e
JK
5120
5121 } else {
e9fe92ae 5122 /* Private mapping. */
9119a41e 5123 resv_map = resv_map_alloc();
17c9d12e 5124 if (!resv_map)
33b8f84a 5125 return false;
17c9d12e 5126
a1e78772 5127 chg = to - from;
84afd99b 5128
17c9d12e
MG
5129 set_vma_resv_map(vma, resv_map);
5130 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
5131 }
5132
33b8f84a 5133 if (chg < 0)
c50ac050 5134 goto out_err;
8a630112 5135
33b8f84a
MK
5136 if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h),
5137 chg * pages_per_huge_page(h), &h_cg) < 0)
075a61d0 5138 goto out_err;
075a61d0
MA
5139
5140 if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) {
5141 /* For private mappings, the hugetlb_cgroup uncharge info hangs
5142 * of the resv_map.
5143 */
5144 resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h);
5145 }
5146
1c5ecae3
MK
5147 /*
5148 * There must be enough pages in the subpool for the mapping. If
5149 * the subpool has a minimum size, there may be some global
5150 * reservations already in place (gbl_reserve).
5151 */
5152 gbl_reserve = hugepage_subpool_get_pages(spool, chg);
33b8f84a 5153 if (gbl_reserve < 0)
075a61d0 5154 goto out_uncharge_cgroup;
5a6fe125
MG
5155
5156 /*
17c9d12e 5157 * Check enough hugepages are available for the reservation.
90481622 5158 * Hand the pages back to the subpool if there are not
5a6fe125 5159 */
33b8f84a 5160 if (hugetlb_acct_memory(h, gbl_reserve) < 0)
075a61d0 5161 goto out_put_pages;
17c9d12e
MG
5162
5163 /*
5164 * Account for the reservations made. Shared mappings record regions
5165 * that have reservations as they are shared by multiple VMAs.
5166 * When the last VMA disappears, the region map says how much
5167 * the reservation was and the page cache tells how much of
5168 * the reservation was consumed. Private mappings are per-VMA and
5169 * only the consumed reservations are tracked. When the VMA
5170 * disappears, the original reservation is the VMA size and the
5171 * consumed reservations are stored in the map. Hence, nothing
5172 * else has to be done for private mappings here
5173 */
33039678 5174 if (!vma || vma->vm_flags & VM_MAYSHARE) {
075a61d0 5175 add = region_add(resv_map, from, to, regions_needed, h, h_cg);
0db9d74e
MA
5176
5177 if (unlikely(add < 0)) {
5178 hugetlb_acct_memory(h, -gbl_reserve);
075a61d0 5179 goto out_put_pages;
0db9d74e 5180 } else if (unlikely(chg > add)) {
33039678
MK
5181 /*
5182 * pages in this range were added to the reserve
5183 * map between region_chg and region_add. This
5184 * indicates a race with alloc_huge_page. Adjust
5185 * the subpool and reserve counts modified above
5186 * based on the difference.
5187 */
5188 long rsv_adjust;
5189
075a61d0
MA
5190 hugetlb_cgroup_uncharge_cgroup_rsvd(
5191 hstate_index(h),
5192 (chg - add) * pages_per_huge_page(h), h_cg);
5193
33039678
MK
5194 rsv_adjust = hugepage_subpool_put_pages(spool,
5195 chg - add);
5196 hugetlb_acct_memory(h, -rsv_adjust);
5197 }
5198 }
33b8f84a
MK
5199 return true;
5200
075a61d0
MA
5201out_put_pages:
5202 /* put back original number of pages, chg */
5203 (void)hugepage_subpool_put_pages(spool, chg);
5204out_uncharge_cgroup:
5205 hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h),
5206 chg * pages_per_huge_page(h), h_cg);
c50ac050 5207out_err:
5e911373 5208 if (!vma || vma->vm_flags & VM_MAYSHARE)
0db9d74e
MA
5209 /* Only call region_abort if the region_chg succeeded but the
5210 * region_add failed or didn't run.
5211 */
5212 if (chg >= 0 && add < 0)
5213 region_abort(resv_map, from, to, regions_needed);
f031dd27
JK
5214 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
5215 kref_put(&resv_map->refs, resv_map_release);
33b8f84a 5216 return false;
a43a8c39
CK
5217}
5218
b5cec28d
MK
5219long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
5220 long freed)
a43a8c39 5221{
a5516438 5222 struct hstate *h = hstate_inode(inode);
4e35f483 5223 struct resv_map *resv_map = inode_resv_map(inode);
9119a41e 5224 long chg = 0;
90481622 5225 struct hugepage_subpool *spool = subpool_inode(inode);
1c5ecae3 5226 long gbl_reserve;
45c682a6 5227
f27a5136
MK
5228 /*
5229 * Since this routine can be called in the evict inode path for all
5230 * hugetlbfs inodes, resv_map could be NULL.
5231 */
b5cec28d
MK
5232 if (resv_map) {
5233 chg = region_del(resv_map, start, end);
5234 /*
5235 * region_del() can fail in the rare case where a region
5236 * must be split and another region descriptor can not be
5237 * allocated. If end == LONG_MAX, it will not fail.
5238 */
5239 if (chg < 0)
5240 return chg;
5241 }
5242
45c682a6 5243 spin_lock(&inode->i_lock);
e4c6f8be 5244 inode->i_blocks -= (blocks_per_huge_page(h) * freed);
45c682a6
KC
5245 spin_unlock(&inode->i_lock);
5246
1c5ecae3
MK
5247 /*
5248 * If the subpool has a minimum size, the number of global
5249 * reservations to be released may be adjusted.
5250 */
5251 gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
5252 hugetlb_acct_memory(h, -gbl_reserve);
b5cec28d
MK
5253
5254 return 0;
a43a8c39 5255}
93f70f90 5256
3212b535
SC
5257#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
5258static unsigned long page_table_shareable(struct vm_area_struct *svma,
5259 struct vm_area_struct *vma,
5260 unsigned long addr, pgoff_t idx)
5261{
5262 unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
5263 svma->vm_start;
5264 unsigned long sbase = saddr & PUD_MASK;
5265 unsigned long s_end = sbase + PUD_SIZE;
5266
5267 /* Allow segments to share if only one is marked locked */
de60f5f1
EM
5268 unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
5269 unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;
3212b535
SC
5270
5271 /*
5272 * match the virtual addresses, permission and the alignment of the
5273 * page table page.
5274 */
5275 if (pmd_index(addr) != pmd_index(saddr) ||
5276 vm_flags != svm_flags ||
07e51edf 5277 !range_in_vma(svma, sbase, s_end))
3212b535
SC
5278 return 0;
5279
5280 return saddr;
5281}
5282
31aafb45 5283static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
3212b535
SC
5284{
5285 unsigned long base = addr & PUD_MASK;
5286 unsigned long end = base + PUD_SIZE;
5287
5288 /*
5289 * check on proper vm_flags and page table alignment
5290 */
017b1660 5291 if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
31aafb45
NK
5292 return true;
5293 return false;
3212b535
SC
5294}
5295
017b1660
MK
5296/*
5297 * Determine if start,end range within vma could be mapped by shared pmd.
5298 * If yes, adjust start and end to cover range associated with possible
5299 * shared pmd mappings.
5300 */
5301void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
5302 unsigned long *start, unsigned long *end)
5303{
a1ba9da8
LX
5304 unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE),
5305 v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
017b1660 5306
a1ba9da8
LX
5307 /*
5308 * vma need span at least one aligned PUD size and the start,end range
5309 * must at least partialy within it.
5310 */
5311 if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
5312 (*end <= v_start) || (*start >= v_end))
017b1660
MK
5313 return;
5314
75802ca6 5315 /* Extend the range to be PUD aligned for a worst case scenario */
a1ba9da8
LX
5316 if (*start > v_start)
5317 *start = ALIGN_DOWN(*start, PUD_SIZE);
017b1660 5318
a1ba9da8
LX
5319 if (*end < v_end)
5320 *end = ALIGN(*end, PUD_SIZE);
017b1660
MK
5321}
5322
3212b535
SC
5323/*
5324 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
5325 * and returns the corresponding pte. While this is not necessary for the
5326 * !shared pmd case because we can allocate the pmd later as well, it makes the
c0d0381a
MK
5327 * code much cleaner.
5328 *
0bf7b64e
MK
5329 * This routine must be called with i_mmap_rwsem held in at least read mode if
5330 * sharing is possible. For hugetlbfs, this prevents removal of any page
5331 * table entries associated with the address space. This is important as we
5332 * are setting up sharing based on existing page table entries (mappings).
5333 *
5334 * NOTE: This routine is only called from huge_pte_alloc. Some callers of
5335 * huge_pte_alloc know that sharing is not possible and do not take
5336 * i_mmap_rwsem as a performance optimization. This is handled by the
5337 * if !vma_shareable check at the beginning of the routine. i_mmap_rwsem is
5338 * only required for subsequent processing.
3212b535
SC
5339 */
5340pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
5341{
5342 struct vm_area_struct *vma = find_vma(mm, addr);
5343 struct address_space *mapping = vma->vm_file->f_mapping;
5344 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
5345 vma->vm_pgoff;
5346 struct vm_area_struct *svma;
5347 unsigned long saddr;
5348 pte_t *spte = NULL;
5349 pte_t *pte;
cb900f41 5350 spinlock_t *ptl;
3212b535
SC
5351
5352 if (!vma_shareable(vma, addr))
5353 return (pte_t *)pmd_alloc(mm, pud, addr);
5354
0bf7b64e 5355 i_mmap_assert_locked(mapping);
3212b535
SC
5356 vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
5357 if (svma == vma)
5358 continue;
5359
5360 saddr = page_table_shareable(svma, vma, addr, idx);
5361 if (saddr) {
7868a208
PA
5362 spte = huge_pte_offset(svma->vm_mm, saddr,
5363 vma_mmu_pagesize(svma));
3212b535
SC
5364 if (spte) {
5365 get_page(virt_to_page(spte));
5366 break;
5367 }
5368 }
5369 }
5370
5371 if (!spte)
5372 goto out;
5373
8bea8052 5374 ptl = huge_pte_lock(hstate_vma(vma), mm, spte);
dc6c9a35 5375 if (pud_none(*pud)) {
3212b535
SC
5376 pud_populate(mm, pud,
5377 (pmd_t *)((unsigned long)spte & PAGE_MASK));
c17b1f42 5378 mm_inc_nr_pmds(mm);
dc6c9a35 5379 } else {
3212b535 5380 put_page(virt_to_page(spte));
dc6c9a35 5381 }
cb900f41 5382 spin_unlock(ptl);
3212b535
SC
5383out:
5384 pte = (pte_t *)pmd_alloc(mm, pud, addr);
3212b535
SC
5385 return pte;
5386}
5387
5388/*
5389 * unmap huge page backed by shared pte.
5390 *
5391 * Hugetlb pte page is ref counted at the time of mapping. If pte is shared
5392 * indicated by page_count > 1, unmap is achieved by clearing pud and
5393 * decrementing the ref count. If count == 1, the pte page is not shared.
5394 *
c0d0381a 5395 * Called with page table lock held and i_mmap_rwsem held in write mode.
3212b535
SC
5396 *
5397 * returns: 1 successfully unmapped a shared pte page
5398 * 0 the underlying pte page is not shared, or it is the last user
5399 */
34ae204f
MK
5400int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
5401 unsigned long *addr, pte_t *ptep)
3212b535
SC
5402{
5403 pgd_t *pgd = pgd_offset(mm, *addr);
c2febafc
KS
5404 p4d_t *p4d = p4d_offset(pgd, *addr);
5405 pud_t *pud = pud_offset(p4d, *addr);
3212b535 5406
34ae204f 5407 i_mmap_assert_write_locked(vma->vm_file->f_mapping);
3212b535
SC
5408 BUG_ON(page_count(virt_to_page(ptep)) == 0);
5409 if (page_count(virt_to_page(ptep)) == 1)
5410 return 0;
5411
5412 pud_clear(pud);
5413 put_page(virt_to_page(ptep));
dc6c9a35 5414 mm_dec_nr_pmds(mm);
3212b535
SC
5415 *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
5416 return 1;
5417}
9e5fc74c
SC
5418#define want_pmd_share() (1)
5419#else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
5420pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
5421{
5422 return NULL;
5423}
e81f2d22 5424
34ae204f
MK
5425int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
5426 unsigned long *addr, pte_t *ptep)
e81f2d22
ZZ
5427{
5428 return 0;
5429}
017b1660
MK
5430
5431void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
5432 unsigned long *start, unsigned long *end)
5433{
5434}
9e5fc74c 5435#define want_pmd_share() (0)
3212b535
SC
5436#endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
5437
9e5fc74c
SC
5438#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
5439pte_t *huge_pte_alloc(struct mm_struct *mm,
5440 unsigned long addr, unsigned long sz)
5441{
5442 pgd_t *pgd;
c2febafc 5443 p4d_t *p4d;
9e5fc74c
SC
5444 pud_t *pud;
5445 pte_t *pte = NULL;
5446
5447 pgd = pgd_offset(mm, addr);
f4f0a3d8
KS
5448 p4d = p4d_alloc(mm, pgd, addr);
5449 if (!p4d)
5450 return NULL;
c2febafc 5451 pud = pud_alloc(mm, p4d, addr);
9e5fc74c
SC
5452 if (pud) {
5453 if (sz == PUD_SIZE) {
5454 pte = (pte_t *)pud;
5455 } else {
5456 BUG_ON(sz != PMD_SIZE);
5457 if (want_pmd_share() && pud_none(*pud))
5458 pte = huge_pmd_share(mm, addr, pud);
5459 else
5460 pte = (pte_t *)pmd_alloc(mm, pud, addr);
5461 }
5462 }
4e666314 5463 BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte));
9e5fc74c
SC
5464
5465 return pte;
5466}
5467
9b19df29
PA
5468/*
5469 * huge_pte_offset() - Walk the page table to resolve the hugepage
5470 * entry at address @addr
5471 *
8ac0b81a
LX
5472 * Return: Pointer to page table entry (PUD or PMD) for
5473 * address @addr, or NULL if a !p*d_present() entry is encountered and the
9b19df29
PA
5474 * size @sz doesn't match the hugepage size at this level of the page
5475 * table.
5476 */
7868a208
PA
5477pte_t *huge_pte_offset(struct mm_struct *mm,
5478 unsigned long addr, unsigned long sz)
9e5fc74c
SC
5479{
5480 pgd_t *pgd;
c2febafc 5481 p4d_t *p4d;
8ac0b81a
LX
5482 pud_t *pud;
5483 pmd_t *pmd;
9e5fc74c
SC
5484
5485 pgd = pgd_offset(mm, addr);
c2febafc
KS
5486 if (!pgd_present(*pgd))
5487 return NULL;
5488 p4d = p4d_offset(pgd, addr);
5489 if (!p4d_present(*p4d))
5490 return NULL;
9b19df29 5491
c2febafc 5492 pud = pud_offset(p4d, addr);
8ac0b81a
LX
5493 if (sz == PUD_SIZE)
5494 /* must be pud huge, non-present or none */
c2febafc 5495 return (pte_t *)pud;
8ac0b81a 5496 if (!pud_present(*pud))
9b19df29 5497 return NULL;
8ac0b81a 5498 /* must have a valid entry and size to go further */
9b19df29 5499
8ac0b81a
LX
5500 pmd = pmd_offset(pud, addr);
5501 /* must be pmd huge, non-present or none */
5502 return (pte_t *)pmd;
9e5fc74c
SC
5503}
5504
61f77eda
NH
5505#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
5506
5507/*
5508 * These functions are overwritable if your architecture needs its own
5509 * behavior.
5510 */
5511struct page * __weak
5512follow_huge_addr(struct mm_struct *mm, unsigned long address,
5513 int write)
5514{
5515 return ERR_PTR(-EINVAL);
5516}
5517
4dc71451
AK
5518struct page * __weak
5519follow_huge_pd(struct vm_area_struct *vma,
5520 unsigned long address, hugepd_t hpd, int flags, int pdshift)
5521{
5522 WARN(1, "hugepd follow called with no support for hugepage directory format\n");
5523 return NULL;
5524}
5525
61f77eda 5526struct page * __weak
9e5fc74c 5527follow_huge_pmd(struct mm_struct *mm, unsigned long address,
e66f17ff 5528 pmd_t *pmd, int flags)
9e5fc74c 5529{
e66f17ff
NH
5530 struct page *page = NULL;
5531 spinlock_t *ptl;
c9d398fa 5532 pte_t pte;
3faa52c0
JH
5533
5534 /* FOLL_GET and FOLL_PIN are mutually exclusive. */
5535 if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
5536 (FOLL_PIN | FOLL_GET)))
5537 return NULL;
5538
e66f17ff
NH
5539retry:
5540 ptl = pmd_lockptr(mm, pmd);
5541 spin_lock(ptl);
5542 /*
5543 * make sure that the address range covered by this pmd is not
5544 * unmapped from other threads.
5545 */
5546 if (!pmd_huge(*pmd))
5547 goto out;
c9d398fa
NH
5548 pte = huge_ptep_get((pte_t *)pmd);
5549 if (pte_present(pte)) {
97534127 5550 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
3faa52c0
JH
5551 /*
5552 * try_grab_page() should always succeed here, because: a) we
5553 * hold the pmd (ptl) lock, and b) we've just checked that the
5554 * huge pmd (head) page is present in the page tables. The ptl
5555 * prevents the head page and tail pages from being rearranged
5556 * in any way. So this page must be available at this point,
5557 * unless the page refcount overflowed:
5558 */
5559 if (WARN_ON_ONCE(!try_grab_page(page, flags))) {
5560 page = NULL;
5561 goto out;
5562 }
e66f17ff 5563 } else {
c9d398fa 5564 if (is_hugetlb_entry_migration(pte)) {
e66f17ff
NH
5565 spin_unlock(ptl);
5566 __migration_entry_wait(mm, (pte_t *)pmd, ptl);
5567 goto retry;
5568 }
5569 /*
5570 * hwpoisoned entry is treated as no_page_table in
5571 * follow_page_mask().
5572 */
5573 }
5574out:
5575 spin_unlock(ptl);
9e5fc74c
SC
5576 return page;
5577}
5578
61f77eda 5579struct page * __weak
9e5fc74c 5580follow_huge_pud(struct mm_struct *mm, unsigned long address,
e66f17ff 5581 pud_t *pud, int flags)
9e5fc74c 5582{
3faa52c0 5583 if (flags & (FOLL_GET | FOLL_PIN))
e66f17ff 5584 return NULL;
9e5fc74c 5585
e66f17ff 5586 return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
9e5fc74c
SC
5587}
5588
faaa5b62
AK
5589struct page * __weak
5590follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags)
5591{
3faa52c0 5592 if (flags & (FOLL_GET | FOLL_PIN))
faaa5b62
AK
5593 return NULL;
5594
5595 return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
5596}
5597
31caf665
NH
5598bool isolate_huge_page(struct page *page, struct list_head *list)
5599{
bcc54222
NH
5600 bool ret = true;
5601
31caf665 5602 spin_lock(&hugetlb_lock);
8f251a3d
MK
5603 if (!PageHeadHuge(page) ||
5604 !HPageMigratable(page) ||
0eb2df2b 5605 !get_page_unless_zero(page)) {
bcc54222
NH
5606 ret = false;
5607 goto unlock;
5608 }
8f251a3d 5609 ClearHPageMigratable(page);
31caf665 5610 list_move_tail(&page->lru, list);
bcc54222 5611unlock:
31caf665 5612 spin_unlock(&hugetlb_lock);
bcc54222 5613 return ret;
31caf665
NH
5614}
5615
5616void putback_active_hugepage(struct page *page)
5617{
31caf665 5618 spin_lock(&hugetlb_lock);
8f251a3d 5619 SetHPageMigratable(page);
31caf665
NH
5620 list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
5621 spin_unlock(&hugetlb_lock);
5622 put_page(page);
5623}
ab5ac90a
MH
5624
5625void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
5626{
5627 struct hstate *h = page_hstate(oldpage);
5628
5629 hugetlb_cgroup_migrate(oldpage, newpage);
5630 set_page_owner_migrate_reason(newpage, reason);
5631
5632 /*
5633 * transfer temporary state of the new huge page. This is
5634 * reverse to other transitions because the newpage is going to
5635 * be final while the old one will be freed so it takes over
5636 * the temporary status.
5637 *
5638 * Also note that we have to transfer the per-node surplus state
5639 * here as well otherwise the global surplus count will not match
5640 * the per-node's.
5641 */
9157c311 5642 if (HPageTemporary(newpage)) {
ab5ac90a
MH
5643 int old_nid = page_to_nid(oldpage);
5644 int new_nid = page_to_nid(newpage);
5645
9157c311
MK
5646 SetHPageTemporary(oldpage);
5647 ClearHPageTemporary(newpage);
ab5ac90a
MH
5648
5649 spin_lock(&hugetlb_lock);
5650 if (h->surplus_huge_pages_node[old_nid]) {
5651 h->surplus_huge_pages_node[old_nid]--;
5652 h->surplus_huge_pages_node[new_nid]++;
5653 }
5654 spin_unlock(&hugetlb_lock);
5655 }
5656}
cf11e85f
RG
5657
5658#ifdef CONFIG_CMA
cf11e85f
RG
5659static bool cma_reserve_called __initdata;
5660
5661static int __init cmdline_parse_hugetlb_cma(char *p)
5662{
5663 hugetlb_cma_size = memparse(p, &p);
5664 return 0;
5665}
5666
5667early_param("hugetlb_cma", cmdline_parse_hugetlb_cma);
5668
5669void __init hugetlb_cma_reserve(int order)
5670{
5671 unsigned long size, reserved, per_node;
5672 int nid;
5673
5674 cma_reserve_called = true;
5675
5676 if (!hugetlb_cma_size)
5677 return;
5678
5679 if (hugetlb_cma_size < (PAGE_SIZE << order)) {
5680 pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n",
5681 (PAGE_SIZE << order) / SZ_1M);
5682 return;
5683 }
5684
5685 /*
5686 * If 3 GB area is requested on a machine with 4 numa nodes,
5687 * let's allocate 1 GB on first three nodes and ignore the last one.
5688 */
5689 per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes);
5690 pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n",
5691 hugetlb_cma_size / SZ_1M, per_node / SZ_1M);
5692
5693 reserved = 0;
5694 for_each_node_state(nid, N_ONLINE) {
5695 int res;
2281f797 5696 char name[CMA_MAX_NAME];
cf11e85f
RG
5697
5698 size = min(per_node, hugetlb_cma_size - reserved);
5699 size = round_up(size, PAGE_SIZE << order);
5700
2281f797 5701 snprintf(name, sizeof(name), "hugetlb%d", nid);
cf11e85f 5702 res = cma_declare_contiguous_nid(0, size, 0, PAGE_SIZE << order,
29d0f41d 5703 0, false, name,
cf11e85f
RG
5704 &hugetlb_cma[nid], nid);
5705 if (res) {
5706 pr_warn("hugetlb_cma: reservation failed: err %d, node %d",
5707 res, nid);
5708 continue;
5709 }
5710
5711 reserved += size;
5712 pr_info("hugetlb_cma: reserved %lu MiB on node %d\n",
5713 size / SZ_1M, nid);
5714
5715 if (reserved >= hugetlb_cma_size)
5716 break;
5717 }
5718}
5719
5720void __init hugetlb_cma_check(void)
5721{
5722 if (!hugetlb_cma_size || cma_reserve_called)
5723 return;
5724
5725 pr_warn("hugetlb_cma: the option isn't supported by current arch\n");
5726}
5727
5728#endif /* CONFIG_CMA */