mm/hugetlb: document the reserve map/region tracking routines
1da177e4
LT
1/*
2 * Generic hugetlb support.
6d49e352 3 * (C) Nadia Yvette Chambers, April 2004
1da177e4 4 */
1da177e4
LT
5#include <linux/list.h>
6#include <linux/init.h>
7#include <linux/module.h>
8#include <linux/mm.h>
e1759c21 9#include <linux/seq_file.h>
1da177e4
LT
10#include <linux/sysctl.h>
11#include <linux/highmem.h>
cddb8a5c 12#include <linux/mmu_notifier.h>
1da177e4 13#include <linux/nodemask.h>
63551ae0 14#include <linux/pagemap.h>
5da7ca86 15#include <linux/mempolicy.h>
3b32123d 16#include <linux/compiler.h>
aea47ff3 17#include <linux/cpuset.h>
3935baa9 18#include <linux/mutex.h>
aa888a74 19#include <linux/bootmem.h>
a3437870 20#include <linux/sysfs.h>
5a0e3ad6 21#include <linux/slab.h>
0fe6e20b 22#include <linux/rmap.h>
fd6a03ed
NH
23#include <linux/swap.h>
24#include <linux/swapops.h>
c8721bbb 25#include <linux/page-isolation.h>
8382d914 26#include <linux/jhash.h>
d6606683 27
63551ae0
DG
28#include <asm/page.h>
29#include <asm/pgtable.h>
24669e58 30#include <asm/tlb.h>
63551ae0 31
24669e58 32#include <linux/io.h>
63551ae0 33#include <linux/hugetlb.h>
9dd540e2 34#include <linux/hugetlb_cgroup.h>
9a305230 35#include <linux/node.h>
7835e98b 36#include "internal.h"
1da177e4 37
753162cd 38int hugepages_treat_as_movable;
a5516438 39
c3f38a38 40int hugetlb_max_hstate __read_mostly;
e5ff2159
AK
41unsigned int default_hstate_idx;
42struct hstate hstates[HUGE_MAX_HSTATE];
641844f5
NH
43/*
44 * Minimum page order among possible hugepage sizes, set to a proper value
45 * at boot time.
46 */
47static unsigned int minimum_order __read_mostly = UINT_MAX;
e5ff2159 48
53ba51d2
JT
49__initdata LIST_HEAD(huge_boot_pages);
50
e5ff2159
AK
51/* for command line parsing */
52static struct hstate * __initdata parsed_hstate;
53static unsigned long __initdata default_hstate_max_huge_pages;
e11bfbfc 54static unsigned long __initdata default_hstate_size;
e5ff2159 55
3935baa9 56/*
31caf665
NH
57 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
58 * free_huge_pages, and surplus_huge_pages.
3935baa9 59 */
c3f38a38 60DEFINE_SPINLOCK(hugetlb_lock);
0bd0f9fb 61
8382d914
DB
62/*
63 * Serializes faults on the same logical page. This is used to
64 * prevent spurious OOMs when the hugepage pool is fully utilized.
65 */
66static int num_fault_mutexes;
67static struct mutex *htlb_fault_mutex_table ____cacheline_aligned_in_smp;
68
7ca02d0a
MK
69/* Forward declaration */
70static int hugetlb_acct_memory(struct hstate *h, long delta);
71
90481622
DG
72static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
73{
74 bool free = (spool->count == 0) && (spool->used_hpages == 0);
75
76 spin_unlock(&spool->lock);
77
78 /* If no pages are used, and no other handles to the subpool
7ca02d0a
MK
79 * remain, give up any reservations based on minimum size and
80 * free the subpool */
81 if (free) {
82 if (spool->min_hpages != -1)
83 hugetlb_acct_memory(spool->hstate,
84 -spool->min_hpages);
90481622 85 kfree(spool);
7ca02d0a 86 }
90481622
DG
87}
88
7ca02d0a
MK
89struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
90 long min_hpages)
90481622
DG
91{
92 struct hugepage_subpool *spool;
93
c6a91820 94 spool = kzalloc(sizeof(*spool), GFP_KERNEL);
90481622
DG
95 if (!spool)
96 return NULL;
97
98 spin_lock_init(&spool->lock);
99 spool->count = 1;
7ca02d0a
MK
100 spool->max_hpages = max_hpages;
101 spool->hstate = h;
102 spool->min_hpages = min_hpages;
103
104 if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
105 kfree(spool);
106 return NULL;
107 }
108 spool->rsv_hpages = min_hpages;
90481622
DG
109
110 return spool;
111}
112
113void hugepage_put_subpool(struct hugepage_subpool *spool)
114{
115 spin_lock(&spool->lock);
116 BUG_ON(!spool->count);
117 spool->count--;
118 unlock_or_release_subpool(spool);
119}
120
1c5ecae3
MK
121/*
122 * Subpool accounting for allocating and reserving pages.
123 * Return -ENOMEM if there are not enough resources to satisfy the
124 * request. Otherwise, return the number of pages by which the
125 * global pools must be adjusted (upward). The returned value may
126 * only be different than the passed value (delta) in the case where
127 * a subpool minimum size must be maintained.
128 */
129static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
90481622
DG
130 long delta)
131{
1c5ecae3 132 long ret = delta;
90481622
DG
133
134 if (!spool)
1c5ecae3 135 return ret;
90481622
DG
136
137 spin_lock(&spool->lock);
1c5ecae3
MK
138
139 if (spool->max_hpages != -1) { /* maximum size accounting */
140 if ((spool->used_hpages + delta) <= spool->max_hpages)
141 spool->used_hpages += delta;
142 else {
143 ret = -ENOMEM;
144 goto unlock_ret;
145 }
90481622 146 }
90481622 147
1c5ecae3
MK
148 if (spool->min_hpages != -1) { /* minimum size accounting */
149 if (delta > spool->rsv_hpages) {
150 /*
151 * Asking for more reserves than those already taken on
152 * behalf of subpool. Return difference.
153 */
154 ret = delta - spool->rsv_hpages;
155 spool->rsv_hpages = 0;
156 } else {
157 ret = 0; /* reserves already accounted for */
158 spool->rsv_hpages -= delta;
159 }
160 }
161
162unlock_ret:
163 spin_unlock(&spool->lock);
90481622
DG
164 return ret;
165}
166
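/*
 * Worked example of the subpool "get" accounting above (the numbers are
 * illustrative): suppose a subpool was created via
 * hugepage_new_subpool(h, 10, 4), i.e. max_hpages = 10 and min_hpages = 4,
 * so it starts out with used_hpages = 0 and rsv_hpages = 4.
 *
 *   hugepage_subpool_get_pages(spool, 2)
 *     used_hpages 0 -> 2, rsv_hpages 4 -> 2, returns 0
 *     (fully covered by the minimum-size reservation)
 *   hugepage_subpool_get_pages(spool, 3)
 *     used_hpages 2 -> 5, rsv_hpages 2 -> 0, returns 1
 *     (one page must now be taken from the global pool)
 *   hugepage_subpool_get_pages(spool, 6)
 *     used_hpages would become 11 > max_hpages, returns -ENOMEM
 */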
1c5ecae3
MK
167/*
168 * Subpool accounting for freeing and unreserving pages.
169 * Return the number of global page reservations that must be dropped.
170 * The return value may only be different than the passed value (delta)
171 * in the case where a subpool minimum size must be maintained.
172 */
173static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
90481622
DG
174 long delta)
175{
1c5ecae3
MK
176 long ret = delta;
177
90481622 178 if (!spool)
1c5ecae3 179 return delta;
90481622
DG
180
181 spin_lock(&spool->lock);
1c5ecae3
MK
182
183 if (spool->max_hpages != -1) /* maximum size accounting */
184 spool->used_hpages -= delta;
185
186 if (spool->min_hpages != -1) { /* minimum size accounting */
187 if (spool->rsv_hpages + delta <= spool->min_hpages)
188 ret = 0;
189 else
190 ret = spool->rsv_hpages + delta - spool->min_hpages;
191
192 spool->rsv_hpages += delta;
193 if (spool->rsv_hpages > spool->min_hpages)
194 spool->rsv_hpages = spool->min_hpages;
195 }
196
197 /*
198 * If hugetlbfs_put_super couldn't free spool due to an outstanding
199 * quota reference, free it now.
200 */
90481622 201 unlock_or_release_subpool(spool);
1c5ecae3
MK
202
203 return ret;
90481622
DG
204}
205
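/*
 * Worked example of the subpool "put" accounting above (illustrative
 * numbers, same subpool as in the "get" example, min_hpages = 4):
 * suppose used_hpages = 5 and rsv_hpages = 0 when the pages are freed.
 *
 *   hugepage_subpool_put_pages(spool, 5)
 *     used_hpages 5 -> 0, rsv_hpages 0 -> 4 (capped at min_hpages),
 *     returns 1, i.e. one global reservation is dropped while the
 *     other four pages go back to backing the subpool minimum.
 */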
206static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
207{
208 return HUGETLBFS_SB(inode->i_sb)->spool;
209}
210
211static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
212{
496ad9aa 213 return subpool_inode(file_inode(vma->vm_file));
90481622
DG
214}
215
96822904
AW
216/*
217 * Region tracking -- allows tracking of reservations and instantiated pages
218 * across the pages in a mapping.
84afd99b 219 *
1dd308a7
MK
220 * The region data structures are embedded into a resv_map and protected
221 * by a resv_map's lock. The set of regions within the resv_map represent
222 * reservations for huge pages, or huge pages that have already been
223 * instantiated within the map. The from and to elements are huge page
224 * indices into the associated mapping. from indicates the starting index
225 * of the region. to represents the first index past the end of the region.
226 *
227 * For example, a file region structure with from == 0 and to == 4 represents
228 * four huge pages in a mapping. It is important to note that the to element
229 * represents the first element past the end of the region. This is used in
230 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
231 *
232 * Interval notation of the form [from, to) will be used to indicate that
233 * the endpoint from is inclusive and to is exclusive.
96822904
AW
234 */
235struct file_region {
236 struct list_head link;
237 long from;
238 long to;
239};
240
1dd308a7
MK
241/*
242 * Add the huge page range represented by [f, t) to the reserve
243 * map. Existing regions will be expanded to accommodate the
244 * specified range. We know only existing regions need to be
245 * expanded, because region_add is only called after region_chg
246 * with the same range. If a new file_region structure must
247 * be allocated, it is done in region_chg.
248 */
1406ec9b 249static long region_add(struct resv_map *resv, long f, long t)
96822904 250{
1406ec9b 251 struct list_head *head = &resv->regions;
96822904
AW
252 struct file_region *rg, *nrg, *trg;
253
7b24d861 254 spin_lock(&resv->lock);
96822904
AW
255 /* Locate the region we are either in or before. */
256 list_for_each_entry(rg, head, link)
257 if (f <= rg->to)
258 break;
259
260 /* Round our left edge to the current segment if it encloses us. */
261 if (f > rg->from)
262 f = rg->from;
263
264 /* Check for and consume any regions we now overlap with. */
265 nrg = rg;
266 list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
267 if (&rg->link == head)
268 break;
269 if (rg->from > t)
270 break;
271
272 /* If this area reaches higher, then extend our area to
273 * include it completely. If this is not the first area
274 * which we intend to reuse, free it. */
275 if (rg->to > t)
276 t = rg->to;
277 if (rg != nrg) {
278 list_del(&rg->link);
279 kfree(rg);
280 }
281 }
282 nrg->from = f;
283 nrg->to = t;
7b24d861 284 spin_unlock(&resv->lock);
96822904
AW
285 return 0;
286}
287
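/*
 * Worked example for region_add() (illustrative offsets): if the reserve
 * map already contains [0, 2) and [3, 5), then region_add(resv, 1, 4)
 * rounds f down to 0 (the enclosing region), absorbs the overlapping
 * region [3, 5) by extending t to 5, frees that entry, and leaves a
 * single coalesced region [0, 5) in the map.
 */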
1dd308a7
MK
288/*
289 * Examine the existing reserve map and determine how many
290 * huge pages in the specified range [f, t) are NOT currently
291 * represented. This routine is called before a subsequent
292 * call to region_add that will actually modify the reserve
293 * map to add the specified range [f, t). region_chg does
294 * not change the number of huge pages represented by the
295 * map. However, if the existing regions in the map can not
296 * be expanded to represent the new range, a new file_region
297 * structure is added to the map as a placeholder. This is
298 * so that the subsequent region_add call will have all the
299 * regions it needs and will not fail.
300 *
301 * Returns the number of huge pages that need to be added
302 * to the existing reservation map for the range [f, t).
303 * This number is greater or equal to zero. -ENOMEM is
304 * returned if a new file_region structure is needed and can
305 * not be allocated.
306 */
1406ec9b 307static long region_chg(struct resv_map *resv, long f, long t)
96822904 308{
1406ec9b 309 struct list_head *head = &resv->regions;
7b24d861 310 struct file_region *rg, *nrg = NULL;
96822904
AW
311 long chg = 0;
312
7b24d861
DB
313retry:
314 spin_lock(&resv->lock);
96822904
AW
315 /* Locate the region we are before or in. */
316 list_for_each_entry(rg, head, link)
317 if (f <= rg->to)
318 break;
319
320 /* If we are below the current region then a new region is required.
321 * Subtle: allocate a new region at the position but make it zero
322 * size such that we can guarantee to record the reservation. */
323 if (&rg->link == head || t < rg->from) {
7b24d861
DB
324 if (!nrg) {
325 spin_unlock(&resv->lock);
326 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
327 if (!nrg)
328 return -ENOMEM;
329
330 nrg->from = f;
331 nrg->to = f;
332 INIT_LIST_HEAD(&nrg->link);
333 goto retry;
334 }
96822904 335
7b24d861
DB
336 list_add(&nrg->link, rg->link.prev);
337 chg = t - f;
338 goto out_nrg;
96822904
AW
339 }
340
341 /* Round our left edge to the current segment if it encloses us. */
342 if (f > rg->from)
343 f = rg->from;
344 chg = t - f;
345
346 /* Check for and consume any regions we now overlap with. */
347 list_for_each_entry(rg, rg->link.prev, link) {
348 if (&rg->link == head)
349 break;
350 if (rg->from > t)
7b24d861 351 goto out;
96822904 352
25985edc 353 /* We overlap with this area; if it extends further than
96822904
AW
354 * us then we must extend ourselves. Account for its
355 * existing reservation. */
356 if (rg->to > t) {
357 chg += rg->to - t;
358 t = rg->to;
359 }
360 chg -= rg->to - rg->from;
361 }
7b24d861
DB
362
363out:
364 spin_unlock(&resv->lock);
365 /* We already know we raced and no longer need the new region */
366 kfree(nrg);
367 return chg;
368out_nrg:
369 spin_unlock(&resv->lock);
96822904
AW
370 return chg;
371}
372
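/*
 * Worked example for the region_chg()/region_add() pairing (illustrative
 * offsets): with the map containing only [0, 2), region_chg(resv, 1, 4)
 * returns 2 because huge page indices 2 and 3 are not yet covered, and no
 * placeholder needs to be allocated since [1, 4) overlaps an existing
 * region. The subsequent region_add(resv, 1, 4) then expands that region
 * to [0, 4). Had the requested range fallen entirely outside all existing
 * regions, region_chg() would instead have inserted a zero-size
 * placeholder so that region_add() cannot fail for lack of memory.
 */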
1dd308a7
MK
373/*
374 * Truncate the reserve map at index 'end'. Modify/truncate any
375 * region which contains end. Delete any regions past end.
376 * Return the number of huge pages removed from the map.
377 */
1406ec9b 378static long region_truncate(struct resv_map *resv, long end)
96822904 379{
1406ec9b 380 struct list_head *head = &resv->regions;
96822904
AW
381 struct file_region *rg, *trg;
382 long chg = 0;
383
7b24d861 384 spin_lock(&resv->lock);
96822904
AW
385 /* Locate the region we are either in or before. */
386 list_for_each_entry(rg, head, link)
387 if (end <= rg->to)
388 break;
389 if (&rg->link == head)
7b24d861 390 goto out;
96822904
AW
391
392 /* If we are in the middle of a region then adjust it. */
393 if (end > rg->from) {
394 chg = rg->to - end;
395 rg->to = end;
396 rg = list_entry(rg->link.next, typeof(*rg), link);
397 }
398
399 /* Drop any remaining regions. */
400 list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
401 if (&rg->link == head)
402 break;
403 chg += rg->to - rg->from;
404 list_del(&rg->link);
405 kfree(rg);
406 }
7b24d861
DB
407
408out:
409 spin_unlock(&resv->lock);
96822904
AW
410 return chg;
411}
412
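/*
 * Worked example for region_truncate() (illustrative offsets): with the
 * map containing [0, 4) and [6, 10), region_truncate(resv, 2) trims the
 * first region to [0, 2), deletes [6, 10) entirely, and returns
 * 2 + 4 = 6 huge pages removed from the map.
 */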
1dd308a7
MK
413/*
414 * Count and return the number of huge pages in the reserve map
415 * that intersect with the range [f, t).
416 */
1406ec9b 417static long region_count(struct resv_map *resv, long f, long t)
84afd99b 418{
1406ec9b 419 struct list_head *head = &resv->regions;
84afd99b
AW
420 struct file_region *rg;
421 long chg = 0;
422
7b24d861 423 spin_lock(&resv->lock);
84afd99b
AW
424 /* Locate each segment we overlap with, and count that overlap. */
425 list_for_each_entry(rg, head, link) {
f2135a4a
WSH
426 long seg_from;
427 long seg_to;
84afd99b
AW
428
429 if (rg->to <= f)
430 continue;
431 if (rg->from >= t)
432 break;
433
434 seg_from = max(rg->from, f);
435 seg_to = min(rg->to, t);
436
437 chg += seg_to - seg_from;
438 }
7b24d861 439 spin_unlock(&resv->lock);
84afd99b
AW
440
441 return chg;
442}
443
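/*
 * Worked example for region_count() (illustrative offsets): with the map
 * containing [0, 4) and [6, 10), region_count(resv, 2, 8) counts the
 * overlaps [2, 4) and [6, 8) and returns 4.
 */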
e7c4b0bf
AW
444/*
445 * Convert the address within this vma to the page offset within
446 * the mapping, in pagecache page units; huge pages here.
447 */
a5516438
AK
448static pgoff_t vma_hugecache_offset(struct hstate *h,
449 struct vm_area_struct *vma, unsigned long address)
e7c4b0bf 450{
a5516438
AK
451 return ((address - vma->vm_start) >> huge_page_shift(h)) +
452 (vma->vm_pgoff >> huge_page_order(h));
e7c4b0bf
AW
453}
454
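/*
 * Example (assuming x86_64 with 4KB base pages and 2MB huge pages, i.e.
 * huge_page_shift(h) == 21 and huge_page_order(h) == 9): for a vma with
 * vm_pgoff == 1024 (file offset 4MB), a fault at vm_start + 4MB yields
 * (4MB >> 21) + (1024 >> 9) = 2 + 2 = huge page index 4 in the mapping,
 * i.e. file offset 8MB.
 */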
0fe6e20b
NH
455pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
456 unsigned long address)
457{
458 return vma_hugecache_offset(hstate_vma(vma), vma, address);
459}
460
08fba699
MG
461/*
462 * Return the size of the pages allocated when backing a VMA. In the majority
463 * of cases this will be the same size as that used by the page table entries.
464 */
465unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
466{
467 struct hstate *hstate;
468
469 if (!is_vm_hugetlb_page(vma))
470 return PAGE_SIZE;
471
472 hstate = hstate_vma(vma);
473
2415cf12 474 return 1UL << huge_page_shift(hstate);
08fba699 475}
f340ca0f 476EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
08fba699 477
3340289d
MG
478/*
479 * Return the page size being used by the MMU to back a VMA. In the majority
480 * of cases, the page size used by the kernel matches the MMU size. On
481 * architectures where it differs, an architecture-specific version of this
482 * function is required.
483 */
484#ifndef vma_mmu_pagesize
485unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
486{
487 return vma_kernel_pagesize(vma);
488}
489#endif
490
84afd99b
AW
491/*
492 * Flags for MAP_PRIVATE reservations. These are stored in the bottom
493 * bits of the reservation map pointer, which are always clear due to
494 * alignment.
495 */
496#define HPAGE_RESV_OWNER (1UL << 0)
497#define HPAGE_RESV_UNMAPPED (1UL << 1)
04f2cbe3 498#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
84afd99b 499
a1e78772
MG
500/*
501 * These helpers are used to track how many pages are reserved for
502 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
503 * is guaranteed to have its future faults succeed.
504 *
505 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
506 * the reserve counters are updated with the hugetlb_lock held. It is safe
507 * to reset the VMA at fork() time as it is not in use yet and there is no
508 * chance of the global counters getting corrupted as a result of the values.
84afd99b
AW
509 *
510 * The private mapping reservation is represented in a subtly different
511 * manner to a shared mapping. A shared mapping has a region map associated
512 * with the underlying file, this region map represents the backing file
513 * pages which have ever had a reservation assigned; this persists even
514 * after the page is instantiated. A private mapping has a region map
515 * associated with the original mmap which is attached to all VMAs which
516 * reference it, this region map represents those offsets which have consumed
517 * reservation, i.e. where pages have been instantiated.
a1e78772 518 */
e7c4b0bf
AW
519static unsigned long get_vma_private_data(struct vm_area_struct *vma)
520{
521 return (unsigned long)vma->vm_private_data;
522}
523
524static void set_vma_private_data(struct vm_area_struct *vma,
525 unsigned long value)
526{
527 vma->vm_private_data = (void *)value;
528}
529
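/*
 * Note on the encoding: for a MAP_PRIVATE mapping, vm_private_data carries
 * both the resv_map pointer and the HPAGE_RESV_* flags defined above.
 * Because a resv_map is obtained from kmalloc(), its address is at least
 * word aligned, so the bottom two bits are always clear and can safely
 * hold HPAGE_RESV_OWNER and HPAGE_RESV_UNMAPPED; the accessors below
 * separate pointer and flags again using HPAGE_RESV_MASK.
 */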
9119a41e 530struct resv_map *resv_map_alloc(void)
84afd99b
AW
531{
532 struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
533 if (!resv_map)
534 return NULL;
535
536 kref_init(&resv_map->refs);
7b24d861 537 spin_lock_init(&resv_map->lock);
84afd99b
AW
538 INIT_LIST_HEAD(&resv_map->regions);
539
540 return resv_map;
541}
542
9119a41e 543void resv_map_release(struct kref *ref)
84afd99b
AW
544{
545 struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
546
547 /* Clear out any active regions before we release the map. */
1406ec9b 548 region_truncate(resv_map, 0);
84afd99b
AW
549 kfree(resv_map);
550}
551
4e35f483
JK
552static inline struct resv_map *inode_resv_map(struct inode *inode)
553{
554 return inode->i_mapping->private_data;
555}
556
84afd99b 557static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
a1e78772 558{
81d1b09c 559 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
4e35f483
JK
560 if (vma->vm_flags & VM_MAYSHARE) {
561 struct address_space *mapping = vma->vm_file->f_mapping;
562 struct inode *inode = mapping->host;
563
564 return inode_resv_map(inode);
565
566 } else {
84afd99b
AW
567 return (struct resv_map *)(get_vma_private_data(vma) &
568 ~HPAGE_RESV_MASK);
4e35f483 569 }
a1e78772
MG
570}
571
84afd99b 572static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
a1e78772 573{
81d1b09c
SL
574 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
575 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
a1e78772 576
84afd99b
AW
577 set_vma_private_data(vma, (get_vma_private_data(vma) &
578 HPAGE_RESV_MASK) | (unsigned long)map);
04f2cbe3
MG
579}
580
581static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
582{
81d1b09c
SL
583 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
584 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
e7c4b0bf
AW
585
586 set_vma_private_data(vma, get_vma_private_data(vma) | flags);
04f2cbe3
MG
587}
588
589static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
590{
81d1b09c 591 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
e7c4b0bf
AW
592
593 return (get_vma_private_data(vma) & flag) != 0;
a1e78772
MG
594}
595
04f2cbe3 596/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
a1e78772
MG
597void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
598{
81d1b09c 599 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
f83a275d 600 if (!(vma->vm_flags & VM_MAYSHARE))
a1e78772
MG
601 vma->vm_private_data = (void *)0;
602}
603
604/* Returns true if the VMA has associated reserve pages */
af0ed73e 605static int vma_has_reserves(struct vm_area_struct *vma, long chg)
a1e78772 606{
af0ed73e
JK
607 if (vma->vm_flags & VM_NORESERVE) {
608 /*
609 * This address is already reserved by another process (chg == 0),
610 * so we should decrement the reserved count. Without decrementing,
611 * the reserve count remains after releasing the inode, because this
612 * allocated page will go into the page cache and is regarded as
613 * coming from the reserved pool in the release step. Currently, we
614 * don't have any other solution to deal with this situation
615 * properly, so add a work-around here.
616 */
617 if (vma->vm_flags & VM_MAYSHARE && chg == 0)
618 return 1;
619 else
620 return 0;
621 }
a63884e9
JK
622
623 /* Shared mappings always use reserves */
f83a275d 624 if (vma->vm_flags & VM_MAYSHARE)
7f09ca51 625 return 1;
a63884e9
JK
626
627 /*
628 * Only the process that called mmap() has reserves for
629 * private mappings.
630 */
7f09ca51
MG
631 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
632 return 1;
a63884e9 633
7f09ca51 634 return 0;
a1e78772
MG
635}
636
a5516438 637static void enqueue_huge_page(struct hstate *h, struct page *page)
1da177e4
LT
638{
639 int nid = page_to_nid(page);
0edaecfa 640 list_move(&page->lru, &h->hugepage_freelists[nid]);
a5516438
AK
641 h->free_huge_pages++;
642 h->free_huge_pages_node[nid]++;
1da177e4
LT
643}
644
bf50bab2
NH
645static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
646{
647 struct page *page;
648
c8721bbb
NH
649 list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
650 if (!is_migrate_isolate_page(page))
651 break;
652 /*
653 * if a 'non-isolated free hugepage' is not found on the list,
654 * the allocation fails.
655 */
656 if (&h->hugepage_freelists[nid] == &page->lru)
bf50bab2 657 return NULL;
0edaecfa 658 list_move(&page->lru, &h->hugepage_activelist);
a9869b83 659 set_page_refcounted(page);
bf50bab2
NH
660 h->free_huge_pages--;
661 h->free_huge_pages_node[nid]--;
662 return page;
663}
664
86cdb465
NH
665/* Movability of hugepages depends on migration support. */
666static inline gfp_t htlb_alloc_mask(struct hstate *h)
667{
100873d7 668 if (hugepages_treat_as_movable || hugepage_migration_supported(h))
86cdb465
NH
669 return GFP_HIGHUSER_MOVABLE;
670 else
671 return GFP_HIGHUSER;
672}
673
a5516438
AK
674static struct page *dequeue_huge_page_vma(struct hstate *h,
675 struct vm_area_struct *vma,
af0ed73e
JK
676 unsigned long address, int avoid_reserve,
677 long chg)
1da177e4 678{
b1c12cbc 679 struct page *page = NULL;
480eccf9 680 struct mempolicy *mpol;
19770b32 681 nodemask_t *nodemask;
c0ff7453 682 struct zonelist *zonelist;
dd1a239f
MG
683 struct zone *zone;
684 struct zoneref *z;
cc9a6c87 685 unsigned int cpuset_mems_cookie;
1da177e4 686
a1e78772
MG
687 /*
688 * A child process with MAP_PRIVATE mappings created by its parent
689 * has no page reserves. This check ensures that reservations are
690 * not "stolen". The child may still get SIGKILLed.
691 */
af0ed73e 692 if (!vma_has_reserves(vma, chg) &&
a5516438 693 h->free_huge_pages - h->resv_huge_pages == 0)
c0ff7453 694 goto err;
a1e78772 695
04f2cbe3 696 /* If reserves cannot be used, ensure enough pages are in the pool */
a5516438 697 if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
6eab04a8 698 goto err;
04f2cbe3 699
9966c4bb 700retry_cpuset:
d26914d1 701 cpuset_mems_cookie = read_mems_allowed_begin();
9966c4bb 702 zonelist = huge_zonelist(vma, address,
86cdb465 703 htlb_alloc_mask(h), &mpol, &nodemask);
9966c4bb 704
19770b32
MG
705 for_each_zone_zonelist_nodemask(zone, z, zonelist,
706 MAX_NR_ZONES - 1, nodemask) {
344736f2 707 if (cpuset_zone_allowed(zone, htlb_alloc_mask(h))) {
bf50bab2
NH
708 page = dequeue_huge_page_node(h, zone_to_nid(zone));
709 if (page) {
af0ed73e
JK
710 if (avoid_reserve)
711 break;
712 if (!vma_has_reserves(vma, chg))
713 break;
714
07443a85 715 SetPagePrivate(page);
af0ed73e 716 h->resv_huge_pages--;
bf50bab2
NH
717 break;
718 }
3abf7afd 719 }
1da177e4 720 }
cc9a6c87 721
52cd3b07 722 mpol_cond_put(mpol);
d26914d1 723 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
cc9a6c87 724 goto retry_cpuset;
1da177e4 725 return page;
cc9a6c87
MG
726
727err:
cc9a6c87 728 return NULL;
1da177e4
LT
729}
730
1cac6f2c
LC
731/*
732 * common helper functions for hstate_next_node_to_{alloc|free}.
733 * We may have allocated or freed a huge page based on a different
734 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
735 * be outside of *nodes_allowed. Ensure that we use an allowed
736 * node for alloc or free.
737 */
738static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
739{
740 nid = next_node(nid, *nodes_allowed);
741 if (nid == MAX_NUMNODES)
742 nid = first_node(*nodes_allowed);
743 VM_BUG_ON(nid >= MAX_NUMNODES);
744
745 return nid;
746}
747
748static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
749{
750 if (!node_isset(nid, *nodes_allowed))
751 nid = next_node_allowed(nid, nodes_allowed);
752 return nid;
753}
754
755/*
756 * returns the previously saved node ["this node"] from which to
757 * allocate a persistent huge page for the pool and advances the
758 * next node from which to allocate, handling wrap at end of node
759 * mask.
760 */
761static int hstate_next_node_to_alloc(struct hstate *h,
762 nodemask_t *nodes_allowed)
763{
764 int nid;
765
766 VM_BUG_ON(!nodes_allowed);
767
768 nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
769 h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
770
771 return nid;
772}
773
774/*
775 * helper for free_pool_huge_page() - return the previously saved
776 * node ["this node"] from which to free a huge page. Advance the
777 * next node id whether or not we find a free huge page to free so
778 * that the next attempt to free addresses the next node.
779 */
780static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
781{
782 int nid;
783
784 VM_BUG_ON(!nodes_allowed);
785
786 nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
787 h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
788
789 return nid;
790}
791
792#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask) \
793 for (nr_nodes = nodes_weight(*mask); \
794 nr_nodes > 0 && \
795 ((node = hstate_next_node_to_alloc(hs, mask)) || 1); \
796 nr_nodes--)
797
798#define for_each_node_mask_to_free(hs, nr_nodes, node, mask) \
799 for (nr_nodes = nodes_weight(*mask); \
800 nr_nodes > 0 && \
801 ((node = hstate_next_node_to_free(hs, mask)) || 1); \
802 nr_nodes--)
803
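/*
 * Example of the round-robin node selection implemented above
 * (illustrative state): with nodes_allowed = {0, 2, 3} and
 * h->next_nid_to_alloc == 1, successive calls to
 * hstate_next_node_to_alloc() return 2, 3, 0, 2, ... -- node 1 is skipped
 * because it is not in the mask, and the iteration wraps at the end of
 * the node mask. for_each_node_mask_to_alloc() simply walks this sequence
 * for at most nodes_weight(*mask) iterations.
 */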
944d9fec
LC
804#if defined(CONFIG_CMA) && defined(CONFIG_X86_64)
805static void destroy_compound_gigantic_page(struct page *page,
806 unsigned long order)
807{
808 int i;
809 int nr_pages = 1 << order;
810 struct page *p = page + 1;
811
812 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
813 __ClearPageTail(p);
814 set_page_refcounted(p);
815 p->first_page = NULL;
816 }
817
818 set_compound_order(page, 0);
819 __ClearPageHead(page);
820}
821
822static void free_gigantic_page(struct page *page, unsigned order)
823{
824 free_contig_range(page_to_pfn(page), 1 << order);
825}
826
827static int __alloc_gigantic_page(unsigned long start_pfn,
828 unsigned long nr_pages)
829{
830 unsigned long end_pfn = start_pfn + nr_pages;
831 return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
832}
833
834static bool pfn_range_valid_gigantic(unsigned long start_pfn,
835 unsigned long nr_pages)
836{
837 unsigned long i, end_pfn = start_pfn + nr_pages;
838 struct page *page;
839
840 for (i = start_pfn; i < end_pfn; i++) {
841 if (!pfn_valid(i))
842 return false;
843
844 page = pfn_to_page(i);
845
846 if (PageReserved(page))
847 return false;
848
849 if (page_count(page) > 0)
850 return false;
851
852 if (PageHuge(page))
853 return false;
854 }
855
856 return true;
857}
858
859static bool zone_spans_last_pfn(const struct zone *zone,
860 unsigned long start_pfn, unsigned long nr_pages)
861{
862 unsigned long last_pfn = start_pfn + nr_pages - 1;
863 return zone_spans_pfn(zone, last_pfn);
864}
865
866static struct page *alloc_gigantic_page(int nid, unsigned order)
867{
868 unsigned long nr_pages = 1 << order;
869 unsigned long ret, pfn, flags;
870 struct zone *z;
871
872 z = NODE_DATA(nid)->node_zones;
873 for (; z - NODE_DATA(nid)->node_zones < MAX_NR_ZONES; z++) {
874 spin_lock_irqsave(&z->lock, flags);
875
876 pfn = ALIGN(z->zone_start_pfn, nr_pages);
877 while (zone_spans_last_pfn(z, pfn, nr_pages)) {
878 if (pfn_range_valid_gigantic(pfn, nr_pages)) {
879 /*
880 * We release the zone lock here because
881 * alloc_contig_range() will also lock the zone
882 * at some point. If there's an allocation
883 * spinning on this lock, it may win the race
884 * and cause alloc_contig_range() to fail...
885 */
886 spin_unlock_irqrestore(&z->lock, flags);
887 ret = __alloc_gigantic_page(pfn, nr_pages);
888 if (!ret)
889 return pfn_to_page(pfn);
890 spin_lock_irqsave(&z->lock, flags);
891 }
892 pfn += nr_pages;
893 }
894
895 spin_unlock_irqrestore(&z->lock, flags);
896 }
897
898 return NULL;
899}
900
901static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
902static void prep_compound_gigantic_page(struct page *page, unsigned long order);
903
904static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid)
905{
906 struct page *page;
907
908 page = alloc_gigantic_page(nid, huge_page_order(h));
909 if (page) {
910 prep_compound_gigantic_page(page, huge_page_order(h));
911 prep_new_huge_page(h, page, nid);
912 }
913
914 return page;
915}
916
917static int alloc_fresh_gigantic_page(struct hstate *h,
918 nodemask_t *nodes_allowed)
919{
920 struct page *page = NULL;
921 int nr_nodes, node;
922
923 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
924 page = alloc_fresh_gigantic_page_node(h, node);
925 if (page)
926 return 1;
927 }
928
929 return 0;
930}
931
932static inline bool gigantic_page_supported(void) { return true; }
933#else
934static inline bool gigantic_page_supported(void) { return false; }
935static inline void free_gigantic_page(struct page *page, unsigned order) { }
936static inline void destroy_compound_gigantic_page(struct page *page,
937 unsigned long order) { }
938static inline int alloc_fresh_gigantic_page(struct hstate *h,
939 nodemask_t *nodes_allowed) { return 0; }
940#endif
941
a5516438 942static void update_and_free_page(struct hstate *h, struct page *page)
6af2acb6
AL
943{
944 int i;
a5516438 945
944d9fec
LC
946 if (hstate_is_gigantic(h) && !gigantic_page_supported())
947 return;
18229df5 948
a5516438
AK
949 h->nr_huge_pages--;
950 h->nr_huge_pages_node[page_to_nid(page)]--;
951 for (i = 0; i < pages_per_huge_page(h); i++) {
32f84528
CF
952 page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
953 1 << PG_referenced | 1 << PG_dirty |
a7407a27
LC
954 1 << PG_active | 1 << PG_private |
955 1 << PG_writeback);
6af2acb6 956 }
309381fe 957 VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
6af2acb6
AL
958 set_compound_page_dtor(page, NULL);
959 set_page_refcounted(page);
944d9fec
LC
960 if (hstate_is_gigantic(h)) {
961 destroy_compound_gigantic_page(page, huge_page_order(h));
962 free_gigantic_page(page, huge_page_order(h));
963 } else {
964 arch_release_hugepage(page);
965 __free_pages(page, huge_page_order(h));
966 }
6af2acb6
AL
967}
968
e5ff2159
AK
969struct hstate *size_to_hstate(unsigned long size)
970{
971 struct hstate *h;
972
973 for_each_hstate(h) {
974 if (huge_page_size(h) == size)
975 return h;
976 }
977 return NULL;
978}
979
bcc54222
NH
980/*
981 * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
982 * to hstate->hugepage_activelist.)
983 *
984 * This function can be called for tail pages, but never returns true for them.
985 */
986bool page_huge_active(struct page *page)
987{
988 VM_BUG_ON_PAGE(!PageHuge(page), page);
989 return PageHead(page) && PagePrivate(&page[1]);
990}
991
992/* never called for tail page */
993static void set_page_huge_active(struct page *page)
994{
995 VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
996 SetPagePrivate(&page[1]);
997}
998
999static void clear_page_huge_active(struct page *page)
1000{
1001 VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1002 ClearPagePrivate(&page[1]);
1003}
1004
8f1d26d0 1005void free_huge_page(struct page *page)
27a85ef1 1006{
a5516438
AK
1007 /*
1008 * Can't pass hstate in here because it is called from the
1009 * compound page destructor.
1010 */
e5ff2159 1011 struct hstate *h = page_hstate(page);
7893d1d5 1012 int nid = page_to_nid(page);
90481622
DG
1013 struct hugepage_subpool *spool =
1014 (struct hugepage_subpool *)page_private(page);
07443a85 1015 bool restore_reserve;
27a85ef1 1016
e5df70ab 1017 set_page_private(page, 0);
23be7468 1018 page->mapping = NULL;
7893d1d5 1019 BUG_ON(page_count(page));
0fe6e20b 1020 BUG_ON(page_mapcount(page));
07443a85 1021 restore_reserve = PagePrivate(page);
16c794b4 1022 ClearPagePrivate(page);
27a85ef1 1023
1c5ecae3
MK
1024 /*
1025 * A return code of zero implies that the subpool will be under its
1026 * minimum size if the reservation is not restored after the page is freed.
1027 * Therefore, force the restore_reserve operation.
1028 */
1029 if (hugepage_subpool_put_pages(spool, 1) == 0)
1030 restore_reserve = true;
1031
27a85ef1 1032 spin_lock(&hugetlb_lock);
bcc54222 1033 clear_page_huge_active(page);
6d76dcf4
AK
1034 hugetlb_cgroup_uncharge_page(hstate_index(h),
1035 pages_per_huge_page(h), page);
07443a85
JK
1036 if (restore_reserve)
1037 h->resv_huge_pages++;
1038
944d9fec 1039 if (h->surplus_huge_pages_node[nid]) {
0edaecfa
AK
1040 /* remove the page from active list */
1041 list_del(&page->lru);
a5516438
AK
1042 update_and_free_page(h, page);
1043 h->surplus_huge_pages--;
1044 h->surplus_huge_pages_node[nid]--;
7893d1d5 1045 } else {
5d3a551c 1046 arch_clear_hugepage_flags(page);
a5516438 1047 enqueue_huge_page(h, page);
7893d1d5 1048 }
27a85ef1
DG
1049 spin_unlock(&hugetlb_lock);
1050}
1051
a5516438 1052static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
b7ba30c6 1053{
0edaecfa 1054 INIT_LIST_HEAD(&page->lru);
b7ba30c6
AK
1055 set_compound_page_dtor(page, free_huge_page);
1056 spin_lock(&hugetlb_lock);
9dd540e2 1057 set_hugetlb_cgroup(page, NULL);
a5516438
AK
1058 h->nr_huge_pages++;
1059 h->nr_huge_pages_node[nid]++;
b7ba30c6
AK
1060 spin_unlock(&hugetlb_lock);
1061 put_page(page); /* free it into the hugepage allocator */
1062}
1063
2906dd52 1064static void prep_compound_gigantic_page(struct page *page, unsigned long order)
20a0307c
WF
1065{
1066 int i;
1067 int nr_pages = 1 << order;
1068 struct page *p = page + 1;
1069
1070 /* we rely on prep_new_huge_page to set the destructor */
1071 set_compound_order(page, order);
1072 __SetPageHead(page);
ef5a22be 1073 __ClearPageReserved(page);
20a0307c 1074 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
ef5a22be
AA
1075 /*
1076 * For gigantic hugepages allocated through bootmem at
1077 * boot, it's safer to be consistent with the not-gigantic
1078 * hugepages and clear the PG_reserved bit from all tail pages
1079 * too. Otherwse drivers using get_user_pages() to access tail
1080 * pages may get the reference counting wrong if they see
1081 * PG_reserved set on a tail page (despite the head page not
1082 * having PG_reserved set). Enforcing this consistency between
1083 * head and tail pages allows drivers to optimize away a check
1084 * on the head page when they need to know if put_page() is needed
1085 * after get_user_pages().
1086 */
1087 __ClearPageReserved(p);
58a84aa9 1088 set_page_count(p, 0);
20a0307c 1089 p->first_page = page;
44fc8057
DR
1090 /* Make sure p->first_page is always valid for PageTail() */
1091 smp_wmb();
1092 __SetPageTail(p);
20a0307c
WF
1093 }
1094}
1095
7795912c
AM
1096/*
1097 * PageHuge() only returns true for hugetlbfs pages, but not for normal or
1098 * transparent huge pages. See the PageTransHuge() documentation for more
1099 * details.
1100 */
20a0307c
WF
1101int PageHuge(struct page *page)
1102{
20a0307c
WF
1103 if (!PageCompound(page))
1104 return 0;
1105
1106 page = compound_head(page);
758f66a2 1107 return get_compound_page_dtor(page) == free_huge_page;
20a0307c 1108}
43131e14
NH
1109EXPORT_SYMBOL_GPL(PageHuge);
1110
27c73ae7
AA
1111/*
1112 * PageHeadHuge() only returns true for hugetlbfs head page, but not for
1113 * normal or transparent huge pages.
1114 */
1115int PageHeadHuge(struct page *page_head)
1116{
27c73ae7
AA
1117 if (!PageHead(page_head))
1118 return 0;
1119
758f66a2 1120 return get_compound_page_dtor(page_head) == free_huge_page;
27c73ae7 1121}
27c73ae7 1122
13d60f4b
ZY
1123pgoff_t __basepage_index(struct page *page)
1124{
1125 struct page *page_head = compound_head(page);
1126 pgoff_t index = page_index(page_head);
1127 unsigned long compound_idx;
1128
1129 if (!PageHuge(page_head))
1130 return page_index(page);
1131
1132 if (compound_order(page_head) >= MAX_ORDER)
1133 compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
1134 else
1135 compound_idx = page - page_head;
1136
1137 return (index << compound_order(page_head)) + compound_idx;
1138}
1139
a5516438 1140static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
1da177e4 1141{
1da177e4 1142 struct page *page;
f96efd58 1143
6484eb3e 1144 page = alloc_pages_exact_node(nid,
86cdb465 1145 htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
551883ae 1146 __GFP_REPEAT|__GFP_NOWARN,
a5516438 1147 huge_page_order(h));
1da177e4 1148 if (page) {
7f2e9525 1149 if (arch_prepare_hugepage(page)) {
caff3a2c 1150 __free_pages(page, huge_page_order(h));
7b8ee84d 1151 return NULL;
7f2e9525 1152 }
a5516438 1153 prep_new_huge_page(h, page, nid);
1da177e4 1154 }
63b4613c
NA
1155
1156 return page;
1157}
1158
b2261026
JK
1159static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
1160{
1161 struct page *page;
1162 int nr_nodes, node;
1163 int ret = 0;
1164
1165 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1166 page = alloc_fresh_huge_page_node(h, node);
1167 if (page) {
1168 ret = 1;
1169 break;
1170 }
1171 }
1172
1173 if (ret)
1174 count_vm_event(HTLB_BUDDY_PGALLOC);
1175 else
1176 count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1177
1178 return ret;
1179}
1180
e8c5c824
LS
1181/*
1182 * Free huge page from pool from next node to free.
1183 * Attempt to keep persistent huge pages more or less
1184 * balanced over allowed nodes.
1185 * Called with hugetlb_lock locked.
1186 */
6ae11b27
LS
1187static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1188 bool acct_surplus)
e8c5c824 1189{
b2261026 1190 int nr_nodes, node;
e8c5c824
LS
1191 int ret = 0;
1192
b2261026 1193 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
685f3457
LS
1194 /*
1195 * If we're returning unused surplus pages, only examine
1196 * nodes with surplus pages.
1197 */
b2261026
JK
1198 if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
1199 !list_empty(&h->hugepage_freelists[node])) {
e8c5c824 1200 struct page *page =
b2261026 1201 list_entry(h->hugepage_freelists[node].next,
e8c5c824
LS
1202 struct page, lru);
1203 list_del(&page->lru);
1204 h->free_huge_pages--;
b2261026 1205 h->free_huge_pages_node[node]--;
685f3457
LS
1206 if (acct_surplus) {
1207 h->surplus_huge_pages--;
b2261026 1208 h->surplus_huge_pages_node[node]--;
685f3457 1209 }
e8c5c824
LS
1210 update_and_free_page(h, page);
1211 ret = 1;
9a76db09 1212 break;
e8c5c824 1213 }
b2261026 1214 }
e8c5c824
LS
1215
1216 return ret;
1217}
1218
c8721bbb
NH
1219/*
1220 * Dissolve a given free hugepage into free buddy pages. This function does
1221 * nothing for in-use (including surplus) hugepages.
1222 */
1223static void dissolve_free_huge_page(struct page *page)
1224{
1225 spin_lock(&hugetlb_lock);
1226 if (PageHuge(page) && !page_count(page)) {
1227 struct hstate *h = page_hstate(page);
1228 int nid = page_to_nid(page);
1229 list_del(&page->lru);
1230 h->free_huge_pages--;
1231 h->free_huge_pages_node[nid]--;
1232 update_and_free_page(h, page);
1233 }
1234 spin_unlock(&hugetlb_lock);
1235}
1236
1237/*
1238 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
1239 * make specified memory blocks removable from the system.
1241 * Note that start_pfn should be aligned with the (minimum) hugepage size.
1241 */
1242void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
1243{
c8721bbb 1244 unsigned long pfn;
c8721bbb 1245
d0177639
LZ
1246 if (!hugepages_supported())
1247 return;
1248
641844f5
NH
1249 VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << minimum_order));
1250 for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order)
c8721bbb
NH
1251 dissolve_free_huge_page(pfn_to_page(pfn));
1252}
1253
bf50bab2 1254static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
7893d1d5
AL
1255{
1256 struct page *page;
bf50bab2 1257 unsigned int r_nid;
7893d1d5 1258
bae7f4ae 1259 if (hstate_is_gigantic(h))
aa888a74
AK
1260 return NULL;
1261
d1c3fb1f
NA
1262 /*
1263 * Assume we will successfully allocate the surplus page to
1264 * prevent racing processes from causing the surplus to exceed
1265 * overcommit
1266 *
1267 * This however introduces a different race, where a process B
1268 * tries to grow the static hugepage pool while alloc_pages() is
1269 * called by process A. B will only examine the per-node
1270 * counters in determining if surplus huge pages can be
1271 * converted to normal huge pages in adjust_pool_surplus(). A
1272 * won't be able to increment the per-node counter, until the
1273 * lock is dropped by B, but B doesn't drop hugetlb_lock until
1274 * no more huge pages can be converted from surplus to normal
1275 * state (and doesn't try to convert again). Thus, we have a
1276 * case where a surplus huge page exists, the pool is grown, and
1277 * the surplus huge page still exists after, even though it
1278 * should just have been converted to a normal huge page. This
1279 * does not leak memory, though, as the hugepage will be freed
1280 * once it is out of use. It also does not allow the counters to
1281 * go out of whack in adjust_pool_surplus() as we don't modify
1282 * the node values until we've gotten the hugepage and only the
1283 * per-node value is checked there.
1284 */
1285 spin_lock(&hugetlb_lock);
a5516438 1286 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
d1c3fb1f
NA
1287 spin_unlock(&hugetlb_lock);
1288 return NULL;
1289 } else {
a5516438
AK
1290 h->nr_huge_pages++;
1291 h->surplus_huge_pages++;
d1c3fb1f
NA
1292 }
1293 spin_unlock(&hugetlb_lock);
1294
bf50bab2 1295 if (nid == NUMA_NO_NODE)
86cdb465 1296 page = alloc_pages(htlb_alloc_mask(h)|__GFP_COMP|
bf50bab2
NH
1297 __GFP_REPEAT|__GFP_NOWARN,
1298 huge_page_order(h));
1299 else
1300 page = alloc_pages_exact_node(nid,
86cdb465 1301 htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
bf50bab2 1302 __GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));
d1c3fb1f 1303
caff3a2c
GS
1304 if (page && arch_prepare_hugepage(page)) {
1305 __free_pages(page, huge_page_order(h));
ea5768c7 1306 page = NULL;
caff3a2c
GS
1307 }
1308
d1c3fb1f 1309 spin_lock(&hugetlb_lock);
7893d1d5 1310 if (page) {
0edaecfa 1311 INIT_LIST_HEAD(&page->lru);
bf50bab2 1312 r_nid = page_to_nid(page);
7893d1d5 1313 set_compound_page_dtor(page, free_huge_page);
9dd540e2 1314 set_hugetlb_cgroup(page, NULL);
d1c3fb1f
NA
1315 /*
1316 * We incremented the global counters already
1317 */
bf50bab2
NH
1318 h->nr_huge_pages_node[r_nid]++;
1319 h->surplus_huge_pages_node[r_nid]++;
3b116300 1320 __count_vm_event(HTLB_BUDDY_PGALLOC);
d1c3fb1f 1321 } else {
a5516438
AK
1322 h->nr_huge_pages--;
1323 h->surplus_huge_pages--;
3b116300 1324 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
7893d1d5 1325 }
d1c3fb1f 1326 spin_unlock(&hugetlb_lock);
7893d1d5
AL
1327
1328 return page;
1329}
1330
bf50bab2
NH
1331/*
1332 * This allocation function is useful in the context where vma is irrelevant.
1333 * E.g. soft-offlining uses this function because it only cares physical
1334 * address of error page.
1335 */
1336struct page *alloc_huge_page_node(struct hstate *h, int nid)
1337{
4ef91848 1338 struct page *page = NULL;
bf50bab2
NH
1339
1340 spin_lock(&hugetlb_lock);
4ef91848
JK
1341 if (h->free_huge_pages - h->resv_huge_pages > 0)
1342 page = dequeue_huge_page_node(h, nid);
bf50bab2
NH
1343 spin_unlock(&hugetlb_lock);
1344
94ae8ba7 1345 if (!page)
bf50bab2
NH
1346 page = alloc_buddy_huge_page(h, nid);
1347
1348 return page;
1349}
1350
e4e574b7 1351/*
25985edc 1352 * Increase the hugetlb pool such that it can accommodate a reservation
e4e574b7
AL
1353 * of size 'delta'.
1354 */
a5516438 1355static int gather_surplus_pages(struct hstate *h, int delta)
e4e574b7
AL
1356{
1357 struct list_head surplus_list;
1358 struct page *page, *tmp;
1359 int ret, i;
1360 int needed, allocated;
28073b02 1361 bool alloc_ok = true;
e4e574b7 1362
a5516438 1363 needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
ac09b3a1 1364 if (needed <= 0) {
a5516438 1365 h->resv_huge_pages += delta;
e4e574b7 1366 return 0;
ac09b3a1 1367 }
e4e574b7
AL
1368
1369 allocated = 0;
1370 INIT_LIST_HEAD(&surplus_list);
1371
1372 ret = -ENOMEM;
1373retry:
1374 spin_unlock(&hugetlb_lock);
1375 for (i = 0; i < needed; i++) {
bf50bab2 1376 page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
28073b02
HD
1377 if (!page) {
1378 alloc_ok = false;
1379 break;
1380 }
e4e574b7
AL
1381 list_add(&page->lru, &surplus_list);
1382 }
28073b02 1383 allocated += i;
e4e574b7
AL
1384
1385 /*
1386 * After retaking hugetlb_lock, we need to recalculate 'needed'
1387 * because either resv_huge_pages or free_huge_pages may have changed.
1388 */
1389 spin_lock(&hugetlb_lock);
a5516438
AK
1390 needed = (h->resv_huge_pages + delta) -
1391 (h->free_huge_pages + allocated);
28073b02
HD
1392 if (needed > 0) {
1393 if (alloc_ok)
1394 goto retry;
1395 /*
1396 * We were not able to allocate enough pages to
1397 * satisfy the entire reservation so we free what
1398 * we've allocated so far.
1399 */
1400 goto free;
1401 }
e4e574b7
AL
1402 /*
1403 * The surplus_list now contains _at_least_ the number of extra pages
25985edc 1404 * needed to accommodate the reservation. Add the appropriate number
e4e574b7 1405 * of pages to the hugetlb pool and free the extras back to the buddy
ac09b3a1
AL
1406 * allocator. Commit the entire reservation here to prevent another
1407 * process from stealing the pages as they are added to the pool but
1408 * before they are reserved.
e4e574b7
AL
1409 */
1410 needed += allocated;
a5516438 1411 h->resv_huge_pages += delta;
e4e574b7 1412 ret = 0;
a9869b83 1413
19fc3f0a 1414 /* Free the needed pages to the hugetlb pool */
e4e574b7 1415 list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
19fc3f0a
AL
1416 if ((--needed) < 0)
1417 break;
a9869b83
NH
1418 /*
1419 * This page is now managed by the hugetlb allocator and has
1420 * no users -- drop the buddy allocator's reference.
1421 */
1422 put_page_testzero(page);
309381fe 1423 VM_BUG_ON_PAGE(page_count(page), page);
a5516438 1424 enqueue_huge_page(h, page);
19fc3f0a 1425 }
28073b02 1426free:
b0365c8d 1427 spin_unlock(&hugetlb_lock);
19fc3f0a
AL
1428
1429 /* Free unnecessary surplus pages to the buddy allocator */
c0d934ba
JK
1430 list_for_each_entry_safe(page, tmp, &surplus_list, lru)
1431 put_page(page);
a9869b83 1432 spin_lock(&hugetlb_lock);
e4e574b7
AL
1433
1434 return ret;
1435}
1436
1437/*
1438 * When releasing a hugetlb pool reservation, any surplus pages that were
1439 * allocated to satisfy the reservation must be explicitly freed if they were
1440 * never used.
685f3457 1441 * Called with hugetlb_lock held.
e4e574b7 1442 */
a5516438
AK
1443static void return_unused_surplus_pages(struct hstate *h,
1444 unsigned long unused_resv_pages)
e4e574b7 1445{
e4e574b7
AL
1446 unsigned long nr_pages;
1447
ac09b3a1 1448 /* Uncommit the reservation */
a5516438 1449 h->resv_huge_pages -= unused_resv_pages;
ac09b3a1 1450
aa888a74 1451 /* Cannot return gigantic pages currently */
bae7f4ae 1452 if (hstate_is_gigantic(h))
aa888a74
AK
1453 return;
1454
a5516438 1455 nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
e4e574b7 1456
685f3457
LS
1457 /*
1458 * We want to release as many surplus pages as possible, spread
9b5e5d0f
LS
1459 * evenly across all nodes with memory. Iterate across these nodes
1460 * until we can no longer free unreserved surplus pages. This occurs
1461 * when the nodes with surplus pages have no free pages.
1462 * free_pool_huge_page() will balance the freed pages across the
1463 * on-line nodes with memory and will handle the hstate accounting.
685f3457
LS
1464 */
1465 while (nr_pages--) {
8cebfcd0 1466 if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
685f3457 1467 break;
7848a4bf 1468 cond_resched_lock(&hugetlb_lock);
e4e574b7
AL
1469 }
1470}
1471
c37f9fb1
AW
1472/*
1473 * Determine if the huge page at addr within the vma has an associated
1474 * reservation. Where it does not we will need to logically increase
90481622
DG
1475 * reservation and actually increase subpool usage before an allocation
1476 * can occur. Where any new reservation would be required the
1477 * reservation change is prepared, but not committed. Once the page
1478 * has been allocated from the subpool and instantiated the change should
1479 * be committed via vma_commit_reservation. No action is required on
1480 * failure.
c37f9fb1 1481 */
e2f17d94 1482static long vma_needs_reservation(struct hstate *h,
a5516438 1483 struct vm_area_struct *vma, unsigned long addr)
c37f9fb1 1484{
4e35f483
JK
1485 struct resv_map *resv;
1486 pgoff_t idx;
1487 long chg;
c37f9fb1 1488
4e35f483
JK
1489 resv = vma_resv_map(vma);
1490 if (!resv)
84afd99b 1491 return 1;
c37f9fb1 1492
4e35f483
JK
1493 idx = vma_hugecache_offset(h, vma, addr);
1494 chg = region_chg(resv, idx, idx + 1);
84afd99b 1495
4e35f483
JK
1496 if (vma->vm_flags & VM_MAYSHARE)
1497 return chg;
1498 else
1499 return chg < 0 ? chg : 0;
c37f9fb1 1500}
a5516438
AK
1501static void vma_commit_reservation(struct hstate *h,
1502 struct vm_area_struct *vma, unsigned long addr)
c37f9fb1 1503{
4e35f483
JK
1504 struct resv_map *resv;
1505 pgoff_t idx;
84afd99b 1506
4e35f483
JK
1507 resv = vma_resv_map(vma);
1508 if (!resv)
1509 return;
84afd99b 1510
4e35f483
JK
1511 idx = vma_hugecache_offset(h, vma, addr);
1512 region_add(resv, idx, idx + 1);
c37f9fb1
AW
1513}
1514
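/*
 * Example of the needs/commit pairing above (illustrative indices): for a
 * VM_MAYSHARE vma faulting on huge page index 3 with no region covering
 * it yet, vma_needs_reservation() returns 1 (region_chg() may also have
 * left a zero-size placeholder so the later region_add() cannot fail);
 * once the page is allocated, vma_commit_reservation() records [3, 4) in
 * the shared reserve map via region_add(). For a private mapping that
 * owns a reserve map, the same call returns 0 unless region_chg() fails,
 * because the reservation was already charged at mmap() time.
 */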
a1e78772 1515static struct page *alloc_huge_page(struct vm_area_struct *vma,
04f2cbe3 1516 unsigned long addr, int avoid_reserve)
1da177e4 1517{
90481622 1518 struct hugepage_subpool *spool = subpool_vma(vma);
a5516438 1519 struct hstate *h = hstate_vma(vma);
348ea204 1520 struct page *page;
e2f17d94 1521 long chg;
6d76dcf4
AK
1522 int ret, idx;
1523 struct hugetlb_cgroup *h_cg;
a1e78772 1524
6d76dcf4 1525 idx = hstate_index(h);
a1e78772 1526 /*
90481622
DG
1527 * Processes that did not create the mapping will have no
1528 * reserves and will not have accounted against the subpool
1529 * limit. Check that the subpool limit can be made before
1530 * satisfying the allocation; MAP_NORESERVE mappings may also
1531 * need pages and the subpool limit allocated if no reserve
1532 * mapping overlaps.
a1e78772 1533 */
a5516438 1534 chg = vma_needs_reservation(h, vma, addr);
c37f9fb1 1535 if (chg < 0)
76dcee75 1536 return ERR_PTR(-ENOMEM);
8bb3f12e 1537 if (chg || avoid_reserve)
1c5ecae3 1538 if (hugepage_subpool_get_pages(spool, 1) < 0)
76dcee75 1539 return ERR_PTR(-ENOSPC);
1da177e4 1540
6d76dcf4 1541 ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
8f34af6f
JZ
1542 if (ret)
1543 goto out_subpool_put;
1544
1da177e4 1545 spin_lock(&hugetlb_lock);
af0ed73e 1546 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, chg);
81a6fcae 1547 if (!page) {
94ae8ba7 1548 spin_unlock(&hugetlb_lock);
bf50bab2 1549 page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
8f34af6f
JZ
1550 if (!page)
1551 goto out_uncharge_cgroup;
1552
79dbb236
AK
1553 spin_lock(&hugetlb_lock);
1554 list_move(&page->lru, &h->hugepage_activelist);
81a6fcae 1555 /* Fall through */
68842c9b 1556 }
81a6fcae
JK
1557 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
1558 spin_unlock(&hugetlb_lock);
348ea204 1559
90481622 1560 set_page_private(page, (unsigned long)spool);
90d8b7e6 1561
a5516438 1562 vma_commit_reservation(h, vma, addr);
90d8b7e6 1563 return page;
8f34af6f
JZ
1564
1565out_uncharge_cgroup:
1566 hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
1567out_subpool_put:
1568 if (chg || avoid_reserve)
1569 hugepage_subpool_put_pages(spool, 1);
1570 return ERR_PTR(-ENOSPC);
b45b5bd6
DG
1571}
1572
74060e4d
NH
1573/*
1574 * alloc_huge_page()'s wrapper which simply returns the page if allocation
1575 * succeeds, otherwise NULL. This function is called from new_vma_page(),
1576 * where no ERR_VALUE is expected to be returned.
1577 */
1578struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
1579 unsigned long addr, int avoid_reserve)
1580{
1581 struct page *page = alloc_huge_page(vma, addr, avoid_reserve);
1582 if (IS_ERR(page))
1583 page = NULL;
1584 return page;
1585}
1586
91f47662 1587int __weak alloc_bootmem_huge_page(struct hstate *h)
aa888a74
AK
1588{
1589 struct huge_bootmem_page *m;
b2261026 1590 int nr_nodes, node;
aa888a74 1591
b2261026 1592 for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
aa888a74
AK
1593 void *addr;
1594
8b89a116
GS
1595 addr = memblock_virt_alloc_try_nid_nopanic(
1596 huge_page_size(h), huge_page_size(h),
1597 0, BOOTMEM_ALLOC_ACCESSIBLE, node);
aa888a74
AK
1598 if (addr) {
1599 /*
1600 * Use the beginning of the huge page to store the
1601 * huge_bootmem_page struct (until gather_bootmem
1602 * puts them into the mem_map).
1603 */
1604 m = addr;
91f47662 1605 goto found;
aa888a74 1606 }
aa888a74
AK
1607 }
1608 return 0;
1609
1610found:
df994ead 1611 BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
aa888a74
AK
1612 /* Put them into a private list first because mem_map is not up yet */
1613 list_add(&m->list, &huge_boot_pages);
1614 m->hstate = h;
1615 return 1;
1616}
1617
f412c97a 1618static void __init prep_compound_huge_page(struct page *page, int order)
18229df5
AW
1619{
1620 if (unlikely(order > (MAX_ORDER - 1)))
1621 prep_compound_gigantic_page(page, order);
1622 else
1623 prep_compound_page(page, order);
1624}
1625
aa888a74
AK
1626/* Put bootmem huge pages into the standard lists after mem_map is up */
1627static void __init gather_bootmem_prealloc(void)
1628{
1629 struct huge_bootmem_page *m;
1630
1631 list_for_each_entry(m, &huge_boot_pages, list) {
aa888a74 1632 struct hstate *h = m->hstate;
ee8f248d
BB
1633 struct page *page;
1634
1635#ifdef CONFIG_HIGHMEM
1636 page = pfn_to_page(m->phys >> PAGE_SHIFT);
8b89a116
GS
1637 memblock_free_late(__pa(m),
1638 sizeof(struct huge_bootmem_page));
ee8f248d
BB
1639#else
1640 page = virt_to_page(m);
1641#endif
aa888a74 1642 WARN_ON(page_count(page) != 1);
18229df5 1643 prep_compound_huge_page(page, h->order);
ef5a22be 1644 WARN_ON(PageReserved(page));
aa888a74 1645 prep_new_huge_page(h, page, page_to_nid(page));
b0320c7b
RA
1646 /*
1647 * If we had gigantic hugepages allocated at boot time, we need
1648 * to restore the 'stolen' pages to totalram_pages in order to
1649 * fix confusing memory reports from free(1) and another
1650 * side-effects, like CommitLimit going negative.
1651 */
bae7f4ae 1652 if (hstate_is_gigantic(h))
3dcc0571 1653 adjust_managed_page_count(page, 1 << h->order);
aa888a74
AK
1654 }
1655}
1656
8faa8b07 1657static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
1da177e4
LT
1658{
1659 unsigned long i;
a5516438 1660
e5ff2159 1661 for (i = 0; i < h->max_huge_pages; ++i) {
bae7f4ae 1662 if (hstate_is_gigantic(h)) {
aa888a74
AK
1663 if (!alloc_bootmem_huge_page(h))
1664 break;
9b5e5d0f 1665 } else if (!alloc_fresh_huge_page(h,
8cebfcd0 1666 &node_states[N_MEMORY]))
1da177e4 1667 break;
1da177e4 1668 }
8faa8b07 1669 h->max_huge_pages = i;
e5ff2159
AK
1670}
1671
1672static void __init hugetlb_init_hstates(void)
1673{
1674 struct hstate *h;
1675
1676 for_each_hstate(h) {
641844f5
NH
1677 if (minimum_order > huge_page_order(h))
1678 minimum_order = huge_page_order(h);
1679
8faa8b07 1680 /* oversize hugepages were init'ed in early boot */
bae7f4ae 1681 if (!hstate_is_gigantic(h))
8faa8b07 1682 hugetlb_hstate_alloc_pages(h);
e5ff2159 1683 }
641844f5 1684 VM_BUG_ON(minimum_order == UINT_MAX);
e5ff2159
AK
1685}
1686
4abd32db
AK
1687static char * __init memfmt(char *buf, unsigned long n)
1688{
1689 if (n >= (1UL << 30))
1690 sprintf(buf, "%lu GB", n >> 30);
1691 else if (n >= (1UL << 20))
1692 sprintf(buf, "%lu MB", n >> 20);
1693 else
1694 sprintf(buf, "%lu KB", n >> 10);
1695 return buf;
1696}
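/*
 * Illustrative examples only, compiled out: what memfmt() produces for the
 * common hugepage sizes, as later printed by report_hugepages().
 */
#if 0
	char buf[32];

	pr_info("%s\n", memfmt(buf, 2UL << 20));	/* "2 MB"  */
	pr_info("%s\n", memfmt(buf, 1UL << 30));	/* "1 GB"  */
	pr_info("%s\n", memfmt(buf, 64UL << 10));	/* "64 KB" */
#endif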
1697
1698static void __init report_hugepages(void)
1699{
1700 struct hstate *h;
1701
1702 for_each_hstate(h) {
4abd32db 1703 char buf[32];
ffb22af5 1704 pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
1705 memfmt(buf, huge_page_size(h)),
1706 h->free_huge_pages);
1707 }
1708}
1709
1da177e4 1710#ifdef CONFIG_HIGHMEM
6ae11b27
LS
1711static void try_to_free_low(struct hstate *h, unsigned long count,
1712 nodemask_t *nodes_allowed)
1da177e4 1713{
4415cc8d
CL
1714 int i;
1715
bae7f4ae 1716 if (hstate_is_gigantic(h))
aa888a74
AK
1717 return;
1718
6ae11b27 1719 for_each_node_mask(i, *nodes_allowed) {
1da177e4 1720 struct page *page, *next;
a5516438
AK
1721 struct list_head *freel = &h->hugepage_freelists[i];
1722 list_for_each_entry_safe(page, next, freel, lru) {
1723 if (count >= h->nr_huge_pages)
6b0c880d 1724 return;
1da177e4
LT
1725 if (PageHighMem(page))
1726 continue;
1727 list_del(&page->lru);
e5ff2159 1728 update_and_free_page(h, page);
a5516438
AK
1729 h->free_huge_pages--;
1730 h->free_huge_pages_node[page_to_nid(page)]--;
1da177e4
LT
1731 }
1732 }
1733}
1734#else
6ae11b27
LS
1735static inline void try_to_free_low(struct hstate *h, unsigned long count,
1736 nodemask_t *nodes_allowed)
1da177e4
LT
1737{
1738}
1739#endif
1740
20a0307c
WF
1741/*
1742 * Increment or decrement surplus_huge_pages. Keep node-specific counters
1743 * balanced by operating on them in a round-robin fashion.
1744 * Returns 1 if an adjustment was made.
1745 */
6ae11b27
LS
1746static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
1747 int delta)
20a0307c 1748{
b2261026 1749 int nr_nodes, node;
20a0307c
WF
1750
1751 VM_BUG_ON(delta != -1 && delta != 1);
20a0307c 1752
b2261026
JK
1753 if (delta < 0) {
1754 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1755 if (h->surplus_huge_pages_node[node])
1756 goto found;
e8c5c824 1757 }
b2261026
JK
1758 } else {
1759 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1760 if (h->surplus_huge_pages_node[node] <
1761 h->nr_huge_pages_node[node])
1762 goto found;
e8c5c824 1763 }
b2261026
JK
1764 }
1765 return 0;
20a0307c 1766
b2261026
JK
1767found:
1768 h->surplus_huge_pages += delta;
1769 h->surplus_huge_pages_node[node] += delta;
1770 return 1;
20a0307c
WF
1771}
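/*
 * Illustrative sketch only, compiled out: adjust_pool_surplus() is always
 * called with hugetlb_lock held and moves exactly one page between the
 * surplus and persistent counts ('h' is a stand-in for the hstate being
 * resized), e.g.:
 */
#if 0
	spin_lock(&hugetlb_lock);
	/* turn one surplus page (if any) into a persistent page */
	if (adjust_pool_surplus(h, &node_states[N_MEMORY], -1))
		pr_debug("hugetlb: %lu surplus pages remain\n",
			 h->surplus_huge_pages);
	spin_unlock(&hugetlb_lock);
#endif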
1772
a5516438 1773#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
6ae11b27
LS
1774static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
1775 nodemask_t *nodes_allowed)
1da177e4 1776{
7893d1d5 1777 unsigned long min_count, ret;
1da177e4 1778
944d9fec 1779 if (hstate_is_gigantic(h) && !gigantic_page_supported())
1780 return h->max_huge_pages;
1781
7893d1d5
AL
1782 /*
1783 * Increase the pool size
1784 * First take pages out of surplus state. Then make up the
1785 * remaining difference by allocating fresh huge pages.
d1c3fb1f
NA
1786 *
1787 * We might race with alloc_buddy_huge_page() here and be unable
1788 * to convert a surplus huge page to a normal huge page. That is
1789 * not critical, though, it just means the overall size of the
1790 * pool might be one hugepage larger than it needs to be, but
1791 * within all the constraints specified by the sysctls.
7893d1d5 1792 */
1da177e4 1793 spin_lock(&hugetlb_lock);
a5516438 1794 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
6ae11b27 1795 if (!adjust_pool_surplus(h, nodes_allowed, -1))
1796 break;
1797 }
1798
a5516438 1799 while (count > persistent_huge_pages(h)) {
7893d1d5
AL
1800 /*
1801 * If this allocation races such that we no longer need the
1802 * page, free_huge_page will handle it by freeing the page
1803 * and reducing the surplus.
1804 */
1805 spin_unlock(&hugetlb_lock);
944d9fec
LC
1806 if (hstate_is_gigantic(h))
1807 ret = alloc_fresh_gigantic_page(h, nodes_allowed);
1808 else
1809 ret = alloc_fresh_huge_page(h, nodes_allowed);
7893d1d5
AL
1810 spin_lock(&hugetlb_lock);
1811 if (!ret)
1812 goto out;
1813
536240f2
MG
1814 /* Bail for signals. Probably ctrl-c from user */
1815 if (signal_pending(current))
1816 goto out;
7893d1d5 1817 }
7893d1d5
AL
1818
1819 /*
1820 * Decrease the pool size
1821 * First return free pages to the buddy allocator (being careful
1822 * to keep enough around to satisfy reservations). Then place
1823 * pages into surplus state as needed so the pool will shrink
1824 * to the desired size as pages become free.
d1c3fb1f
NA
1825 *
1826 * By placing pages into the surplus state independent of the
1827 * overcommit value, we are allowing the surplus pool size to
1828 * exceed overcommit. There are few sane options here. Since
1829 * alloc_buddy_huge_page() is checking the global counter,
1830 * though, we'll note that we're not allowed to exceed surplus
1831 * and won't grow the pool anywhere else. Not until one of the
1832	 * sysctls is changed, or the surplus pages go out of use.
7893d1d5 1833 */
a5516438 1834 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
6b0c880d 1835 min_count = max(count, min_count);
6ae11b27 1836 try_to_free_low(h, min_count, nodes_allowed);
a5516438 1837 while (min_count < persistent_huge_pages(h)) {
6ae11b27 1838 if (!free_pool_huge_page(h, nodes_allowed, 0))
1da177e4 1839 break;
55f67141 1840 cond_resched_lock(&hugetlb_lock);
1da177e4 1841 }
a5516438 1842 while (count < persistent_huge_pages(h)) {
6ae11b27 1843 if (!adjust_pool_surplus(h, nodes_allowed, 1))
1844 break;
1845 }
1846out:
a5516438 1847 ret = persistent_huge_pages(h);
1da177e4 1848 spin_unlock(&hugetlb_lock);
7893d1d5 1849 return ret;
1da177e4
LT
1850}
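/*
 * Illustrative sketch only, compiled out: set_max_huge_pages() is the
 * backend of both "echo N > /proc/sys/vm/nr_hugepages" and the per-hstate
 * sysfs nr_hugepages files; a direct call (normally reached through
 * __nr_hugepages_store_common() below) looks like:
 */
#if 0
	struct hstate *h = &default_hstate;
	unsigned long actual;

	/* grow (or shrink) the default pool to 64 persistent pages */
	actual = set_max_huge_pages(h, 64, &node_states[N_MEMORY]);
	pr_info("hugetlb: pool now holds %lu persistent pages\n", actual);
#endif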
1851
a3437870
NA
1852#define HSTATE_ATTR_RO(_name) \
1853 static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
1854
1855#define HSTATE_ATTR(_name) \
1856 static struct kobj_attribute _name##_attr = \
1857 __ATTR(_name, 0644, _name##_show, _name##_store)
1858
1859static struct kobject *hugepages_kobj;
1860static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
1861
9a305230
LS
1862static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
1863
1864static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
1865{
1866 int i;
9a305230 1867
a3437870 1868 for (i = 0; i < HUGE_MAX_HSTATE; i++)
1869 if (hstate_kobjs[i] == kobj) {
1870 if (nidp)
1871 *nidp = NUMA_NO_NODE;
a3437870 1872 return &hstates[i];
9a305230
LS
1873 }
1874
1875 return kobj_to_node_hstate(kobj, nidp);
a3437870
NA
1876}
1877
06808b08 1878static ssize_t nr_hugepages_show_common(struct kobject *kobj,
1879 struct kobj_attribute *attr, char *buf)
1880{
9a305230
LS
1881 struct hstate *h;
1882 unsigned long nr_huge_pages;
1883 int nid;
1884
1885 h = kobj_to_hstate(kobj, &nid);
1886 if (nid == NUMA_NO_NODE)
1887 nr_huge_pages = h->nr_huge_pages;
1888 else
1889 nr_huge_pages = h->nr_huge_pages_node[nid];
1890
1891 return sprintf(buf, "%lu\n", nr_huge_pages);
a3437870 1892}
adbe8726 1893
238d3c13
DR
1894static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
1895 struct hstate *h, int nid,
1896 unsigned long count, size_t len)
a3437870
NA
1897{
1898 int err;
bad44b5b 1899 NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
a3437870 1900
944d9fec 1901 if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
1902 err = -EINVAL;
1903 goto out;
1904 }
1905
9a305230
LS
1906 if (nid == NUMA_NO_NODE) {
1907 /*
1908 * global hstate attribute
1909 */
1910 if (!(obey_mempolicy &&
1911 init_nodemask_of_mempolicy(nodes_allowed))) {
1912 NODEMASK_FREE(nodes_allowed);
8cebfcd0 1913 nodes_allowed = &node_states[N_MEMORY];
9a305230
LS
1914 }
1915 } else if (nodes_allowed) {
1916 /*
1917 * per node hstate attribute: adjust count to global,
1918 * but restrict alloc/free to the specified node.
1919 */
1920 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
1921 init_nodemask_of_node(nodes_allowed, nid);
1922 } else
8cebfcd0 1923 nodes_allowed = &node_states[N_MEMORY];
9a305230 1924
06808b08 1925 h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
a3437870 1926
8cebfcd0 1927 if (nodes_allowed != &node_states[N_MEMORY])
1928 NODEMASK_FREE(nodes_allowed);
1929
1930 return len;
adbe8726
EM
1931out:
1932 NODEMASK_FREE(nodes_allowed);
1933 return err;
06808b08
LS
1934}
1935
238d3c13
DR
1936static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
1937 struct kobject *kobj, const char *buf,
1938 size_t len)
1939{
1940 struct hstate *h;
1941 unsigned long count;
1942 int nid;
1943 int err;
1944
1945 err = kstrtoul(buf, 10, &count);
1946 if (err)
1947 return err;
1948
1949 h = kobj_to_hstate(kobj, &nid);
1950 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
1951}
1952
06808b08
LS
1953static ssize_t nr_hugepages_show(struct kobject *kobj,
1954 struct kobj_attribute *attr, char *buf)
1955{
1956 return nr_hugepages_show_common(kobj, attr, buf);
1957}
1958
1959static ssize_t nr_hugepages_store(struct kobject *kobj,
1960 struct kobj_attribute *attr, const char *buf, size_t len)
1961{
238d3c13 1962 return nr_hugepages_store_common(false, kobj, buf, len);
a3437870
NA
1963}
1964HSTATE_ATTR(nr_hugepages);
1965
06808b08
LS
1966#ifdef CONFIG_NUMA
1967
1968/*
1969 * hstate attribute for optionally mempolicy-based constraint on persistent
1970 * huge page alloc/free.
1971 */
1972static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
1973 struct kobj_attribute *attr, char *buf)
1974{
1975 return nr_hugepages_show_common(kobj, attr, buf);
1976}
1977
1978static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
1979 struct kobj_attribute *attr, const char *buf, size_t len)
1980{
238d3c13 1981 return nr_hugepages_store_common(true, kobj, buf, len);
06808b08
LS
1982}
1983HSTATE_ATTR(nr_hugepages_mempolicy);
1984#endif
1985
1986
a3437870
NA
1987static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
1988 struct kobj_attribute *attr, char *buf)
1989{
9a305230 1990 struct hstate *h = kobj_to_hstate(kobj, NULL);
a3437870
NA
1991 return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
1992}
adbe8726 1993
a3437870
NA
1994static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
1995 struct kobj_attribute *attr, const char *buf, size_t count)
1996{
1997 int err;
1998 unsigned long input;
9a305230 1999 struct hstate *h = kobj_to_hstate(kobj, NULL);
a3437870 2000
bae7f4ae 2001 if (hstate_is_gigantic(h))
adbe8726
EM
2002 return -EINVAL;
2003
3dbb95f7 2004 err = kstrtoul(buf, 10, &input);
a3437870 2005 if (err)
73ae31e5 2006 return err;
a3437870
NA
2007
2008 spin_lock(&hugetlb_lock);
2009 h->nr_overcommit_huge_pages = input;
2010 spin_unlock(&hugetlb_lock);
2011
2012 return count;
2013}
2014HSTATE_ATTR(nr_overcommit_hugepages);
2015
2016static ssize_t free_hugepages_show(struct kobject *kobj,
2017 struct kobj_attribute *attr, char *buf)
2018{
9a305230
LS
2019 struct hstate *h;
2020 unsigned long free_huge_pages;
2021 int nid;
2022
2023 h = kobj_to_hstate(kobj, &nid);
2024 if (nid == NUMA_NO_NODE)
2025 free_huge_pages = h->free_huge_pages;
2026 else
2027 free_huge_pages = h->free_huge_pages_node[nid];
2028
2029 return sprintf(buf, "%lu\n", free_huge_pages);
a3437870
NA
2030}
2031HSTATE_ATTR_RO(free_hugepages);
2032
2033static ssize_t resv_hugepages_show(struct kobject *kobj,
2034 struct kobj_attribute *attr, char *buf)
2035{
9a305230 2036 struct hstate *h = kobj_to_hstate(kobj, NULL);
a3437870
NA
2037 return sprintf(buf, "%lu\n", h->resv_huge_pages);
2038}
2039HSTATE_ATTR_RO(resv_hugepages);
2040
2041static ssize_t surplus_hugepages_show(struct kobject *kobj,
2042 struct kobj_attribute *attr, char *buf)
2043{
9a305230
LS
2044 struct hstate *h;
2045 unsigned long surplus_huge_pages;
2046 int nid;
2047
2048 h = kobj_to_hstate(kobj, &nid);
2049 if (nid == NUMA_NO_NODE)
2050 surplus_huge_pages = h->surplus_huge_pages;
2051 else
2052 surplus_huge_pages = h->surplus_huge_pages_node[nid];
2053
2054 return sprintf(buf, "%lu\n", surplus_huge_pages);
a3437870
NA
2055}
2056HSTATE_ATTR_RO(surplus_hugepages);
2057
2058static struct attribute *hstate_attrs[] = {
2059 &nr_hugepages_attr.attr,
2060 &nr_overcommit_hugepages_attr.attr,
2061 &free_hugepages_attr.attr,
2062 &resv_hugepages_attr.attr,
2063 &surplus_hugepages_attr.attr,
06808b08
LS
2064#ifdef CONFIG_NUMA
2065 &nr_hugepages_mempolicy_attr.attr,
2066#endif
a3437870
NA
2067 NULL,
2068};
2069
2070static struct attribute_group hstate_attr_group = {
2071 .attrs = hstate_attrs,
2072};
2073
094e9539
JM
2074static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
2075 struct kobject **hstate_kobjs,
2076 struct attribute_group *hstate_attr_group)
a3437870
NA
2077{
2078 int retval;
972dc4de 2079 int hi = hstate_index(h);
a3437870 2080
9a305230
LS
2081 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
2082 if (!hstate_kobjs[hi])
a3437870
NA
2083 return -ENOMEM;
2084
9a305230 2085 retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
a3437870 2086 if (retval)
9a305230 2087 kobject_put(hstate_kobjs[hi]);
a3437870
NA
2088
2089 return retval;
2090}
2091
2092static void __init hugetlb_sysfs_init(void)
2093{
2094 struct hstate *h;
2095 int err;
2096
2097 hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
2098 if (!hugepages_kobj)
2099 return;
2100
2101 for_each_hstate(h) {
9a305230
LS
2102 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
2103 hstate_kobjs, &hstate_attr_group);
a3437870 2104 if (err)
ffb22af5 2105 pr_err("Hugetlb: Unable to add hstate %s", h->name);
a3437870
NA
2106 }
2107}
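/*
 * For reference, the hierarchy created above on a typical x86_64 system
 * with 2 MB and 1 GB hstates (paths are real, the echo value is only an
 * example):
 *
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/nr_overcommit_hugepages
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/free_hugepages
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/resv_hugepages
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/surplus_hugepages
 *	/sys/kernel/mm/hugepages/hugepages-1048576kB/...
 *
 *	# echo 512 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 */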
2108
9a305230
LS
2109#ifdef CONFIG_NUMA
2110
2111/*
2112 * node_hstate/s - associate per node hstate attributes, via their kobjects,
10fbcf4c
KS
2113 * with node devices in node_devices[] using a parallel array. The array
2114 * index of a node device or _hstate == node id.
2115 * This is here to avoid any static dependency of the node device driver, in
9a305230
LS
2116 * the base kernel, on the hugetlb module.
2117 */
2118struct node_hstate {
2119 struct kobject *hugepages_kobj;
2120 struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2121};
2122struct node_hstate node_hstates[MAX_NUMNODES];
2123
2124/*
10fbcf4c 2125 * A subset of global hstate attributes for node devices
9a305230
LS
2126 */
2127static struct attribute *per_node_hstate_attrs[] = {
2128 &nr_hugepages_attr.attr,
2129 &free_hugepages_attr.attr,
2130 &surplus_hugepages_attr.attr,
2131 NULL,
2132};
2133
2134static struct attribute_group per_node_hstate_attr_group = {
2135 .attrs = per_node_hstate_attrs,
2136};
2137
2138/*
10fbcf4c 2139 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
9a305230
LS
2140 * Returns node id via non-NULL nidp.
2141 */
2142static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2143{
2144 int nid;
2145
2146 for (nid = 0; nid < nr_node_ids; nid++) {
2147 struct node_hstate *nhs = &node_hstates[nid];
2148 int i;
2149 for (i = 0; i < HUGE_MAX_HSTATE; i++)
2150 if (nhs->hstate_kobjs[i] == kobj) {
2151 if (nidp)
2152 *nidp = nid;
2153 return &hstates[i];
2154 }
2155 }
2156
2157 BUG();
2158 return NULL;
2159}
2160
2161/*
10fbcf4c 2162 * Unregister hstate attributes from a single node device.
9a305230
LS
2163 * No-op if no hstate attributes attached.
2164 */
3cd8b44f 2165static void hugetlb_unregister_node(struct node *node)
9a305230
LS
2166{
2167 struct hstate *h;
10fbcf4c 2168 struct node_hstate *nhs = &node_hstates[node->dev.id];
9a305230
LS
2169
2170 if (!nhs->hugepages_kobj)
9b5e5d0f 2171 return; /* no hstate attributes */
9a305230 2172
972dc4de
AK
2173 for_each_hstate(h) {
2174 int idx = hstate_index(h);
2175 if (nhs->hstate_kobjs[idx]) {
2176 kobject_put(nhs->hstate_kobjs[idx]);
2177 nhs->hstate_kobjs[idx] = NULL;
9a305230 2178 }
972dc4de 2179 }
9a305230
LS
2180
2181 kobject_put(nhs->hugepages_kobj);
2182 nhs->hugepages_kobj = NULL;
2183}
2184
2185/*
10fbcf4c 2186 * hugetlb module exit: unregister hstate attributes from node devices
9a305230
LS
2187 * that have them.
2188 */
2189static void hugetlb_unregister_all_nodes(void)
2190{
2191 int nid;
2192
2193 /*
10fbcf4c 2194 * disable node device registrations.
9a305230
LS
2195 */
2196 register_hugetlbfs_with_node(NULL, NULL);
2197
2198 /*
2199 * remove hstate attributes from any nodes that have them.
2200 */
2201 for (nid = 0; nid < nr_node_ids; nid++)
8732794b 2202 hugetlb_unregister_node(node_devices[nid]);
9a305230
LS
2203}
2204
2205/*
10fbcf4c 2206 * Register hstate attributes for a single node device.
9a305230
LS
2207 * No-op if attributes already registered.
2208 */
3cd8b44f 2209static void hugetlb_register_node(struct node *node)
9a305230
LS
2210{
2211 struct hstate *h;
10fbcf4c 2212 struct node_hstate *nhs = &node_hstates[node->dev.id];
9a305230
LS
2213 int err;
2214
2215 if (nhs->hugepages_kobj)
2216 return; /* already allocated */
2217
2218 nhs->hugepages_kobj = kobject_create_and_add("hugepages",
10fbcf4c 2219 &node->dev.kobj);
9a305230
LS
2220 if (!nhs->hugepages_kobj)
2221 return;
2222
2223 for_each_hstate(h) {
2224 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
2225 nhs->hstate_kobjs,
2226 &per_node_hstate_attr_group);
2227 if (err) {
ffb22af5
AM
2228 pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
2229 h->name, node->dev.id);
9a305230
LS
2230 hugetlb_unregister_node(node);
2231 break;
2232 }
2233 }
2234}
2235
2236/*
9b5e5d0f 2237 * hugetlb init time: register hstate attributes for all registered node
10fbcf4c
KS
2238 * devices of nodes that have memory. All on-line nodes should have
2239 * registered their associated device by this time.
9a305230 2240 */
7d9ca000 2241static void __init hugetlb_register_all_nodes(void)
9a305230
LS
2242{
2243 int nid;
2244
8cebfcd0 2245 for_each_node_state(nid, N_MEMORY) {
8732794b 2246 struct node *node = node_devices[nid];
10fbcf4c 2247 if (node->dev.id == nid)
9a305230
LS
2248 hugetlb_register_node(node);
2249 }
2250
2251 /*
10fbcf4c 2252 * Let the node device driver know we're here so it can
9a305230
LS
2253 * [un]register hstate attributes on node hotplug.
2254 */
2255 register_hugetlbfs_with_node(hugetlb_register_node,
2256 hugetlb_unregister_node);
2257}
2258#else /* !CONFIG_NUMA */
2259
2260static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2261{
2262 BUG();
2263 if (nidp)
2264 *nidp = -1;
2265 return NULL;
2266}
2267
2268static void hugetlb_unregister_all_nodes(void) { }
2269
2270static void hugetlb_register_all_nodes(void) { }
2271
2272#endif
2273
a3437870
NA
2274static void __exit hugetlb_exit(void)
2275{
2276 struct hstate *h;
2277
9a305230
LS
2278 hugetlb_unregister_all_nodes();
2279
a3437870 2280 for_each_hstate(h) {
972dc4de 2281 kobject_put(hstate_kobjs[hstate_index(h)]);
a3437870
NA
2282 }
2283
2284 kobject_put(hugepages_kobj);
8382d914 2285 kfree(htlb_fault_mutex_table);
a3437870
NA
2286}
2287module_exit(hugetlb_exit);
2288
2289static int __init hugetlb_init(void)
2290{
8382d914
DB
2291 int i;
2292
457c1b27 2293 if (!hugepages_supported())
0ef89d25 2294 return 0;
a3437870 2295
e11bfbfc
NP
2296 if (!size_to_hstate(default_hstate_size)) {
2297 default_hstate_size = HPAGE_SIZE;
2298 if (!size_to_hstate(default_hstate_size))
2299 hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
a3437870 2300 }
972dc4de 2301 default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
e11bfbfc
NP
2302 if (default_hstate_max_huge_pages)
2303 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
a3437870
NA
2304
2305 hugetlb_init_hstates();
aa888a74 2306 gather_bootmem_prealloc();
a3437870
NA
2307 report_hugepages();
2308
2309 hugetlb_sysfs_init();
9a305230 2310 hugetlb_register_all_nodes();
7179e7bf 2311 hugetlb_cgroup_file_init();
9a305230 2312
8382d914
DB
2313#ifdef CONFIG_SMP
2314 num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
2315#else
2316 num_fault_mutexes = 1;
2317#endif
2318 htlb_fault_mutex_table =
2319 kmalloc(sizeof(struct mutex) * num_fault_mutexes, GFP_KERNEL);
2320 BUG_ON(!htlb_fault_mutex_table);
2321
2322 for (i = 0; i < num_fault_mutexes; i++)
2323 mutex_init(&htlb_fault_mutex_table[i]);
a3437870
NA
2324 return 0;
2325}
2326module_init(hugetlb_init);
2327
2328/* Should be called on processing a hugepagesz=... option */
2329void __init hugetlb_add_hstate(unsigned order)
2330{
2331 struct hstate *h;
8faa8b07
AK
2332 unsigned long i;
2333
a3437870 2334 if (size_to_hstate(PAGE_SIZE << order)) {
ffb22af5 2335 pr_warning("hugepagesz= specified twice, ignoring\n");
a3437870
NA
2336 return;
2337 }
47d38344 2338 BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
a3437870 2339 BUG_ON(order == 0);
47d38344 2340 h = &hstates[hugetlb_max_hstate++];
a3437870
NA
2341 h->order = order;
2342 h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
8faa8b07
AK
2343 h->nr_huge_pages = 0;
2344 h->free_huge_pages = 0;
2345 for (i = 0; i < MAX_NUMNODES; ++i)
2346 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
0edaecfa 2347 INIT_LIST_HEAD(&h->hugepage_activelist);
8cebfcd0
LJ
2348 h->next_nid_to_alloc = first_node(node_states[N_MEMORY]);
2349 h->next_nid_to_free = first_node(node_states[N_MEMORY]);
a3437870
NA
2350 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
2351 huge_page_size(h)/1024);
8faa8b07 2352
a3437870
NA
2353 parsed_hstate = h;
2354}
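/*
 * Illustrative sketch only, compiled out: architecture code calls
 * hugetlb_add_hstate() while parsing "hugepagesz=".  On x86_64 with 4 kB
 * base pages, for example, the orders work out as below (directory names
 * follow from the snprintf() above).
 */
#if 0
	hugetlb_add_hstate(9);	/* hugepagesz=2M -> "hugepages-2048kB"    */
	hugetlb_add_hstate(18);	/* hugepagesz=1G -> "hugepages-1048576kB" */
#endif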
2355
e11bfbfc 2356static int __init hugetlb_nrpages_setup(char *s)
a3437870
NA
2357{
2358 unsigned long *mhp;
8faa8b07 2359 static unsigned long *last_mhp;
a3437870
NA
2360
2361 /*
47d38344 2362 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
a3437870
NA
2363 * so this hugepages= parameter goes to the "default hstate".
2364 */
47d38344 2365 if (!hugetlb_max_hstate)
a3437870
NA
2366 mhp = &default_hstate_max_huge_pages;
2367 else
2368 mhp = &parsed_hstate->max_huge_pages;
2369
8faa8b07 2370 if (mhp == last_mhp) {
ffb22af5
AM
2371 pr_warning("hugepages= specified twice without "
2372 "interleaving hugepagesz=, ignoring\n");
8faa8b07
AK
2373 return 1;
2374 }
2375
a3437870
NA
2376 if (sscanf(s, "%lu", mhp) <= 0)
2377 *mhp = 0;
2378
8faa8b07
AK
2379 /*
2380 * Global state is always initialized later in hugetlb_init.
2381 * But we need to allocate >= MAX_ORDER hstates here early to still
2382 * use the bootmem allocator.
2383 */
47d38344 2384 if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
8faa8b07
AK
2385 hugetlb_hstate_alloc_pages(parsed_hstate);
2386
2387 last_mhp = mhp;
2388
a3437870
NA
2389 return 1;
2390}
e11bfbfc
NP
2391__setup("hugepages=", hugetlb_nrpages_setup);
2392
2393static int __init hugetlb_default_setup(char *s)
2394{
2395 default_hstate_size = memparse(s, &s);
2396 return 1;
2397}
2398__setup("default_hugepagesz=", hugetlb_default_setup);
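/*
 * Example boot command line (illustrative): reserve sixteen 1 GB pages at
 * early boot and make 1 GB the default hugepage size:
 *
 *	default_hugepagesz=1G hugepagesz=1G hugepages=16
 *
 * "hugepagesz=" creates the hstate, the following "hugepages=" sets its
 * max_huge_pages, and because the order is >= MAX_ORDER the pages are
 * reserved immediately from bootmem by hugetlb_hstate_alloc_pages().
 */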
a3437870 2399
8a213460
NA
2400static unsigned int cpuset_mems_nr(unsigned int *array)
2401{
2402 int node;
2403 unsigned int nr = 0;
2404
2405 for_each_node_mask(node, cpuset_current_mems_allowed)
2406 nr += array[node];
2407
2408 return nr;
2409}
2410
2411#ifdef CONFIG_SYSCTL
06808b08
LS
2412static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
2413 struct ctl_table *table, int write,
2414 void __user *buffer, size_t *length, loff_t *ppos)
1da177e4 2415{
e5ff2159 2416 struct hstate *h = &default_hstate;
238d3c13 2417 unsigned long tmp = h->max_huge_pages;
08d4a246 2418 int ret;
e5ff2159 2419
457c1b27
NA
2420 if (!hugepages_supported())
2421 return -ENOTSUPP;
2422
e5ff2159
AK
2423 table->data = &tmp;
2424 table->maxlen = sizeof(unsigned long);
08d4a246
MH
2425 ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2426 if (ret)
2427 goto out;
e5ff2159 2428
238d3c13
DR
2429 if (write)
2430 ret = __nr_hugepages_store_common(obey_mempolicy, h,
2431 NUMA_NO_NODE, tmp, *length);
08d4a246
MH
2432out:
2433 return ret;
1da177e4 2434}
396faf03 2435
06808b08
LS
2436int hugetlb_sysctl_handler(struct ctl_table *table, int write,
2437 void __user *buffer, size_t *length, loff_t *ppos)
2438{
2439
2440 return hugetlb_sysctl_handler_common(false, table, write,
2441 buffer, length, ppos);
2442}
2443
2444#ifdef CONFIG_NUMA
2445int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
2446 void __user *buffer, size_t *length, loff_t *ppos)
2447{
2448 return hugetlb_sysctl_handler_common(true, table, write,
2449 buffer, length, ppos);
2450}
2451#endif /* CONFIG_NUMA */
2452
a3d0c6aa 2453int hugetlb_overcommit_handler(struct ctl_table *table, int write,
8d65af78 2454 void __user *buffer,
a3d0c6aa
NA
2455 size_t *length, loff_t *ppos)
2456{
a5516438 2457 struct hstate *h = &default_hstate;
e5ff2159 2458 unsigned long tmp;
08d4a246 2459 int ret;
e5ff2159 2460
457c1b27
NA
2461 if (!hugepages_supported())
2462 return -ENOTSUPP;
2463
c033a93c 2464 tmp = h->nr_overcommit_huge_pages;
e5ff2159 2465
bae7f4ae 2466 if (write && hstate_is_gigantic(h))
adbe8726
EM
2467 return -EINVAL;
2468
e5ff2159
AK
2469 table->data = &tmp;
2470 table->maxlen = sizeof(unsigned long);
08d4a246
MH
2471 ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2472 if (ret)
2473 goto out;
e5ff2159
AK
2474
2475 if (write) {
2476 spin_lock(&hugetlb_lock);
2477 h->nr_overcommit_huge_pages = tmp;
2478 spin_unlock(&hugetlb_lock);
2479 }
08d4a246
MH
2480out:
2481 return ret;
a3d0c6aa
NA
2482}
2483
1da177e4
LT
2484#endif /* CONFIG_SYSCTL */
2485
e1759c21 2486void hugetlb_report_meminfo(struct seq_file *m)
1da177e4 2487{
a5516438 2488 struct hstate *h = &default_hstate;
457c1b27
NA
2489 if (!hugepages_supported())
2490 return;
e1759c21 2491 seq_printf(m,
4f98a2fe
RR
2492 "HugePages_Total: %5lu\n"
2493 "HugePages_Free: %5lu\n"
2494 "HugePages_Rsvd: %5lu\n"
2495 "HugePages_Surp: %5lu\n"
2496 "Hugepagesize: %8lu kB\n",
a5516438
AK
2497 h->nr_huge_pages,
2498 h->free_huge_pages,
2499 h->resv_huge_pages,
2500 h->surplus_huge_pages,
2501 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
1da177e4
LT
2502}
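/*
 * Example /proc/meminfo output produced above (values are illustrative,
 * for a 2 MB default hstate):
 *
 *	HugePages_Total:      64
 *	HugePages_Free:       60
 *	HugePages_Rsvd:        4
 *	HugePages_Surp:        0
 *	Hugepagesize:       2048 kB
 */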
2503
2504int hugetlb_report_node_meminfo(int nid, char *buf)
2505{
a5516438 2506 struct hstate *h = &default_hstate;
457c1b27
NA
2507 if (!hugepages_supported())
2508 return 0;
1da177e4
LT
2509 return sprintf(buf,
2510 "Node %d HugePages_Total: %5u\n"
a1de0919
NA
2511 "Node %d HugePages_Free: %5u\n"
2512 "Node %d HugePages_Surp: %5u\n",
a5516438
AK
2513 nid, h->nr_huge_pages_node[nid],
2514 nid, h->free_huge_pages_node[nid],
2515 nid, h->surplus_huge_pages_node[nid]);
1da177e4
LT
2516}
2517
949f7ec5
DR
2518void hugetlb_show_meminfo(void)
2519{
2520 struct hstate *h;
2521 int nid;
2522
457c1b27
NA
2523 if (!hugepages_supported())
2524 return;
2525
949f7ec5
DR
2526 for_each_node_state(nid, N_MEMORY)
2527 for_each_hstate(h)
2528 pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
2529 nid,
2530 h->nr_huge_pages_node[nid],
2531 h->free_huge_pages_node[nid],
2532 h->surplus_huge_pages_node[nid],
2533 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2534}
2535
1da177e4
LT
2536/* Return the number pages of memory we physically have, in PAGE_SIZE units. */
2537unsigned long hugetlb_total_pages(void)
2538{
d0028588
WL
2539 struct hstate *h;
2540 unsigned long nr_total_pages = 0;
2541
2542 for_each_hstate(h)
2543 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
2544 return nr_total_pages;
1da177e4 2545}
1da177e4 2546
a5516438 2547static int hugetlb_acct_memory(struct hstate *h, long delta)
fc1b8a73
MG
2548{
2549 int ret = -ENOMEM;
2550
2551 spin_lock(&hugetlb_lock);
2552 /*
2553 * When cpuset is configured, it breaks the strict hugetlb page
2554 * reservation as the accounting is done on a global variable. Such
2555 * reservation is completely rubbish in the presence of cpuset because
2556 * the reservation is not checked against page availability for the
2557	 * current cpuset. The application can still be OOM'ed by the kernel
2558	 * if there are not enough free huge pages in the cpuset the task is in.
2559	 * Attempting to enforce strict accounting with cpuset is almost
2560	 * impossible (or too ugly) because cpuset is so fluid that
2561	 * tasks and memory nodes can be dynamically moved between cpusets.
2562 *
2563 * The change of semantics for shared hugetlb mapping with cpuset is
2564 * undesirable. However, in order to preserve some of the semantics,
2565 * we fall back to check against current free page availability as
2566 * a best attempt and hopefully to minimize the impact of changing
2567 * semantics that cpuset has.
2568 */
2569 if (delta > 0) {
a5516438 2570 if (gather_surplus_pages(h, delta) < 0)
fc1b8a73
MG
2571 goto out;
2572
a5516438
AK
2573 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
2574 return_unused_surplus_pages(h, delta);
fc1b8a73
MG
2575 goto out;
2576 }
2577 }
2578
2579 ret = 0;
2580 if (delta < 0)
a5516438 2581 return_unused_surplus_pages(h, (unsigned long) -delta);
fc1b8a73
MG
2582
2583out:
2584 spin_unlock(&hugetlb_lock);
2585 return ret;
2586}
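/*
 * Illustrative sketch only, compiled out: callers charge a reservation with
 * a positive delta and undo it with the matching negative delta ('h' and
 * 'npages' are stand-ins; compare the subpool and vm_op_close paths in this
 * file).
 */
#if 0
	if (hugetlb_acct_memory(h, npages))
		return -ENOMEM;		/* could not back the reservation */
	/* later, when the reservation is no longer needed */
	hugetlb_acct_memory(h, -npages);
#endif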
2587
84afd99b
AW
2588static void hugetlb_vm_op_open(struct vm_area_struct *vma)
2589{
f522c3ac 2590 struct resv_map *resv = vma_resv_map(vma);
84afd99b
AW
2591
2592 /*
2593 * This new VMA should share its siblings reservation map if present.
2594 * The VMA will only ever have a valid reservation map pointer where
2595 * it is being copied for another still existing VMA. As that VMA
25985edc 2596 * has a reference to the reservation map it cannot disappear until
84afd99b
AW
2597 * after this open call completes. It is therefore safe to take a
2598 * new reference here without additional locking.
2599 */
4e35f483 2600 if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
f522c3ac 2601 kref_get(&resv->refs);
84afd99b
AW
2602}
2603
a1e78772
MG
2604static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2605{
a5516438 2606 struct hstate *h = hstate_vma(vma);
f522c3ac 2607 struct resv_map *resv = vma_resv_map(vma);
90481622 2608 struct hugepage_subpool *spool = subpool_vma(vma);
4e35f483 2609 unsigned long reserve, start, end;
1c5ecae3 2610 long gbl_reserve;
84afd99b 2611
4e35f483
JK
2612 if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
2613 return;
84afd99b 2614
4e35f483
JK
2615 start = vma_hugecache_offset(h, vma, vma->vm_start);
2616 end = vma_hugecache_offset(h, vma, vma->vm_end);
84afd99b 2617
4e35f483 2618 reserve = (end - start) - region_count(resv, start, end);
84afd99b 2619
4e35f483
JK
2620 kref_put(&resv->refs, resv_map_release);
2621
2622 if (reserve) {
1c5ecae3
MK
2623 /*
2624 * Decrement reserve counts. The global reserve count may be
2625 * adjusted if the subpool has a minimum size.
2626 */
2627 gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
2628 hugetlb_acct_memory(h, -gbl_reserve);
84afd99b 2629 }
a1e78772
MG
2630}
2631
1da177e4
LT
2632/*
2633 * We cannot handle pagefaults against hugetlb pages at all. They cause
2634 * handle_mm_fault() to try to instantiate regular-sized pages in the
2635	 * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
2636 * this far.
2637 */
d0217ac0 2638static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1da177e4
LT
2639{
2640 BUG();
d0217ac0 2641 return 0;
1da177e4
LT
2642}
2643
f0f37e2f 2644const struct vm_operations_struct hugetlb_vm_ops = {
d0217ac0 2645 .fault = hugetlb_vm_op_fault,
84afd99b 2646 .open = hugetlb_vm_op_open,
a1e78772 2647 .close = hugetlb_vm_op_close,
1da177e4
LT
2648};
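/*
 * Illustrative sketch only, compiled out: hugetlbfs wires a mapping up to
 * these operations from its ->mmap handler, roughly as below (the real
 * code lives in fs/hugetlbfs/inode.c; the flag set shown is an
 * approximation).
 */
#if 0
	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
	vma->vm_ops = &hugetlb_vm_ops;
#endif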
2649
1e8f889b
DG
2650static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
2651 int writable)
63551ae0
DG
2652{
2653 pte_t entry;
2654
1e8f889b 2655 if (writable) {
106c992a
GS
2656 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
2657 vma->vm_page_prot)));
63551ae0 2658 } else {
106c992a
GS
2659 entry = huge_pte_wrprotect(mk_huge_pte(page,
2660 vma->vm_page_prot));
63551ae0
DG
2661 }
2662 entry = pte_mkyoung(entry);
2663 entry = pte_mkhuge(entry);
d9ed9faa 2664 entry = arch_make_huge_pte(entry, vma, page, writable);
63551ae0
DG
2665
2666 return entry;
2667}
2668
1e8f889b
DG
2669static void set_huge_ptep_writable(struct vm_area_struct *vma,
2670 unsigned long address, pte_t *ptep)
2671{
2672 pte_t entry;
2673
106c992a 2674 entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
32f84528 2675 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
4b3073e1 2676 update_mmu_cache(vma, address, ptep);
1e8f889b
DG
2677}
2678
4a705fef
NH
2679static int is_hugetlb_entry_migration(pte_t pte)
2680{
2681 swp_entry_t swp;
2682
2683 if (huge_pte_none(pte) || pte_present(pte))
2684 return 0;
2685 swp = pte_to_swp_entry(pte);
2686 if (non_swap_entry(swp) && is_migration_entry(swp))
2687 return 1;
2688 else
2689 return 0;
2690}
2691
2692static int is_hugetlb_entry_hwpoisoned(pte_t pte)
2693{
2694 swp_entry_t swp;
2695
2696 if (huge_pte_none(pte) || pte_present(pte))
2697 return 0;
2698 swp = pte_to_swp_entry(pte);
2699 if (non_swap_entry(swp) && is_hwpoison_entry(swp))
2700 return 1;
2701 else
2702 return 0;
2703}
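/*
 * Illustrative sketch only, compiled out: how a fault path classifies a
 * non-present huge PTE with the helpers above (compare hugetlb_fault()
 * further down).
 */
#if 0
	entry = huge_ptep_get(ptep);
	if (is_hugetlb_entry_migration(entry))
		migration_entry_wait_huge(vma, mm, ptep);	/* retry later */
	else if (is_hugetlb_entry_hwpoisoned(entry))
		ret = VM_FAULT_HWPOISON_LARGE;			/* poisoned */
#endif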
1e8f889b 2704
63551ae0
DG
2705int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
2706 struct vm_area_struct *vma)
2707{
2708 pte_t *src_pte, *dst_pte, entry;
2709 struct page *ptepage;
1c59827d 2710 unsigned long addr;
1e8f889b 2711 int cow;
a5516438
AK
2712 struct hstate *h = hstate_vma(vma);
2713 unsigned long sz = huge_page_size(h);
e8569dd2
AS
2714 unsigned long mmun_start; /* For mmu_notifiers */
2715 unsigned long mmun_end; /* For mmu_notifiers */
2716 int ret = 0;
1e8f889b
DG
2717
2718 cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
63551ae0 2719
e8569dd2
AS
2720 mmun_start = vma->vm_start;
2721 mmun_end = vma->vm_end;
2722 if (cow)
2723 mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);
2724
a5516438 2725 for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
cb900f41 2726 spinlock_t *src_ptl, *dst_ptl;
c74df32c
HD
2727 src_pte = huge_pte_offset(src, addr);
2728 if (!src_pte)
2729 continue;
a5516438 2730 dst_pte = huge_pte_alloc(dst, addr, sz);
e8569dd2
AS
2731 if (!dst_pte) {
2732 ret = -ENOMEM;
2733 break;
2734 }
c5c99429
LW
2735
2736 /* If the pagetables are shared don't copy or take references */
2737 if (dst_pte == src_pte)
2738 continue;
2739
cb900f41
KS
2740 dst_ptl = huge_pte_lock(h, dst, dst_pte);
2741 src_ptl = huge_pte_lockptr(h, src, src_pte);
2742 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
4a705fef
NH
2743 entry = huge_ptep_get(src_pte);
2744 if (huge_pte_none(entry)) { /* skip none entry */
2745 ;
2746 } else if (unlikely(is_hugetlb_entry_migration(entry) ||
2747 is_hugetlb_entry_hwpoisoned(entry))) {
2748 swp_entry_t swp_entry = pte_to_swp_entry(entry);
2749
2750 if (is_write_migration_entry(swp_entry) && cow) {
2751 /*
2752 * COW mappings require pages in both
2753 * parent and child to be set to read.
2754 */
2755 make_migration_entry_read(&swp_entry);
2756 entry = swp_entry_to_pte(swp_entry);
2757 set_huge_pte_at(src, addr, src_pte, entry);
2758 }
2759 set_huge_pte_at(dst, addr, dst_pte, entry);
2760 } else {
34ee645e 2761 if (cow) {
7f2e9525 2762 huge_ptep_set_wrprotect(src, addr, src_pte);
34ee645e
JR
2763 mmu_notifier_invalidate_range(src, mmun_start,
2764 mmun_end);
2765 }
0253d634 2766 entry = huge_ptep_get(src_pte);
1c59827d
HD
2767 ptepage = pte_page(entry);
2768 get_page(ptepage);
0fe6e20b 2769 page_dup_rmap(ptepage);
1c59827d
HD
2770 set_huge_pte_at(dst, addr, dst_pte, entry);
2771 }
cb900f41
KS
2772 spin_unlock(src_ptl);
2773 spin_unlock(dst_ptl);
63551ae0 2774 }
63551ae0 2775
e8569dd2
AS
2776 if (cow)
2777 mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end);
2778
2779 return ret;
63551ae0
DG
2780}
2781
24669e58
AK
2782void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
2783 unsigned long start, unsigned long end,
2784 struct page *ref_page)
63551ae0 2785{
24669e58 2786 int force_flush = 0;
63551ae0
DG
2787 struct mm_struct *mm = vma->vm_mm;
2788 unsigned long address;
c7546f8f 2789 pte_t *ptep;
63551ae0 2790 pte_t pte;
cb900f41 2791 spinlock_t *ptl;
63551ae0 2792 struct page *page;
a5516438
AK
2793 struct hstate *h = hstate_vma(vma);
2794 unsigned long sz = huge_page_size(h);
2ec74c3e
SG
2795 const unsigned long mmun_start = start; /* For mmu_notifiers */
2796 const unsigned long mmun_end = end; /* For mmu_notifiers */
a5516438 2797
63551ae0 2798 WARN_ON(!is_vm_hugetlb_page(vma));
a5516438
AK
2799 BUG_ON(start & ~huge_page_mask(h));
2800 BUG_ON(end & ~huge_page_mask(h));
63551ae0 2801
24669e58 2802 tlb_start_vma(tlb, vma);
2ec74c3e 2803 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
569f48b8 2804 address = start;
24669e58 2805again:
569f48b8 2806 for (; address < end; address += sz) {
c7546f8f 2807 ptep = huge_pte_offset(mm, address);
4c887265 2808 if (!ptep)
c7546f8f
DG
2809 continue;
2810
cb900f41 2811 ptl = huge_pte_lock(h, mm, ptep);
39dde65c 2812 if (huge_pmd_unshare(mm, &address, ptep))
cb900f41 2813 goto unlock;
39dde65c 2814
6629326b
HD
2815 pte = huge_ptep_get(ptep);
2816 if (huge_pte_none(pte))
cb900f41 2817 goto unlock;
6629326b
HD
2818
2819 /*
9fbc1f63
NH
2820 * Migrating hugepage or HWPoisoned hugepage is already
2821 * unmapped and its refcount is dropped, so just clear pte here.
6629326b 2822 */
9fbc1f63 2823 if (unlikely(!pte_present(pte))) {
106c992a 2824 huge_pte_clear(mm, address, ptep);
cb900f41 2825 goto unlock;
8c4894c6 2826 }
6629326b
HD
2827
2828 page = pte_page(pte);
04f2cbe3
MG
2829 /*
2830 * If a reference page is supplied, it is because a specific
2831 * page is being unmapped, not a range. Ensure the page we
2832 * are about to unmap is the actual page of interest.
2833 */
2834 if (ref_page) {
04f2cbe3 2835 if (page != ref_page)
cb900f41 2836 goto unlock;
04f2cbe3
MG
2837
2838 /*
2839 * Mark the VMA as having unmapped its page so that
2840 * future faults in this VMA will fail rather than
2841 * looking like data was lost
2842 */
2843 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
2844 }
2845
c7546f8f 2846 pte = huge_ptep_get_and_clear(mm, address, ptep);
24669e58 2847 tlb_remove_tlb_entry(tlb, ptep, address);
106c992a 2848 if (huge_pte_dirty(pte))
6649a386 2849 set_page_dirty(page);
9e81130b 2850
24669e58
AK
2851 page_remove_rmap(page);
2852 force_flush = !__tlb_remove_page(tlb, page);
cb900f41 2853 if (force_flush) {
569f48b8 2854 address += sz;
cb900f41 2855 spin_unlock(ptl);
24669e58 2856 break;
cb900f41 2857 }
9e81130b 2858 /* Bail out after unmapping reference page if supplied */
cb900f41
KS
2859 if (ref_page) {
2860 spin_unlock(ptl);
9e81130b 2861 break;
cb900f41
KS
2862 }
2863unlock:
2864 spin_unlock(ptl);
63551ae0 2865 }
24669e58
AK
2866 /*
2867 * mmu_gather ran out of room to batch pages, we break out of
2868 * the PTE lock to avoid doing the potential expensive TLB invalidate
2869 * and page-free while holding it.
2870 */
2871 if (force_flush) {
2872 force_flush = 0;
2873 tlb_flush_mmu(tlb);
2874 if (address < end && !ref_page)
2875 goto again;
fe1668ae 2876 }
2ec74c3e 2877 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
24669e58 2878 tlb_end_vma(tlb, vma);
1da177e4 2879}
63551ae0 2880
d833352a
MG
2881void __unmap_hugepage_range_final(struct mmu_gather *tlb,
2882 struct vm_area_struct *vma, unsigned long start,
2883 unsigned long end, struct page *ref_page)
2884{
2885 __unmap_hugepage_range(tlb, vma, start, end, ref_page);
2886
2887 /*
2888 * Clear this flag so that x86's huge_pmd_share page_table_shareable
2889 * test will fail on a vma being torn down, and not grab a page table
2890 * on its way out. We're lucky that the flag has such an appropriate
2891 * name, and can in fact be safely cleared here. We could clear it
2892 * before the __unmap_hugepage_range above, but all that's necessary
c8c06efa 2893 * is to clear it before releasing the i_mmap_rwsem. This works
d833352a 2894 * because in the context this is called, the VMA is about to be
c8c06efa 2895 * destroyed and the i_mmap_rwsem is held.
d833352a
MG
2896 */
2897 vma->vm_flags &= ~VM_MAYSHARE;
2898}
2899
502717f4 2900void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
04f2cbe3 2901 unsigned long end, struct page *ref_page)
502717f4 2902{
24669e58
AK
2903 struct mm_struct *mm;
2904 struct mmu_gather tlb;
2905
2906 mm = vma->vm_mm;
2907
2b047252 2908 tlb_gather_mmu(&tlb, mm, start, end);
24669e58
AK
2909 __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
2910 tlb_finish_mmu(&tlb, start, end);
502717f4
CK
2911}
2912
04f2cbe3
MG
2913/*
2914 * This is called when the original mapper is failing to COW a MAP_PRIVATE
2915	 * mapping it owns the reserve page for. The intention is to unmap the page
2916 * from other VMAs and let the children be SIGKILLed if they are faulting the
2917 * same region.
2918 */
2f4612af
DB
2919static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
2920 struct page *page, unsigned long address)
04f2cbe3 2921{
7526674d 2922 struct hstate *h = hstate_vma(vma);
04f2cbe3
MG
2923 struct vm_area_struct *iter_vma;
2924 struct address_space *mapping;
04f2cbe3
MG
2925 pgoff_t pgoff;
2926
2927 /*
2928 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
2929 * from page cache lookup which is in HPAGE_SIZE units.
2930 */
7526674d 2931 address = address & huge_page_mask(h);
36e4f20a
MH
2932 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
2933 vma->vm_pgoff;
496ad9aa 2934 mapping = file_inode(vma->vm_file)->i_mapping;
04f2cbe3 2935
4eb2b1dc
MG
2936 /*
2937 * Take the mapping lock for the duration of the table walk. As
2938 * this mapping should be shared between all the VMAs,
2939 * __unmap_hugepage_range() is called as the lock is already held
2940 */
83cde9e8 2941 i_mmap_lock_write(mapping);
6b2dbba8 2942 vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
04f2cbe3
MG
2943 /* Do not unmap the current VMA */
2944 if (iter_vma == vma)
2945 continue;
2946
2947 /*
2948 * Unmap the page from other VMAs without their own reserves.
2949 * They get marked to be SIGKILLed if they fault in these
2950 * areas. This is because a future no-page fault on this VMA
2951 * could insert a zeroed page instead of the data existing
2952 * from the time of fork. This would look like data corruption
2953 */
2954 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
24669e58
AK
2955 unmap_hugepage_range(iter_vma, address,
2956 address + huge_page_size(h), page);
04f2cbe3 2957 }
83cde9e8 2958 i_mmap_unlock_write(mapping);
04f2cbe3
MG
2959}
2960
0fe6e20b
NH
2961/*
2962 * Hugetlb_cow() should be called with page lock of the original hugepage held.
ef009b25
MH
2963 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
2964 * cannot race with other handlers or page migration.
2965 * Keep the pte_same checks anyway to make transition from the mutex easier.
0fe6e20b 2966 */
1e8f889b 2967static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
04f2cbe3 2968 unsigned long address, pte_t *ptep, pte_t pte,
cb900f41 2969 struct page *pagecache_page, spinlock_t *ptl)
1e8f889b 2970{
a5516438 2971 struct hstate *h = hstate_vma(vma);
1e8f889b 2972 struct page *old_page, *new_page;
ad4404a2 2973 int ret = 0, outside_reserve = 0;
2ec74c3e
SG
2974 unsigned long mmun_start; /* For mmu_notifiers */
2975 unsigned long mmun_end; /* For mmu_notifiers */
1e8f889b
DG
2976
2977 old_page = pte_page(pte);
2978
04f2cbe3 2979retry_avoidcopy:
1e8f889b
DG
2980 /* If no-one else is actually using this page, avoid the copy
2981 * and just make the page writable */
37a2140d
JK
2982 if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
2983 page_move_anon_rmap(old_page, vma, address);
1e8f889b 2984 set_huge_ptep_writable(vma, address, ptep);
83c54070 2985 return 0;
1e8f889b
DG
2986 }
2987
04f2cbe3
MG
2988 /*
2989 * If the process that created a MAP_PRIVATE mapping is about to
2990 * perform a COW due to a shared page count, attempt to satisfy
2991 * the allocation without using the existing reserves. The pagecache
2992 * page is used to determine if the reserve at this address was
2993 * consumed or not. If reserves were used, a partial faulted mapping
2994 * at the time of fork() could consume its reserves on COW instead
2995 * of the full address range.
2996 */
5944d011 2997 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
04f2cbe3
MG
2998 old_page != pagecache_page)
2999 outside_reserve = 1;
3000
1e8f889b 3001 page_cache_get(old_page);
b76c8cfb 3002
ad4404a2
DB
3003 /*
3004 * Drop page table lock as buddy allocator may be called. It will
3005 * be acquired again before returning to the caller, as expected.
3006 */
cb900f41 3007 spin_unlock(ptl);
04f2cbe3 3008 new_page = alloc_huge_page(vma, address, outside_reserve);
1e8f889b 3009
2fc39cec 3010 if (IS_ERR(new_page)) {
04f2cbe3
MG
3011 /*
3012 * If a process owning a MAP_PRIVATE mapping fails to COW,
3013 * it is due to references held by a child and an insufficient
3014 * huge page pool. To guarantee the original mappers
3015 * reliability, unmap the page from child processes. The child
3016 * may get SIGKILLed if it later faults.
3017 */
3018 if (outside_reserve) {
ad4404a2 3019 page_cache_release(old_page);
04f2cbe3 3020 BUG_ON(huge_pte_none(pte));
3021 unmap_ref_private(mm, vma, old_page, address);
3022 BUG_ON(huge_pte_none(pte));
3023 spin_lock(ptl);
3024 ptep = huge_pte_offset(mm, address & huge_page_mask(h));
3025 if (likely(ptep &&
3026 pte_same(huge_ptep_get(ptep), pte)))
3027 goto retry_avoidcopy;
3028 /*
3029 * race occurs while re-acquiring page table
3030 * lock, and our job is done.
3031 */
3032 return 0;
04f2cbe3
MG
3033 }
3034
ad4404a2
DB
3035 ret = (PTR_ERR(new_page) == -ENOMEM) ?
3036 VM_FAULT_OOM : VM_FAULT_SIGBUS;
3037 goto out_release_old;
1e8f889b
DG
3038 }
3039
0fe6e20b
NH
3040 /*
3041 * When the original hugepage is shared one, it does not have
3042 * anon_vma prepared.
3043 */
44e2aa93 3044 if (unlikely(anon_vma_prepare(vma))) {
ad4404a2
DB
3045 ret = VM_FAULT_OOM;
3046 goto out_release_all;
44e2aa93 3047 }
0fe6e20b 3048
47ad8475
AA
3049 copy_user_huge_page(new_page, old_page, address, vma,
3050 pages_per_huge_page(h));
0ed361de 3051 __SetPageUptodate(new_page);
bcc54222 3052 set_page_huge_active(new_page);
1e8f889b 3053
2ec74c3e
SG
3054 mmun_start = address & huge_page_mask(h);
3055 mmun_end = mmun_start + huge_page_size(h);
3056 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
ad4404a2 3057
b76c8cfb 3058 /*
cb900f41 3059 * Retake the page table lock to check for racing updates
b76c8cfb
LW
3060 * before the page tables are altered
3061 */
cb900f41 3062 spin_lock(ptl);
a5516438 3063 ptep = huge_pte_offset(mm, address & huge_page_mask(h));
a9af0c5d 3064 if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
07443a85
JK
3065 ClearPagePrivate(new_page);
3066
1e8f889b 3067 /* Break COW */
8fe627ec 3068 huge_ptep_clear_flush(vma, address, ptep);
34ee645e 3069 mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
1e8f889b
DG
3070 set_huge_pte_at(mm, address, ptep,
3071 make_huge_pte(vma, new_page, 1));
0fe6e20b 3072 page_remove_rmap(old_page);
cd67f0d2 3073 hugepage_add_new_anon_rmap(new_page, vma, address);
1e8f889b
DG
3074 /* Make the old page be freed below */
3075 new_page = old_page;
3076 }
cb900f41 3077 spin_unlock(ptl);
2ec74c3e 3078 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
ad4404a2 3079out_release_all:
1e8f889b 3080 page_cache_release(new_page);
ad4404a2 3081out_release_old:
1e8f889b 3082 page_cache_release(old_page);
8312034f 3083
ad4404a2
DB
3084 spin_lock(ptl); /* Caller expects lock to be held */
3085 return ret;
1e8f889b
DG
3086}
3087
04f2cbe3 3088/* Return the pagecache page at a given address within a VMA */
a5516438
AK
3089static struct page *hugetlbfs_pagecache_page(struct hstate *h,
3090 struct vm_area_struct *vma, unsigned long address)
04f2cbe3
MG
3091{
3092 struct address_space *mapping;
e7c4b0bf 3093 pgoff_t idx;
04f2cbe3
MG
3094
3095 mapping = vma->vm_file->f_mapping;
a5516438 3096 idx = vma_hugecache_offset(h, vma, address);
04f2cbe3
MG
3097
3098 return find_lock_page(mapping, idx);
3099}
3100
3ae77f43
HD
3101/*
3102 * Return whether there is a pagecache page to back given address within VMA.
3103 * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
3104 */
3105static bool hugetlbfs_pagecache_present(struct hstate *h,
2a15efc9
HD
3106 struct vm_area_struct *vma, unsigned long address)
3107{
3108 struct address_space *mapping;
3109 pgoff_t idx;
3110 struct page *page;
3111
3112 mapping = vma->vm_file->f_mapping;
3113 idx = vma_hugecache_offset(h, vma, address);
3114
3115 page = find_get_page(mapping, idx);
3116 if (page)
3117 put_page(page);
3118 return page != NULL;
3119}
3120
a1ed3dda 3121static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
8382d914
DB
3122 struct address_space *mapping, pgoff_t idx,
3123 unsigned long address, pte_t *ptep, unsigned int flags)
ac9b9c66 3124{
a5516438 3125 struct hstate *h = hstate_vma(vma);
ac9b9c66 3126 int ret = VM_FAULT_SIGBUS;
409eb8c2 3127 int anon_rmap = 0;
4c887265 3128 unsigned long size;
4c887265 3129 struct page *page;
1e8f889b 3130 pte_t new_pte;
cb900f41 3131 spinlock_t *ptl;
4c887265 3132
04f2cbe3
MG
3133 /*
3134 * Currently, we are forced to kill the process in the event the
3135 * original mapper has unmapped pages from the child due to a failed
25985edc 3136 * COW. Warn that such a situation has occurred as it may not be obvious
04f2cbe3
MG
3137 */
3138 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
ffb22af5
AM
3139 pr_warning("PID %d killed due to inadequate hugepage pool\n",
3140 current->pid);
04f2cbe3
MG
3141 return ret;
3142 }
3143
4c887265
AL
3144 /*
3145 * Use page lock to guard against racing truncation
3146 * before we get page_table_lock.
3147 */
6bda666a
CL
3148retry:
3149 page = find_lock_page(mapping, idx);
3150 if (!page) {
a5516438 3151 size = i_size_read(mapping->host) >> huge_page_shift(h);
ebed4bfc
HD
3152 if (idx >= size)
3153 goto out;
04f2cbe3 3154 page = alloc_huge_page(vma, address, 0);
2fc39cec 3155 if (IS_ERR(page)) {
76dcee75
AK
3156 ret = PTR_ERR(page);
3157 if (ret == -ENOMEM)
3158 ret = VM_FAULT_OOM;
3159 else
3160 ret = VM_FAULT_SIGBUS;
6bda666a
CL
3161 goto out;
3162 }
47ad8475 3163 clear_huge_page(page, address, pages_per_huge_page(h));
0ed361de 3164 __SetPageUptodate(page);
bcc54222 3165 set_page_huge_active(page);
ac9b9c66 3166
f83a275d 3167 if (vma->vm_flags & VM_MAYSHARE) {
6bda666a 3168 int err;
45c682a6 3169 struct inode *inode = mapping->host;
6bda666a
CL
3170
3171 err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
3172 if (err) {
3173 put_page(page);
6bda666a
CL
3174 if (err == -EEXIST)
3175 goto retry;
3176 goto out;
3177 }
07443a85 3178 ClearPagePrivate(page);
45c682a6
KC
3179
3180 spin_lock(&inode->i_lock);
a5516438 3181 inode->i_blocks += blocks_per_huge_page(h);
45c682a6 3182 spin_unlock(&inode->i_lock);
23be7468 3183 } else {
6bda666a 3184 lock_page(page);
0fe6e20b
NH
3185 if (unlikely(anon_vma_prepare(vma))) {
3186 ret = VM_FAULT_OOM;
3187 goto backout_unlocked;
3188 }
409eb8c2 3189 anon_rmap = 1;
23be7468 3190 }
0fe6e20b 3191 } else {
998b4382
NH
3192 /*
3193 * If memory error occurs between mmap() and fault, some process
3194 * don't have hwpoisoned swap entry for errored virtual address.
3195 * So we need to block hugepage fault by PG_hwpoison bit check.
3196 */
3197 if (unlikely(PageHWPoison(page))) {
32f84528 3198 ret = VM_FAULT_HWPOISON |
972dc4de 3199 VM_FAULT_SET_HINDEX(hstate_index(h));
998b4382
NH
3200 goto backout_unlocked;
3201 }
6bda666a 3202 }
1e8f889b 3203
57303d80
AW
3204 /*
3205 * If we are going to COW a private mapping later, we examine the
3206 * pending reservations for this page now. This will ensure that
3207 * any allocations necessary to record that reservation occur outside
3208 * the spinlock.
3209 */
788c7df4 3210 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
2b26736c
AW
3211 if (vma_needs_reservation(h, vma, address) < 0) {
3212 ret = VM_FAULT_OOM;
3213 goto backout_unlocked;
3214 }
57303d80 3215
cb900f41
KS
3216 ptl = huge_pte_lockptr(h, mm, ptep);
3217 spin_lock(ptl);
a5516438 3218 size = i_size_read(mapping->host) >> huge_page_shift(h);
4c887265
AL
3219 if (idx >= size)
3220 goto backout;
3221
83c54070 3222 ret = 0;
7f2e9525 3223 if (!huge_pte_none(huge_ptep_get(ptep)))
4c887265
AL
3224 goto backout;
3225
07443a85
JK
3226 if (anon_rmap) {
3227 ClearPagePrivate(page);
409eb8c2 3228 hugepage_add_new_anon_rmap(page, vma, address);
ac714904 3229 } else
409eb8c2 3230 page_dup_rmap(page);
1e8f889b
DG
3231 new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
3232 && (vma->vm_flags & VM_SHARED)));
3233 set_huge_pte_at(mm, address, ptep, new_pte);
3234
788c7df4 3235 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
1e8f889b 3236 /* Optimization, do the COW without a second fault */
cb900f41 3237 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
1e8f889b
DG
3238 }
3239
cb900f41 3240 spin_unlock(ptl);
4c887265
AL
3241 unlock_page(page);
3242out:
ac9b9c66 3243 return ret;
4c887265
AL
3244
3245backout:
cb900f41 3246 spin_unlock(ptl);
2b26736c 3247backout_unlocked:
4c887265
AL
3248 unlock_page(page);
3249 put_page(page);
3250 goto out;
ac9b9c66
HD
3251}
3252
8382d914
DB
3253#ifdef CONFIG_SMP
3254static u32 fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3255 struct vm_area_struct *vma,
3256 struct address_space *mapping,
3257 pgoff_t idx, unsigned long address)
3258{
3259 unsigned long key[2];
3260 u32 hash;
3261
3262 if (vma->vm_flags & VM_SHARED) {
3263 key[0] = (unsigned long) mapping;
3264 key[1] = idx;
3265 } else {
3266 key[0] = (unsigned long) mm;
3267 key[1] = address >> huge_page_shift(h);
3268 }
3269
3270 hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
3271
3272 return hash & (num_fault_mutexes - 1);
3273}
3274#else
3275/*
3276	 * For uniprocessor systems we always use a single mutex, so just
3277 * return 0 and avoid the hashing overhead.
3278 */
3279static u32 fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3280 struct vm_area_struct *vma,
3281 struct address_space *mapping,
3282 pgoff_t idx, unsigned long address)
3283{
3284 return 0;
3285}
3286#endif
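/*
 * Illustrative sketch only, compiled out: the serialization pattern built
 * on fault_mutex_hash(), as used by hugetlb_fault() below.  Faults on the
 * same logical page (same mapping+index for shared mappings, same
 * mm+address otherwise) hash to the same mutex and are serialized.
 */
#if 0
	hash = fault_mutex_hash(h, mm, vma, mapping, idx, address);
	mutex_lock(&htlb_fault_mutex_table[hash]);
	/* allocate/instantiate the hugepage for 'address' */
	mutex_unlock(&htlb_fault_mutex_table[hash]);
#endif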
3287
86e5216f 3288int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
788c7df4 3289 unsigned long address, unsigned int flags)
86e5216f 3290{
8382d914 3291 pte_t *ptep, entry;
cb900f41 3292 spinlock_t *ptl;
1e8f889b 3293 int ret;
8382d914
DB
3294 u32 hash;
3295 pgoff_t idx;
0fe6e20b 3296 struct page *page = NULL;
57303d80 3297 struct page *pagecache_page = NULL;
a5516438 3298 struct hstate *h = hstate_vma(vma);
8382d914 3299 struct address_space *mapping;
0f792cf9 3300 int need_wait_lock = 0;
86e5216f 3301
1e16a539
KH
3302 address &= huge_page_mask(h);
3303
fd6a03ed
NH
3304 ptep = huge_pte_offset(mm, address);
3305 if (ptep) {
3306 entry = huge_ptep_get(ptep);
290408d4 3307 if (unlikely(is_hugetlb_entry_migration(entry))) {
cb900f41 3308 migration_entry_wait_huge(vma, mm, ptep);
290408d4
NH
3309 return 0;
3310 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
32f84528 3311 return VM_FAULT_HWPOISON_LARGE |
972dc4de 3312 VM_FAULT_SET_HINDEX(hstate_index(h));
fd6a03ed
NH
3313 }
3314
a5516438 3315 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
86e5216f
AL
3316 if (!ptep)
3317 return VM_FAULT_OOM;
3318
8382d914
DB
3319 mapping = vma->vm_file->f_mapping;
3320 idx = vma_hugecache_offset(h, vma, address);
3321
3935baa9
DG
3322 /*
3323 * Serialize hugepage allocation and instantiation, so that we don't
3324 * get spurious allocation failures if two CPUs race to instantiate
3325 * the same page in the page cache.
3326 */
8382d914
DB
3327 hash = fault_mutex_hash(h, mm, vma, mapping, idx, address);
3328 mutex_lock(&htlb_fault_mutex_table[hash]);
3329
7f2e9525
GS
3330 entry = huge_ptep_get(ptep);
3331 if (huge_pte_none(entry)) {
8382d914 3332 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
b4d1d99f 3333 goto out_mutex;
3935baa9 3334 }
86e5216f 3335
83c54070 3336 ret = 0;
1e8f889b 3337
0f792cf9
NH
3338 /*
3339 * entry could be a migration/hwpoison entry at this point, so this
3340 * check prevents the kernel from going below assuming that we have
3341	 * an active hugepage in the pagecache. This goto expects the 2nd page fault,
3342 * and is_hugetlb_entry_(migration|hwpoisoned) check will properly
3343 * handle it.
3344 */
3345 if (!pte_present(entry))
3346 goto out_mutex;
3347
57303d80
AW
3348 /*
3349 * If we are going to COW the mapping later, we examine the pending
3350 * reservations for this page now. This will ensure that any
3351 * allocations necessary to record that reservation occur outside the
3352 * spinlock. For private mappings, we also lookup the pagecache
3353 * page now as it is used to determine if a reservation has been
3354 * consumed.
3355 */
106c992a 3356 if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
2b26736c
AW
3357 if (vma_needs_reservation(h, vma, address) < 0) {
3358 ret = VM_FAULT_OOM;
b4d1d99f 3359 goto out_mutex;
2b26736c 3360 }
57303d80 3361
f83a275d 3362 if (!(vma->vm_flags & VM_MAYSHARE))
57303d80
AW
3363 pagecache_page = hugetlbfs_pagecache_page(h,
3364 vma, address);
3365 }
3366
0f792cf9
NH
3367 ptl = huge_pte_lock(h, mm, ptep);
3368
3369 /* Check for a racing update before calling hugetlb_cow */
3370 if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
3371 goto out_ptl;
3372
56c9cfb1
NH
3373 /*
3374 * hugetlb_cow() requires page locks of pte_page(entry) and
3375 * pagecache_page, so here we need to take the former one
3376 * when page != pagecache_page or !pagecache_page.
56c9cfb1
NH
3377 */
3378 page = pte_page(entry);
3379 if (page != pagecache_page)
0f792cf9
NH
3380 if (!trylock_page(page)) {
3381 need_wait_lock = 1;
3382 goto out_ptl;
3383 }
b4d1d99f 3384
0f792cf9 3385 get_page(page);
b4d1d99f 3386
788c7df4 3387 if (flags & FAULT_FLAG_WRITE) {
106c992a 3388 if (!huge_pte_write(entry)) {
57303d80 3389 ret = hugetlb_cow(mm, vma, address, ptep, entry,
cb900f41 3390 pagecache_page, ptl);
0f792cf9 3391 goto out_put_page;
b4d1d99f 3392 }
106c992a 3393 entry = huge_pte_mkdirty(entry);
b4d1d99f
DG
3394 }
3395 entry = pte_mkyoung(entry);
788c7df4
HD
3396 if (huge_ptep_set_access_flags(vma, address, ptep, entry,
3397 flags & FAULT_FLAG_WRITE))
4b3073e1 3398 update_mmu_cache(vma, address, ptep);
0f792cf9
NH
3399out_put_page:
3400 if (page != pagecache_page)
3401 unlock_page(page);
3402 put_page(page);
cb900f41
KS
3403out_ptl:
3404 spin_unlock(ptl);
57303d80
AW
3405
3406 if (pagecache_page) {
3407 unlock_page(pagecache_page);
3408 put_page(pagecache_page);
3409 }
b4d1d99f 3410out_mutex:
8382d914 3411 mutex_unlock(&htlb_fault_mutex_table[hash]);
0f792cf9
NH
3412 /*
3413 * Generally it is safe to hold a refcount while waiting for a page lock.
3414 * Here, however, we only wait to defer the next page fault and avoid a
3415 * busy loop; the page is not touched after it is unlocked and before we
3416 * return from the current page fault. So we are safe from accessing a
3417 * freed page even though we wait here without taking a refcount.
3418 */
3419 if (need_wait_lock)
3420 wait_on_page_locked(page);
1e8f889b 3421 return ret;
86e5216f
AL
3422}
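/*
 * hugetlb_fault() above is reached through an ordinary page fault on a
 * hugetlb VMA. A minimal user-space sketch that exercises it: map an
 * anonymous MAP_HUGETLB region and touch it, so the first write takes the
 * hugetlb_no_page()/hugetlb_fault() path. The 2MB size is an assumption
 * (the x86_64 default); hugepages must already be reserved, e.g. via
 * /proc/sys/vm/nr_hugepages, or the mmap() will fail.
 *
 *      #define _GNU_SOURCE
 *      #include <stdio.h>
 *      #include <string.h>
 *      #include <sys/mman.h>
 *
 *      #define LEN (2UL * 1024 * 1024)         // one 2MB hugepage
 *
 *      int main(void)
 *      {
 *              char *p = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
 *                             MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
 *                             -1, 0);
 *              if (p == MAP_FAILED) {
 *                      perror("mmap(MAP_HUGETLB)");  // no hugepages reserved?
 *                      return 1;
 *              }
 *              memset(p, 0, LEN);      // first write faults the hugepage in
 *              printf("faulted in %lu bytes of hugetlb memory\n", LEN);
 *              munmap(p, LEN);
 *              return 0;
 *      }
 */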
3423
28a35716
ML
3424long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
3425 struct page **pages, struct vm_area_struct **vmas,
3426 unsigned long *position, unsigned long *nr_pages,
3427 long i, unsigned int flags)
63551ae0 3428{
d5d4b0aa
CK
3429 unsigned long pfn_offset;
3430 unsigned long vaddr = *position;
28a35716 3431 unsigned long remainder = *nr_pages;
a5516438 3432 struct hstate *h = hstate_vma(vma);
63551ae0 3433
63551ae0 3434 while (vaddr < vma->vm_end && remainder) {
4c887265 3435 pte_t *pte;
cb900f41 3436 spinlock_t *ptl = NULL;
2a15efc9 3437 int absent;
4c887265 3438 struct page *page;
63551ae0 3439
02057967
DR
3440 /*
3441 * If we have a pending SIGKILL, don't keep faulting pages and
3442 * potentially allocating memory.
3443 */
3444 if (unlikely(fatal_signal_pending(current))) {
3445 remainder = 0;
3446 break;
3447 }
3448
4c887265
AL
3449 /*
3450 * Some archs (sparc64, sh*) have multiple pte_ts for
2a15efc9 3451 * each hugepage. We have to make sure we get the
4c887265 3452 * first one, for the page indexing below to work.
cb900f41
KS
3453 *
3454 * Note that page table lock is not held when pte is null.
4c887265 3455 */
a5516438 3456 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
cb900f41
KS
3457 if (pte)
3458 ptl = huge_pte_lock(h, mm, pte);
2a15efc9
HD
3459 absent = !pte || huge_pte_none(huge_ptep_get(pte));
3460
3461 /*
3462 * When coredumping, it suits get_dump_page if we just return
3ae77f43
HD
3463 * an error where there's an empty slot with no huge pagecache
3464 * to back it. This way, we avoid allocating a hugepage, and
3465 * the sparse dumpfile avoids allocating disk blocks, but its
3466 * huge holes still show up with zeroes where they need to be.
2a15efc9 3467 */
3ae77f43
HD
3468 if (absent && (flags & FOLL_DUMP) &&
3469 !hugetlbfs_pagecache_present(h, vma, vaddr)) {
cb900f41
KS
3470 if (pte)
3471 spin_unlock(ptl);
2a15efc9
HD
3472 remainder = 0;
3473 break;
3474 }
63551ae0 3475
9cc3a5bd
NH
3476 /*
3477 * We need to call hugetlb_fault for both hugepages under migration
3478 * (in which case hugetlb_fault waits for the migration) and
3479 * hwpoisoned hugepages (in which case we need to prevent the
3480 * caller from accessing them). To do this we use is_swap_pte
3481 * here instead of is_hugetlb_entry_migration and
3482 * is_hugetlb_entry_hwpoisoned, because it covers
3483 * both cases and because we can't follow correct pages
3484 * directly from any kind of swap entry.
3485 */
3486 if (absent || is_swap_pte(huge_ptep_get(pte)) ||
106c992a
GS
3487 ((flags & FOLL_WRITE) &&
3488 !huge_pte_write(huge_ptep_get(pte)))) {
4c887265 3489 int ret;
63551ae0 3490
cb900f41
KS
3491 if (pte)
3492 spin_unlock(ptl);
2a15efc9
HD
3493 ret = hugetlb_fault(mm, vma, vaddr,
3494 (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
a89182c7 3495 if (!(ret & VM_FAULT_ERROR))
4c887265 3496 continue;
63551ae0 3497
4c887265 3498 remainder = 0;
4c887265
AL
3499 break;
3500 }
3501
a5516438 3502 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
7f2e9525 3503 page = pte_page(huge_ptep_get(pte));
d5d4b0aa 3504same_page:
d6692183 3505 if (pages) {
2a15efc9 3506 pages[i] = mem_map_offset(page, pfn_offset);
a0368d4e 3507 get_page_foll(pages[i]);
d6692183 3508 }
63551ae0
DG
3509
3510 if (vmas)
3511 vmas[i] = vma;
3512
3513 vaddr += PAGE_SIZE;
d5d4b0aa 3514 ++pfn_offset;
63551ae0
DG
3515 --remainder;
3516 ++i;
d5d4b0aa 3517 if (vaddr < vma->vm_end && remainder &&
a5516438 3518 pfn_offset < pages_per_huge_page(h)) {
d5d4b0aa
CK
3519 /*
3520 * We use pfn_offset to avoid touching the pageframes
3521 * of this compound page.
3522 */
3523 goto same_page;
3524 }
cb900f41 3525 spin_unlock(ptl);
63551ae0 3526 }
28a35716 3527 *nr_pages = remainder;
63551ae0
DG
3528 *position = vaddr;
3529
2a15efc9 3530 return i ? i : -EFAULT;
63551ae0 3531}
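/*
 * The same_page loop above steps pfn_offset through the subpages of one
 * compound hugepage instead of re-walking the page tables for every base
 * page. A small stand-alone sketch of that arithmetic, assuming 4KB base
 * pages and a 2MB hugepage (both values are assumptions about the arch):
 *
 *      #include <stdio.h>
 *
 *      #define PAGE_SHIFT      12
 *      #define HPAGE_SHIFT     21                      // 2MB hugepage
 *      #define HPAGE_MASK      (~((1UL << HPAGE_SHIFT) - 1))
 *
 *      int main(void)
 *      {
 *              unsigned long vaddr = 0x7f0000201000UL; // arbitrary example
 *
 *              // Index of the 4KB subpage within its hugepage, as in the loop.
 *              unsigned long pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
 *
 *              printf("subpage %lu of %lu\n", pfn_offset,
 *                     1UL << (HPAGE_SHIFT - PAGE_SHIFT));
 *              return 0;
 *      }
 */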
8f860591 3532
7da4d641 3533unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
8f860591
ZY
3534 unsigned long address, unsigned long end, pgprot_t newprot)
3535{
3536 struct mm_struct *mm = vma->vm_mm;
3537 unsigned long start = address;
3538 pte_t *ptep;
3539 pte_t pte;
a5516438 3540 struct hstate *h = hstate_vma(vma);
7da4d641 3541 unsigned long pages = 0;
8f860591
ZY
3542
3543 BUG_ON(address >= end);
3544 flush_cache_range(vma, address, end);
3545
a5338093 3546 mmu_notifier_invalidate_range_start(mm, start, end);
83cde9e8 3547 i_mmap_lock_write(vma->vm_file->f_mapping);
a5516438 3548 for (; address < end; address += huge_page_size(h)) {
cb900f41 3549 spinlock_t *ptl;
8f860591
ZY
3550 ptep = huge_pte_offset(mm, address);
3551 if (!ptep)
3552 continue;
cb900f41 3553 ptl = huge_pte_lock(h, mm, ptep);
7da4d641
PZ
3554 if (huge_pmd_unshare(mm, &address, ptep)) {
3555 pages++;
cb900f41 3556 spin_unlock(ptl);
39dde65c 3557 continue;
7da4d641 3558 }
a8bda28d
NH
3559 pte = huge_ptep_get(ptep);
3560 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
3561 spin_unlock(ptl);
3562 continue;
3563 }
3564 if (unlikely(is_hugetlb_entry_migration(pte))) {
3565 swp_entry_t entry = pte_to_swp_entry(pte);
3566
3567 if (is_write_migration_entry(entry)) {
3568 pte_t newpte;
3569
3570 make_migration_entry_read(&entry);
3571 newpte = swp_entry_to_pte(entry);
3572 set_huge_pte_at(mm, address, ptep, newpte);
3573 pages++;
3574 }
3575 spin_unlock(ptl);
3576 continue;
3577 }
3578 if (!huge_pte_none(pte)) {
8f860591 3579 pte = huge_ptep_get_and_clear(mm, address, ptep);
106c992a 3580 pte = pte_mkhuge(huge_pte_modify(pte, newprot));
be7517d6 3581 pte = arch_make_huge_pte(pte, vma, NULL, 0);
8f860591 3582 set_huge_pte_at(mm, address, ptep, pte);
7da4d641 3583 pages++;
8f860591 3584 }
cb900f41 3585 spin_unlock(ptl);
8f860591 3586 }
d833352a 3587 /*
c8c06efa 3588 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
d833352a 3589 * may have cleared our pud entry and done put_page on the page table:
c8c06efa 3590 * once we release i_mmap_rwsem, another task can do the final put_page
d833352a
MG
3591 * and that page table be reused and filled with junk.
3592 */
8f860591 3593 flush_tlb_range(vma, start, end);
34ee645e 3594 mmu_notifier_invalidate_range(mm, start, end);
83cde9e8 3595 i_mmap_unlock_write(vma->vm_file->f_mapping);
a5338093 3596 mmu_notifier_invalidate_range_end(mm, start, end);
7da4d641
PZ
3597
3598 return pages << h->order;
8f860591
ZY
3599}
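/*
 * hugetlb_change_protection() above is driven by mprotect() on a hugetlb
 * VMA, and its return value counts base pages (pages << h->order). A
 * minimal user-space sketch that reaches it, assuming a 2MB MAP_HUGETLB
 * mapping can be created (hugepages reserved beforehand):
 *
 *      #define _GNU_SOURCE
 *      #include <stdio.h>
 *      #include <sys/mman.h>
 *
 *      #define LEN (2UL * 1024 * 1024)         // one 2MB hugepage
 *
 *      int main(void)
 *      {
 *              char *p = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
 *                             MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
 *                             -1, 0);
 *              if (p == MAP_FAILED) {
 *                      perror("mmap");
 *                      return 1;
 *              }
 *              p[0] = 1;               // fault the hugepage in first
 *
 *              // Ends up in hugetlb_change_protection() for this VMA.
 *              if (mprotect(p, LEN, PROT_READ) != 0)
 *                      perror("mprotect");
 *
 *              munmap(p, LEN);
 *              return 0;
 *      }
 */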
3600
a1e78772
MG
3601int hugetlb_reserve_pages(struct inode *inode,
3602 long from, long to,
5a6fe125 3603 struct vm_area_struct *vma,
ca16d140 3604 vm_flags_t vm_flags)
e4e574b7 3605{
17c9d12e 3606 long ret, chg;
a5516438 3607 struct hstate *h = hstate_inode(inode);
90481622 3608 struct hugepage_subpool *spool = subpool_inode(inode);
9119a41e 3609 struct resv_map *resv_map;
1c5ecae3 3610 long gbl_reserve;
e4e574b7 3611
17c9d12e
MG
3612 /*
3613 * Only apply hugepage reservation if asked. At fault time, an
3614 * attempt will be made for VM_NORESERVE mappings to allocate a page
90481622 3615 * without using reserves.
17c9d12e 3616 */
ca16d140 3617 if (vm_flags & VM_NORESERVE)
17c9d12e
MG
3618 return 0;
3619
a1e78772
MG
3620 /*
3621 * Shared mappings base their reservation on the number of pages that
3622 * are already allocated on behalf of the file. Private mappings need
3623 * to reserve the full area even if read-only as mprotect() may be
3624 * called to make the mapping read-write. Assume !vma is a shm mapping.
3625 */
9119a41e 3626 if (!vma || vma->vm_flags & VM_MAYSHARE) {
4e35f483 3627 resv_map = inode_resv_map(inode);
9119a41e 3628
1406ec9b 3629 chg = region_chg(resv_map, from, to);
9119a41e
JK
3630
3631 } else {
3632 resv_map = resv_map_alloc();
17c9d12e
MG
3633 if (!resv_map)
3634 return -ENOMEM;
3635
a1e78772 3636 chg = to - from;
84afd99b 3637
17c9d12e
MG
3638 set_vma_resv_map(vma, resv_map);
3639 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
3640 }
3641
c50ac050
DH
3642 if (chg < 0) {
3643 ret = chg;
3644 goto out_err;
3645 }
8a630112 3646
1c5ecae3
MK
3647 /*
3648 * There must be enough pages in the subpool for the mapping. If
3649 * the subpool has a minimum size, there may be some global
3650 * reservations already in place (gbl_reserve).
3651 */
3652 gbl_reserve = hugepage_subpool_get_pages(spool, chg);
3653 if (gbl_reserve < 0) {
c50ac050
DH
3654 ret = -ENOSPC;
3655 goto out_err;
3656 }
5a6fe125
MG
3657
3658 /*
17c9d12e 3659 * Check that enough hugepages are available for the reservation.
90481622 3660 * Hand the pages back to the subpool if there are not.
5a6fe125 3661 */
1c5ecae3 3662 ret = hugetlb_acct_memory(h, gbl_reserve);
68842c9b 3663 if (ret < 0) {
1c5ecae3
MK
3664 /* put back original number of pages, chg */
3665 (void)hugepage_subpool_put_pages(spool, chg);
c50ac050 3666 goto out_err;
68842c9b 3667 }
17c9d12e
MG
3668
3669 /*
3670 * Account for the reservations made. Shared mappings record regions
3671 * that have reservations as they are shared by multiple VMAs.
3672 * When the last VMA disappears, the region map says how much
3673 * the reservation was and the page cache tells how much of
3674 * the reservation was consumed. Private mappings are per-VMA and
3675 * only the consumed reservations are tracked. When the VMA
3676 * disappears, the original reservation is the VMA size and the
3677 * consumed reservations are stored in the map. Hence, nothing
3678 * else has to be done for private mappings here.
3679 */
f83a275d 3680 if (!vma || vma->vm_flags & VM_MAYSHARE)
1406ec9b 3681 region_add(resv_map, from, to);
a43a8c39 3682 return 0;
c50ac050 3683out_err:
f031dd27
JK
3684 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3685 kref_put(&resv_map->refs, resv_map_release);
c50ac050 3686 return ret;
a43a8c39
CK
3687}
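/*
 * The reservation taken above happens at mmap() time, before any page is
 * faulted in, and is visible as HugePages_Rsvd in /proc/meminfo. A minimal
 * user-space sketch, assuming 2MB hugepages have been reserved via
 * vm.nr_hugepages; the mapping size and helper name are illustrative only:
 *
 *      #define _GNU_SOURCE
 *      #include <stdio.h>
 *      #include <stdlib.h>
 *      #include <sys/mman.h>
 *
 *      #define LEN (4UL * 2 * 1024 * 1024)     // four 2MB hugepages
 *
 *      static void show_rsvd(const char *when)
 *      {
 *              printf("%s: ", when);
 *              fflush(stdout);
 *              // HugePages_Rsvd reflects hugetlb_reserve_pages() charges.
 *              system("grep HugePages_Rsvd /proc/meminfo");
 *      }
 *
 *      int main(void)
 *      {
 *              char *p;
 *
 *              show_rsvd("before mmap");
 *              p = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
 *                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
 *              if (p == MAP_FAILED) {
 *                      perror("mmap");
 *                      return 1;
 *              }
 *              show_rsvd("after mmap, before any fault");
 *              munmap(p, LEN);
 *              show_rsvd("after munmap");
 *              return 0;
 *      }
 */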
3688
3689void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
3690{
a5516438 3691 struct hstate *h = hstate_inode(inode);
4e35f483 3692 struct resv_map *resv_map = inode_resv_map(inode);
9119a41e 3693 long chg = 0;
90481622 3694 struct hugepage_subpool *spool = subpool_inode(inode);
1c5ecae3 3695 long gbl_reserve;
45c682a6 3696
9119a41e 3697 if (resv_map)
1406ec9b 3698 chg = region_truncate(resv_map, offset);
45c682a6 3699 spin_lock(&inode->i_lock);
e4c6f8be 3700 inode->i_blocks -= (blocks_per_huge_page(h) * freed);
45c682a6
KC
3701 spin_unlock(&inode->i_lock);
3702
1c5ecae3
MK
3703 /*
3704 * If the subpool has a minimum size, the number of global
3705 * reservations to be released may be adjusted.
3706 */
3707 gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
3708 hugetlb_acct_memory(h, -gbl_reserve);
a43a8c39 3709}
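/*
 * The subpool adjustment above works on chg - freed: reservations that were
 * removed from the reserve map but never consumed by a faulted-in page
 * still have to be handed back to the subpool and the global pool. A tiny
 * worked sketch with made-up numbers (purely illustrative):
 *
 *      #include <stdio.h>
 *
 *      int main(void)
 *      {
 *              long chg = 10;  // pages truncated from the reserve map
 *              long freed = 4; // pages actually freed from the page cache
 *
 *              // Reservations taken at mmap() time but never consumed.
 *              printf("release %ld unused reservations\n", chg - freed);
 *              return 0;
 *      }
 */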
93f70f90 3710
3212b535
SC
3711#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
3712static unsigned long page_table_shareable(struct vm_area_struct *svma,
3713 struct vm_area_struct *vma,
3714 unsigned long addr, pgoff_t idx)
3715{
3716 unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
3717 svma->vm_start;
3718 unsigned long sbase = saddr & PUD_MASK;
3719 unsigned long s_end = sbase + PUD_SIZE;
3720
3721 /* Allow segments to share if only one is marked locked */
3722 unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED;
3723 unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED;
3724
3725 /*
3726 * Match the virtual addresses, permissions and the alignment of the
3727 * page table page.
3728 */
3729 if (pmd_index(addr) != pmd_index(saddr) ||
3730 vm_flags != svm_flags ||
3731 sbase < svma->vm_start || svma->vm_end < s_end)
3732 return 0;
3733
3734 return saddr;
3735}
3736
3737static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
3738{
3739 unsigned long base = addr & PUD_MASK;
3740 unsigned long end = base + PUD_SIZE;
3741
3742 /*
3743 * Check for proper vm_flags and page table alignment.
3744 */
3745 if (vma->vm_flags & VM_MAYSHARE &&
3746 vma->vm_start <= base && end <= vma->vm_end)
3747 return 1;
3748 return 0;
3749}
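/*
 * pmd sharing is only attempted when the mapping fully covers a PUD_SIZE-
 * aligned region, as checked above. A small stand-alone sketch of that
 * range check, assuming a 1GB PUD_SIZE (the x86_64 value); the function
 * name and example addresses are illustrative only:
 *
 *      #include <stdio.h>
 *
 *      #define PUD_SHIFT       30                      // 1GB on x86_64
 *      #define PUD_SIZE        (1UL << PUD_SHIFT)
 *      #define PUD_MASK        (~(PUD_SIZE - 1))
 *
 *      // Mirrors the range part of vma_shareable(), minus the flag test.
 *      static int range_shareable(unsigned long vm_start,
 *                                 unsigned long vm_end, unsigned long addr)
 *      {
 *              unsigned long base = addr & PUD_MASK;
 *              unsigned long end = base + PUD_SIZE;
 *
 *              return vm_start <= base && end <= vm_end;
 *      }
 *
 *      int main(void)
 *      {
 *              // A mapping of [1GB, 3GB): an address at 1.5GB is shareable.
 *              printf("%d\n", range_shareable(1UL << 30, 3UL << 30,
 *                                             (1UL << 30) + (1UL << 29)));
 *              return 0;
 *      }
 */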
3750
3751/*
3752 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
3753 * and returns the corresponding pte. While this is not necessary for the
3754 * !shared pmd case because we can allocate the pmd later as well, it makes the
3755 * code much cleaner. pmd allocation is essential for the shared case because
c8c06efa 3756 * pud has to be populated inside the same i_mmap_rwsem section - otherwise
3212b535
SC
3757 * racing tasks could either miss the sharing (see huge_pte_offset) or select a
3758 * bad pmd for sharing.
3759 */
3760pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
3761{
3762 struct vm_area_struct *vma = find_vma(mm, addr);
3763 struct address_space *mapping = vma->vm_file->f_mapping;
3764 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
3765 vma->vm_pgoff;
3766 struct vm_area_struct *svma;
3767 unsigned long saddr;
3768 pte_t *spte = NULL;
3769 pte_t *pte;
cb900f41 3770 spinlock_t *ptl;
3212b535
SC
3771
3772 if (!vma_shareable(vma, addr))
3773 return (pte_t *)pmd_alloc(mm, pud, addr);
3774
83cde9e8 3775 i_mmap_lock_write(mapping);
3212b535
SC
3776 vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
3777 if (svma == vma)
3778 continue;
3779
3780 saddr = page_table_shareable(svma, vma, addr, idx);
3781 if (saddr) {
3782 spte = huge_pte_offset(svma->vm_mm, saddr);
3783 if (spte) {
dc6c9a35 3784 mm_inc_nr_pmds(mm);
3212b535
SC
3785 get_page(virt_to_page(spte));
3786 break;
3787 }
3788 }
3789 }
3790
3791 if (!spte)
3792 goto out;
3793
cb900f41
KS
3794 ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte);
3795 spin_lock(ptl);
dc6c9a35 3796 if (pud_none(*pud)) {
3212b535
SC
3797 pud_populate(mm, pud,
3798 (pmd_t *)((unsigned long)spte & PAGE_MASK));
dc6c9a35 3799 } else {
3212b535 3800 put_page(virt_to_page(spte));
dc6c9a35
KS
3801 mm_inc_nr_pmds(mm);
3802 }
cb900f41 3803 spin_unlock(ptl);
3212b535
SC
3804out:
3805 pte = (pte_t *)pmd_alloc(mm, pud, addr);
83cde9e8 3806 i_mmap_unlock_write(mapping);
3212b535
SC
3807 return pte;
3808}
3809
3810/*
3811 * Unmap a huge page backed by a shared pte.
3812 *
3813 * The hugetlb pte page is refcounted at mapping time. If the pte is shared,
3814 * as indicated by page_count > 1, unmapping is achieved by clearing the pud
3815 * and decrementing the refcount. If count == 1, the pte page is not shared.
3816 *
cb900f41 3817 * Called with the page table lock held.
3212b535
SC
3818 *
3819 * returns: 1 successfully unmapped a shared pte page
3820 * 0 the underlying pte page is not shared, or it is the last user
3821 */
3822int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
3823{
3824 pgd_t *pgd = pgd_offset(mm, *addr);
3825 pud_t *pud = pud_offset(pgd, *addr);
3826
3827 BUG_ON(page_count(virt_to_page(ptep)) == 0);
3828 if (page_count(virt_to_page(ptep)) == 1)
3829 return 0;
3830
3831 pud_clear(pud);
3832 put_page(virt_to_page(ptep));
dc6c9a35 3833 mm_dec_nr_pmds(mm);
3212b535
SC
3834 *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
3835 return 1;
3836}
9e5fc74c
SC
3837#define want_pmd_share() (1)
3838#else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
3839pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
3840{
3841 return NULL;
3842}
e81f2d22
ZZ
3843
3844int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
3845{
3846 return 0;
3847}
9e5fc74c 3848#define want_pmd_share() (0)
3212b535
SC
3849#endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
3850
9e5fc74c
SC
3851#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
3852pte_t *huge_pte_alloc(struct mm_struct *mm,
3853 unsigned long addr, unsigned long sz)
3854{
3855 pgd_t *pgd;
3856 pud_t *pud;
3857 pte_t *pte = NULL;
3858
3859 pgd = pgd_offset(mm, addr);
3860 pud = pud_alloc(mm, pgd, addr);
3861 if (pud) {
3862 if (sz == PUD_SIZE) {
3863 pte = (pte_t *)pud;
3864 } else {
3865 BUG_ON(sz != PMD_SIZE);
3866 if (want_pmd_share() && pud_none(*pud))
3867 pte = huge_pmd_share(mm, addr, pud);
3868 else
3869 pte = (pte_t *)pmd_alloc(mm, pud, addr);
3870 }
3871 }
3872 BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
3873
3874 return pte;
3875}
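/*
 * With the generic helpers above, a PUD_SIZE request maps the hugepage at
 * the pud level and a PMD_SIZE request goes through the (possibly shared)
 * pmd. From user space the size is chosen with MAP_HUGE_2MB/MAP_HUGE_1GB
 * (Linux 3.8+; on older headers construct the flag from MAP_HUGE_SHIFT).
 * A minimal sketch, assuming 1GB pages have been reserved:
 *
 *      #define _GNU_SOURCE
 *      #include <stdio.h>
 *      #include <sys/mman.h>
 *      #include <linux/mman.h>         // MAP_HUGE_2MB / MAP_HUGE_1GB
 *
 *      int main(void)
 *      {
 *              // Requests a 1GB (pud-level) hugepage.
 *              void *p = mmap(NULL, 1UL << 30, PROT_READ | PROT_WRITE,
 *                             MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB |
 *                             MAP_HUGE_1GB, -1, 0);
 *              if (p == MAP_FAILED)
 *                      perror("mmap(MAP_HUGE_1GB)");  // no 1GB pages reserved?
 *              else
 *                      munmap(p, 1UL << 30);
 *              return 0;
 *      }
 */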
3876
3877pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
3878{
3879 pgd_t *pgd;
3880 pud_t *pud;
3881 pmd_t *pmd = NULL;
3882
3883 pgd = pgd_offset(mm, addr);
3884 if (pgd_present(*pgd)) {
3885 pud = pud_offset(pgd, addr);
3886 if (pud_present(*pud)) {
3887 if (pud_huge(*pud))
3888 return (pte_t *)pud;
3889 pmd = pmd_offset(pud, addr);
3890 }
3891 }
3892 return (pte_t *) pmd;
3893}
3894
61f77eda
NH
3895#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
3896
3897/*
3898 * These functions are overridable if your architecture needs its own
3899 * behavior.
3900 */
3901struct page * __weak
3902follow_huge_addr(struct mm_struct *mm, unsigned long address,
3903 int write)
3904{
3905 return ERR_PTR(-EINVAL);
3906}
3907
3908struct page * __weak
9e5fc74c 3909follow_huge_pmd(struct mm_struct *mm, unsigned long address,
e66f17ff 3910 pmd_t *pmd, int flags)
9e5fc74c 3911{
e66f17ff
NH
3912 struct page *page = NULL;
3913 spinlock_t *ptl;
3914retry:
3915 ptl = pmd_lockptr(mm, pmd);
3916 spin_lock(ptl);
3917 /*
3918 * Make sure that the address range covered by this pmd is not
3919 * unmapped by other threads.
3920 */
3921 if (!pmd_huge(*pmd))
3922 goto out;
3923 if (pmd_present(*pmd)) {
97534127 3924 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
e66f17ff
NH
3925 if (flags & FOLL_GET)
3926 get_page(page);
3927 } else {
3928 if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) {
3929 spin_unlock(ptl);
3930 __migration_entry_wait(mm, (pte_t *)pmd, ptl);
3931 goto retry;
3932 }
3933 /*
3934 * A hwpoisoned entry is treated as no_page_table in
3935 * follow_page_mask().
3936 */
3937 }
3938out:
3939 spin_unlock(ptl);
9e5fc74c
SC
3940 return page;
3941}
3942
61f77eda 3943struct page * __weak
9e5fc74c 3944follow_huge_pud(struct mm_struct *mm, unsigned long address,
e66f17ff 3945 pud_t *pud, int flags)
9e5fc74c 3946{
e66f17ff
NH
3947 if (flags & FOLL_GET)
3948 return NULL;
9e5fc74c 3949
e66f17ff 3950 return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
9e5fc74c
SC
3951}
3952
d5bd9106
AK
3953#ifdef CONFIG_MEMORY_FAILURE
3954
93f70f90
NH
3955/*
3956 * This function is called from the memory-failure code.
3957 * Assume the caller holds the page lock of the head page.
3958 */
6de2b1aa 3959int dequeue_hwpoisoned_huge_page(struct page *hpage)
93f70f90
NH
3960{
3961 struct hstate *h = page_hstate(hpage);
3962 int nid = page_to_nid(hpage);
6de2b1aa 3963 int ret = -EBUSY;
93f70f90
NH
3964
3965 spin_lock(&hugetlb_lock);
7e1f049e
NH
3966 /*
3967 * Just checking !page_huge_active is not enough, because that could be
3968 * an isolated/hwpoisoned hugepage (which has a refcount > 0).
3969 */
3970 if (!page_huge_active(hpage) && !page_count(hpage)) {
56f2fb14
NH
3971 /*
3972 * A hwpoisoned hugepage isn't linked to the activelist or freelist,
3973 * but dangling hpage->lru can trigger list-debug warnings
3974 * (this happens when we call unpoison_memory() on it),
3975 * so let it point to itself with list_del_init().
3976 */
3977 list_del_init(&hpage->lru);
8c6c2ecb 3978 set_page_refcounted(hpage);
6de2b1aa
NH
3979 h->free_huge_pages--;
3980 h->free_huge_pages_node[nid]--;
3981 ret = 0;
3982 }
93f70f90 3983 spin_unlock(&hugetlb_lock);
6de2b1aa 3984 return ret;
93f70f90 3985}
6de2b1aa 3986#endif
31caf665
NH
3987
3988bool isolate_huge_page(struct page *page, struct list_head *list)
3989{
bcc54222
NH
3990 bool ret = true;
3991
309381fe 3992 VM_BUG_ON_PAGE(!PageHead(page), page);
31caf665 3993 spin_lock(&hugetlb_lock);
bcc54222
NH
3994 if (!page_huge_active(page) || !get_page_unless_zero(page)) {
3995 ret = false;
3996 goto unlock;
3997 }
3998 clear_page_huge_active(page);
31caf665 3999 list_move_tail(&page->lru, list);
bcc54222 4000unlock:
31caf665 4001 spin_unlock(&hugetlb_lock);
bcc54222 4002 return ret;
31caf665
NH
4003}
4004
4005void putback_active_hugepage(struct page *page)
4006{
309381fe 4007 VM_BUG_ON_PAGE(!PageHead(page), page);
31caf665 4008 spin_lock(&hugetlb_lock);
bcc54222 4009 set_page_huge_active(page);
31caf665
NH
4010 list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
4011 spin_unlock(&hugetlb_lock);
4012 put_page(page);
4013}