mm/hugetlb.c

/*
 * Generic hugetlb support.
 * (C) Nadia Yvette Chambers, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/page-isolation.h>
#include <linux/jhash.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include "internal.h"

int hugepages_treat_as_movable;

int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];
/*
 * Minimum page order among possible hugepage sizes, set to a proper value
 * at boot time.
 */
static unsigned int minimum_order __read_mostly = UINT_MAX;

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static unsigned long __initdata default_hstate_size;

/*
 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
 * free_huge_pages, and surplus_huge_pages.
 */
DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Serializes faults on the same logical page.  This is used to
 * prevent spurious OOMs when the hugepage pool is fully utilized.
 */
static int num_fault_mutexes;
static struct mutex *htlb_fault_mutex_table ____cacheline_aligned_in_smp;

/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);

static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
{
	bool free = (spool->count == 0) && (spool->used_hpages == 0);

	spin_unlock(&spool->lock);

	/* If no pages are used, and no other handles to the subpool
	 * remain, give up any reservations based on minimum size and
	 * free the subpool */
	if (free) {
		if (spool->min_hpages != -1)
			hugetlb_acct_memory(spool->hstate,
						-spool->min_hpages);
		kfree(spool);
	}
}

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages)
{
	struct hugepage_subpool *spool;

	spool = kzalloc(sizeof(*spool), GFP_KERNEL);
	if (!spool)
		return NULL;

	spin_lock_init(&spool->lock);
	spool->count = 1;
	spool->max_hpages = max_hpages;
	spool->hstate = h;
	spool->min_hpages = min_hpages;

	if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
		kfree(spool);
		return NULL;
	}
	spool->rsv_hpages = min_hpages;

	return spool;
}

void hugepage_put_subpool(struct hugepage_subpool *spool)
{
	spin_lock(&spool->lock);
	BUG_ON(!spool->count);
	spool->count--;
	unlock_or_release_subpool(spool);
}

/*
 * Subpool accounting for allocating and reserving pages.
 * Return -ENOMEM if there are not enough resources to satisfy the
 * request.  Otherwise, return the number of pages by which the
 * global pools must be adjusted (upward).  The returned value may
 * only be different than the passed value (delta) in the case where
 * a subpool minimum size must be maintained.
 */
static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
				       long delta)
{
	long ret = delta;

	if (!spool)
		return ret;

	spin_lock(&spool->lock);

	if (spool->max_hpages != -1) {		/* maximum size accounting */
		if ((spool->used_hpages + delta) <= spool->max_hpages)
			spool->used_hpages += delta;
		else {
			ret = -ENOMEM;
			goto unlock_ret;
		}
	}

	if (spool->min_hpages != -1) {		/* minimum size accounting */
		if (delta > spool->rsv_hpages) {
			/*
			 * Asking for more reserves than those already taken on
			 * behalf of subpool.  Return difference.
			 */
			ret = delta - spool->rsv_hpages;
			spool->rsv_hpages = 0;
		} else {
			ret = 0;	/* reserves already accounted for */
			spool->rsv_hpages -= delta;
		}
	}

unlock_ret:
	spin_unlock(&spool->lock);
	return ret;
}
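
/*
 * Worked example (editor's illustration, not in the original source): for
 * a subpool created with min_hpages = 10, hugetlb_acct_memory() has already
 * reserved 10 global pages, so rsv_hpages starts at 10.  A call
 * hugepage_subpool_get_pages(spool, 3) then returns 0 (rsv_hpages drops to
 * 7; the global pool needs no further adjustment), while a subsequent
 * hugepage_subpool_get_pages(spool, 12) returns 12 - 7 = 5: only five of
 * the twelve pages still have to come out of the global pool.
 */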

/*
 * Subpool accounting for freeing and unreserving pages.
 * Return the number of global page reservations that must be dropped.
 * The return value may only be different than the passed value (delta)
 * in the case where a subpool minimum size must be maintained.
 */
static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
				       long delta)
{
	long ret = delta;

	if (!spool)
		return delta;

	spin_lock(&spool->lock);

	if (spool->max_hpages != -1)		/* maximum size accounting */
		spool->used_hpages -= delta;

	if (spool->min_hpages != -1) {		/* minimum size accounting */
		if (spool->rsv_hpages + delta <= spool->min_hpages)
			ret = 0;
		else
			ret = spool->rsv_hpages + delta - spool->min_hpages;

		spool->rsv_hpages += delta;
		if (spool->rsv_hpages > spool->min_hpages)
			spool->rsv_hpages = spool->min_hpages;
	}

	/*
	 * If hugetlbfs_put_super couldn't free spool due to an outstanding
	 * quota reference, free it now.
	 */
	unlock_or_release_subpool(spool);

	return ret;
}
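
/*
 * Worked example (editor's illustration, not in the original source):
 * continuing with min_hpages = 10, putting 4 pages back while rsv_hpages
 * is 0 returns 0 (0 + 4 <= 10) and rsv_hpages becomes 4: every freed page
 * goes toward the subpool minimum.  Putting another 8 pages then finds
 * 4 + 8 = 12 > 10, so it returns 12 - 10 = 2 and caps rsv_hpages at 10:
 * two global reservations are dropped and the other six pages are retained
 * to back the minimum.
 */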

static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
	return HUGETLBFS_SB(inode->i_sb)->spool;
}

static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
	return subpool_inode(file_inode(vma->vm_file));
}

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 *                    across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and
 * protected by a resv_map's lock
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
};

static long region_add(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *nrg, *trg;

	spin_lock(&resv->lock);
	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/* If this area reaches higher then extend our area to
		 * include it completely.  If this is not the first area
		 * which we intend to reuse, free it. */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			list_del(&rg->link);
			kfree(rg);
		}
	}
	nrg->from = f;
	nrg->to = t;
	spin_unlock(&resv->lock);
	return 0;
}

static long region_chg(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *nrg = NULL;
	long chg = 0;

retry:
	spin_lock(&resv->lock);
	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* If we are below the current region then a new region is required.
	 * Subtle, allocate a new region at the position but make it zero
	 * size such that we can guarantee to record the reservation. */
	if (&rg->link == head || t < rg->from) {
		if (!nrg) {
			spin_unlock(&resv->lock);
			nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
			if (!nrg)
				return -ENOMEM;

			nrg->from = f;
			nrg->to = f;
			INIT_LIST_HEAD(&nrg->link);
			goto retry;
		}

		list_add(&nrg->link, rg->link.prev);
		chg = t - f;
		goto out_nrg;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			goto out;

		/* We overlap with this area, if it extends further than
		 * us then we must extend ourselves.  Account for its
		 * existing reservation. */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}

out:
	spin_unlock(&resv->lock);
	/* We already know we raced and no longer need the new region */
	kfree(nrg);
	return chg;
out_nrg:
	spin_unlock(&resv->lock);
	return chg;
}

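/*
 * Worked example (editor's illustration, not in the original source): with
 * regions {0,3} and {5,6} on the list, region_chg(resv, 2, 6) extends its
 * left edge to 0 (the enclosing region), then subtracts the 3 + 1 pages
 * already covered from the span of 6, returning 2 -- only offsets 3 and 4
 * need new reservations.  The paired region_add(resv, 2, 6) then merges
 * everything into the single region {0,6}.
 */
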
static long region_truncate(struct resv_map *resv, long end)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *trg;
	long chg = 0;

	spin_lock(&resv->lock);
	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (end <= rg->to)
			break;
	if (&rg->link == head)
		goto out;

	/* If we are in the middle of a region then adjust it. */
	if (end > rg->from) {
		chg = rg->to - end;
		rg->to = end;
		rg = list_entry(rg->link.next, typeof(*rg), link);
	}

	/* Drop any remaining regions. */
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		chg += rg->to - rg->from;
		list_del(&rg->link);
		kfree(rg);
	}

out:
	spin_unlock(&resv->lock);
	return chg;
}

static long region_count(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg;
	long chg = 0;

	spin_lock(&resv->lock);
	/* Locate each segment we overlap with, and count that overlap. */
	list_for_each_entry(rg, head, link) {
		long seg_from;
		long seg_to;

		if (rg->to <= f)
			continue;
		if (rg->from >= t)
			break;

		seg_from = max(rg->from, f);
		seg_to = min(rg->to, t);

		chg += seg_to - seg_from;
	}
	spin_unlock(&resv->lock);

	return chg;
}

/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	return ((address - vma->vm_start) >> huge_page_shift(h)) +
			(vma->vm_pgoff >> huge_page_order(h));
}

pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address)
{
	return vma_hugecache_offset(hstate_vma(vma), vma, address);
}

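/*
 * Editor's illustration (not in the original source): with 2MB huge pages
 * on x86 (huge_page_shift == 21, huge_page_order == 9, vm_pgoff counted in
 * 4KB units), a VMA whose vm_pgoff is 1024 (file offset 4MB) faulting at
 * vm_start + 2MB yields (2MB >> 21) + (1024 >> 9) = 1 + 2 = 3, i.e. the
 * fourth huge page of the backing file.
 */
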
/*
 * Return the size of the pages allocated when backing a VMA. In the majority
 * of cases this will be the same size as that used by the page table entries.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	struct hstate *hstate;

	if (!is_vm_hugetlb_page(vma))
		return PAGE_SIZE;

	hstate = hstate_vma(vma);

	return 1UL << huge_page_shift(hstate);
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);

/*
 * Return the page size being used by the MMU to back a VMA. In the majority
 * of cases, the page size used by the kernel matches the MMU size. On
 * architectures where it differs, an architecture-specific version of this
 * function is required.
 */
#ifndef vma_mmu_pagesize
unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return vma_kernel_pagesize(vma);
}
#endif

/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have their future faults succeed.
 *
 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held. It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping.  A shared mapping has a region map associated
 * with the underlying file; this region map represents the backing file
 * pages which have ever had a reservation assigned, and persists even
 * after the page is instantiated.  A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it; this region map represents those offsets which have consumed
 * reservation, i.e. where pages have been instantiated.
 */
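
/*
 * Editor's illustration (hypothetical address, not in the original source):
 * kmalloc()ed resv_maps are at least word aligned, so bits 0-1 of the
 * pointer are free for the flags above.  If a private mapping's resv_map
 * sits at 0xffff880012345680, set_vma_resv_map() stores that value and a
 * later set_vma_resv_flags(vma, HPAGE_RESV_OWNER) turns vm_private_data
 * into 0xffff880012345681; vma_resv_map() recovers the pointer by masking
 * off HPAGE_RESV_MASK, and is_vma_resv_set() tests the low bits.
 */
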
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
	return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
							unsigned long value)
{
	vma->vm_private_data = (void *)value;
}

struct resv_map *resv_map_alloc(void)
{
	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
	if (!resv_map)
		return NULL;

	kref_init(&resv_map->refs);
	spin_lock_init(&resv_map->lock);
	INIT_LIST_HEAD(&resv_map->regions);

	return resv_map;
}

void resv_map_release(struct kref *ref)
{
	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);

	/* Clear out any active regions before we release the map. */
	region_truncate(resv_map, 0);
	kfree(resv_map);
}

static inline struct resv_map *inode_resv_map(struct inode *inode)
{
	return inode->i_mapping->private_data;
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	if (vma->vm_flags & VM_MAYSHARE) {
		struct address_space *mapping = vma->vm_file->f_mapping;
		struct inode *inode = mapping->host;

		return inode_resv_map(inode);

	} else {
		return (struct resv_map *)(get_vma_private_data(vma) &
							~HPAGE_RESV_MASK);
	}
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, (get_vma_private_data(vma) &
				HPAGE_RESV_MASK) | (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);

	return (get_vma_private_data(vma) & flag) != 0;
}

/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	if (!(vma->vm_flags & VM_MAYSHARE))
		vma->vm_private_data = (void *)0;
}

/* Returns true if the VMA has associated reserve pages */
static int vma_has_reserves(struct vm_area_struct *vma, long chg)
{
	if (vma->vm_flags & VM_NORESERVE) {
		/*
		 * This address is already reserved by another process
		 * (chg == 0), so we should decrement the reserved count.
		 * Without decrementing, the reserve count remains after
		 * releasing the inode, because the allocated page will go
		 * into the page cache and be regarded as coming from the
		 * reserved pool in the releasing step.  Currently we don't
		 * have any other solution to deal with this situation
		 * properly, so add a work-around here.
		 */
		if (vma->vm_flags & VM_MAYSHARE && chg == 0)
			return 1;
		else
			return 0;
	}

	/* Shared mappings always use reserves */
	if (vma->vm_flags & VM_MAYSHARE)
		return 1;

	/*
	 * Only the process that called mmap() has reserves for
	 * private mappings.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
		return 1;

	return 0;
}

static void enqueue_huge_page(struct hstate *h, struct page *page)
{
	int nid = page_to_nid(page);
	list_move(&page->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[nid]++;
}

static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
{
	struct page *page;

	list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
		if (!is_migrate_isolate_page(page))
			break;
	/*
	 * If no non-isolated free hugepage was found on the list,
	 * the allocation fails.
	 */
	if (&h->hugepage_freelists[nid] == &page->lru)
		return NULL;
	list_move(&page->lru, &h->hugepage_activelist);
	set_page_refcounted(page);
	h->free_huge_pages--;
	h->free_huge_pages_node[nid]--;
	return page;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	if (hugepages_treat_as_movable || hugepage_migration_supported(h))
		return GFP_HIGHUSER_MOVABLE;
	else
		return GFP_HIGHUSER;
}

static struct page *dequeue_huge_page_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, int avoid_reserve,
				long chg)
{
	struct page *page = NULL;
	struct mempolicy *mpol;
	nodemask_t *nodemask;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;
	unsigned int cpuset_mems_cookie;

	/*
	 * A child process with MAP_PRIVATE mappings created by its parent
	 * has no page reserves. This check ensures that reservations are
	 * not "stolen". The child may still get SIGKILLed.
	 */
	if (!vma_has_reserves(vma, chg) &&
			h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	/* If reserves cannot be used, ensure enough pages are in the pool */
	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

retry_cpuset:
	cpuset_mems_cookie = read_mems_allowed_begin();
	zonelist = huge_zonelist(vma, address,
					htlb_alloc_mask(h), &mpol, &nodemask);

	for_each_zone_zonelist_nodemask(zone, z, zonelist,
						MAX_NR_ZONES - 1, nodemask) {
		if (cpuset_zone_allowed(zone, htlb_alloc_mask(h))) {
			page = dequeue_huge_page_node(h, zone_to_nid(zone));
			if (page) {
				if (avoid_reserve)
					break;
				if (!vma_has_reserves(vma, chg))
					break;

				SetPagePrivate(page);
				h->resv_huge_pages--;
				break;
			}
		}
	}

	mpol_cond_put(mpol);
	if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
		goto retry_cpuset;
	return page;

err:
	return NULL;
}

/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed.  Ensure that we use an allowed
 * node for alloc or free.
 */
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	nid = next_node(nid, *nodes_allowed);
	if (nid == MAX_NUMNODES)
		nid = first_node(*nodes_allowed);
	VM_BUG_ON(nid >= MAX_NUMNODES);

	return nid;
}

static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	if (!node_isset(nid, *nodes_allowed))
		nid = next_node_allowed(nid, nodes_allowed);
	return nid;
}

/*
 * returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advance the
 * next node from which to allocate, handling wrap at end of node
 * mask.
 */
static int hstate_next_node_to_alloc(struct hstate *h,
					nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);

	return nid;
}

/*
 * helper for free_pool_huge_page() - return the previously saved
 * node ["this node"] from which to free a huge page.  Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

	return nid;
}

#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_alloc(hs, mask)) || 1);	\
		nr_nodes--)

#define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
		nr_nodes--)

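/*
 * Editor's illustration (not in the original source): with
 * *nodes_allowed = {0,2,3} and h->next_nid_to_alloc == 2,
 * for_each_node_mask_to_alloc() runs nodes_weight() == 3 iterations with
 * node = 2, 3, 0, advancing next_nid_to_alloc each time (wrapping past the
 * end of the mask) and leaving it at 2 again afterwards.  A stale
 * next_nid_to_alloc of 1, not in the mask, would first be stepped to the
 * allowed node 2 by get_valid_node_allowed().
 */
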
#if defined(CONFIG_CMA) && defined(CONFIG_X86_64)
static void destroy_compound_gigantic_page(struct page *page,
					unsigned long order)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		__ClearPageTail(p);
		set_page_refcounted(p);
		p->first_page = NULL;
	}

	set_compound_order(page, 0);
	__ClearPageHead(page);
}

static void free_gigantic_page(struct page *page, unsigned order)
{
	free_contig_range(page_to_pfn(page), 1 << order);
}

static int __alloc_gigantic_page(unsigned long start_pfn,
				unsigned long nr_pages)
{
	unsigned long end_pfn = start_pfn + nr_pages;
	return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
}

static bool pfn_range_valid_gigantic(unsigned long start_pfn,
				unsigned long nr_pages)
{
	unsigned long i, end_pfn = start_pfn + nr_pages;
	struct page *page;

	for (i = start_pfn; i < end_pfn; i++) {
		if (!pfn_valid(i))
			return false;

		page = pfn_to_page(i);

		if (PageReserved(page))
			return false;

		if (page_count(page) > 0)
			return false;

		if (PageHuge(page))
			return false;
	}

	return true;
}

static bool zone_spans_last_pfn(const struct zone *zone,
			unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long last_pfn = start_pfn + nr_pages - 1;
	return zone_spans_pfn(zone, last_pfn);
}

static struct page *alloc_gigantic_page(int nid, unsigned order)
{
	unsigned long nr_pages = 1 << order;
	unsigned long ret, pfn, flags;
	struct zone *z;

	z = NODE_DATA(nid)->node_zones;
	for (; z - NODE_DATA(nid)->node_zones < MAX_NR_ZONES; z++) {
		spin_lock_irqsave(&z->lock, flags);

		pfn = ALIGN(z->zone_start_pfn, nr_pages);
		while (zone_spans_last_pfn(z, pfn, nr_pages)) {
			if (pfn_range_valid_gigantic(pfn, nr_pages)) {
				/*
				 * We release the zone lock here because
				 * alloc_contig_range() will also lock the zone
				 * at some point. If there's an allocation
				 * spinning on this lock, it may win the race
				 * and cause alloc_contig_range() to fail...
				 */
				spin_unlock_irqrestore(&z->lock, flags);
				ret = __alloc_gigantic_page(pfn, nr_pages);
				if (!ret)
					return pfn_to_page(pfn);
				spin_lock_irqsave(&z->lock, flags);
			}
			pfn += nr_pages;
		}

		spin_unlock_irqrestore(&z->lock, flags);
	}

	return NULL;
}

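/*
 * Editor's illustration (not in the original source): for 1GB pages with a
 * 4KB base page, order is 18 and nr_pages = 1 << 18 = 0x40000, so
 * alloc_gigantic_page() starts at ALIGN(zone_start_pfn, 0x40000) -- e.g. a
 * zone starting at pfn 0x800 is first probed at pfn 0x40000 -- and steps
 * through the zone in naturally aligned 1GB strides until
 * pfn_range_valid_gigantic() finds a candidate range.
 */
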
static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
static void prep_compound_gigantic_page(struct page *page, unsigned long order);

static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid)
{
	struct page *page;

	page = alloc_gigantic_page(nid, huge_page_order(h));
	if (page) {
		prep_compound_gigantic_page(page, huge_page_order(h));
		prep_new_huge_page(h, page, nid);
	}

	return page;
}

static int alloc_fresh_gigantic_page(struct hstate *h,
				nodemask_t *nodes_allowed)
{
	struct page *page = NULL;
	int nr_nodes, node;

	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
		page = alloc_fresh_gigantic_page_node(h, node);
		if (page)
			return 1;
	}

	return 0;
}

static inline bool gigantic_page_supported(void) { return true; }
#else
static inline bool gigantic_page_supported(void) { return false; }
static inline void free_gigantic_page(struct page *page, unsigned order) { }
static inline void destroy_compound_gigantic_page(struct page *page,
						unsigned long order) { }
static inline int alloc_fresh_gigantic_page(struct hstate *h,
					nodemask_t *nodes_allowed) { return 0; }
#endif

static void update_and_free_page(struct hstate *h, struct page *page)
{
	int i;

	if (hstate_is_gigantic(h) && !gigantic_page_supported())
		return;

	h->nr_huge_pages--;
	h->nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < pages_per_huge_page(h); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
				1 << PG_referenced | 1 << PG_dirty |
				1 << PG_active | 1 << PG_private |
				1 << PG_writeback);
	}
	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
	set_compound_page_dtor(page, NULL);
	set_page_refcounted(page);
	if (hstate_is_gigantic(h)) {
		destroy_compound_gigantic_page(page, huge_page_order(h));
		free_gigantic_page(page, huge_page_order(h));
	} else {
		arch_release_hugepage(page);
		__free_pages(page, huge_page_order(h));
	}
}

struct hstate *size_to_hstate(unsigned long size)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (huge_page_size(h) == size)
			return h;
	}
	return NULL;
}

/*
 * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
 * to hstate->hugepage_activelist.)
 *
 * This function can be called for tail pages, but never returns true for them.
 */
bool page_huge_active(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return PageHead(page) && PagePrivate(&page[1]);
}

/* never called for tail page */
static void set_page_huge_active(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
	SetPagePrivate(&page[1]);
}

static void clear_page_huge_active(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
	ClearPagePrivate(&page[1]);
}

void free_huge_page(struct page *page)
{
	/*
	 * Can't pass hstate in here because it is called from the
	 * compound page destructor.
	 */
	struct hstate *h = page_hstate(page);
	int nid = page_to_nid(page);
	struct hugepage_subpool *spool =
		(struct hugepage_subpool *)page_private(page);
	bool restore_reserve;

	set_page_private(page, 0);
	page->mapping = NULL;
	BUG_ON(page_count(page));
	BUG_ON(page_mapcount(page));
	restore_reserve = PagePrivate(page);
	ClearPagePrivate(page);

	/*
	 * A return code of zero implies that the subpool will be under its
	 * minimum size if the reservation is not restored after page is free.
	 * Therefore, force restore_reserve operation.
	 */
	if (hugepage_subpool_put_pages(spool, 1) == 0)
		restore_reserve = true;

	spin_lock(&hugetlb_lock);
	clear_page_huge_active(page);
	hugetlb_cgroup_uncharge_page(hstate_index(h),
				     pages_per_huge_page(h), page);
	if (restore_reserve)
		h->resv_huge_pages++;

	if (h->surplus_huge_pages_node[nid]) {
		/* remove the page from active list */
		list_del(&page->lru);
		update_and_free_page(h, page);
		h->surplus_huge_pages--;
		h->surplus_huge_pages_node[nid]--;
	} else {
		arch_clear_hugepage_flags(page);
		enqueue_huge_page(h, page);
	}
	spin_unlock(&hugetlb_lock);
}

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
	INIT_LIST_HEAD(&page->lru);
	set_compound_page_dtor(page, free_huge_page);
	spin_lock(&hugetlb_lock);
	set_hugetlb_cgroup(page, NULL);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;
	spin_unlock(&hugetlb_lock);
	put_page(page); /* free it into the hugepage allocator */
}

static void prep_compound_gigantic_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	/* we rely on prep_new_huge_page to set the destructor */
	set_compound_order(page, order);
	__SetPageHead(page);
	__ClearPageReserved(page);
	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		/*
		 * For gigantic hugepages allocated through bootmem at
		 * boot, it's safer to be consistent with the not-gigantic
		 * hugepages and clear the PG_reserved bit from all tail pages
		 * too.  Otherwise drivers using get_user_pages() to access
		 * tail pages may get the reference counting wrong if they see
		 * PG_reserved set on a tail page (despite the head page not
		 * having PG_reserved set).  Enforcing this consistency between
		 * head and tail pages allows drivers to optimize away a check
		 * on the head page when they need to know if put_page() is
		 * needed after get_user_pages().
		 */
		__ClearPageReserved(p);
		set_page_count(p, 0);
		p->first_page = page;
		/* Make sure p->first_page is always valid for PageTail() */
		smp_wmb();
		__SetPageTail(p);
	}
}

/*
 * PageHuge() only returns true for hugetlbfs pages, but not for normal or
 * transparent huge pages.  See the PageTransHuge() documentation for more
 * details.
 */
int PageHuge(struct page *page)
{
	if (!PageCompound(page))
		return 0;

	page = compound_head(page);
	return get_compound_page_dtor(page) == free_huge_page;
}
EXPORT_SYMBOL_GPL(PageHuge);

/*
 * PageHeadHuge() only returns true for hugetlbfs head page, but not for
 * normal or transparent huge pages.
 */
int PageHeadHuge(struct page *page_head)
{
	if (!PageHead(page_head))
		return 0;

	return get_compound_page_dtor(page_head) == free_huge_page;
}

pgoff_t __basepage_index(struct page *page)
{
	struct page *page_head = compound_head(page);
	pgoff_t index = page_index(page_head);
	unsigned long compound_idx;

	if (!PageHuge(page_head))
		return page_index(page);

	if (compound_order(page_head) >= MAX_ORDER)
		compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
	else
		compound_idx = page - page_head;

	return (index << compound_order(page_head)) + compound_idx;
}

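/*
 * Editor's illustration (not in the original source): for a 2MB huge page
 * (compound_order == 9) whose head page has page_index 3, the tail page
 * 100 base pages into the compound page maps to basepage index
 * (3 << 9) + 100 = 1636.
 */
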
static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
{
	struct page *page;

	page = alloc_pages_exact_node(nid,
		htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
						__GFP_REPEAT|__GFP_NOWARN,
		huge_page_order(h));
	if (page) {
		if (arch_prepare_hugepage(page)) {
			__free_pages(page, huge_page_order(h));
			return NULL;
		}
		prep_new_huge_page(h, page, nid);
	}

	return page;
}

static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
{
	struct page *page;
	int nr_nodes, node;
	int ret = 0;

	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
		page = alloc_fresh_huge_page_node(h, node);
		if (page) {
			ret = 1;
			break;
		}
	}

	if (ret)
		count_vm_event(HTLB_BUDDY_PGALLOC);
	else
		count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);

	return ret;
}

/*
 * Free huge page from pool from next node to free.
 * Attempt to keep persistent huge pages more or less
 * balanced over allowed nodes.
 * Called with hugetlb_lock locked.
 */
static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
							 bool acct_surplus)
{
	int nr_nodes, node;
	int ret = 0;

	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
		/*
		 * If we're returning unused surplus pages, only examine
		 * nodes with surplus pages.
		 */
		if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
		    !list_empty(&h->hugepage_freelists[node])) {
			struct page *page =
				list_entry(h->hugepage_freelists[node].next,
					  struct page, lru);
			list_del(&page->lru);
			h->free_huge_pages--;
			h->free_huge_pages_node[node]--;
			if (acct_surplus) {
				h->surplus_huge_pages--;
				h->surplus_huge_pages_node[node]--;
			}
			update_and_free_page(h, page);
			ret = 1;
			break;
		}
	}

	return ret;
}

/*
 * Dissolve a given free hugepage into free buddy pages. This function does
 * nothing for in-use (including surplus) hugepages.
 */
static void dissolve_free_huge_page(struct page *page)
{
	spin_lock(&hugetlb_lock);
	if (PageHuge(page) && !page_count(page)) {
		struct hstate *h = page_hstate(page);
		int nid = page_to_nid(page);
		list_del(&page->lru);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
		update_and_free_page(h, page);
	}
	spin_unlock(&hugetlb_lock);
}

/*
 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
 * make specified memory blocks removable from the system.
 * Note that start_pfn should be aligned to the (minimum) hugepage size.
 */
void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	if (!hugepages_supported())
		return;

	VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << minimum_order));
	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order)
		dissolve_free_huge_page(pfn_to_page(pfn));
}

static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
{
	struct page *page;
	unsigned int r_nid;

	if (hstate_is_gigantic(h))
		return NULL;

	/*
	 * Assume we will successfully allocate the surplus page to
	 * prevent racing processes from causing the surplus to exceed
	 * overcommit
	 *
	 * This however introduces a different race, where a process B
	 * tries to grow the static hugepage pool while alloc_pages() is
	 * called by process A. B will only examine the per-node
	 * counters in determining if surplus huge pages can be
	 * converted to normal huge pages in adjust_pool_surplus(). A
	 * won't be able to increment the per-node counter, until the
	 * lock is dropped by B, but B doesn't drop hugetlb_lock until
	 * no more huge pages can be converted from surplus to normal
	 * state (and doesn't try to convert again). Thus, we have a
	 * case where a surplus huge page exists, the pool is grown, and
	 * the surplus huge page still exists after, even though it
	 * should just have been converted to a normal huge page. This
	 * does not leak memory, though, as the hugepage will be freed
	 * once it is out of use. It also does not allow the counters to
	 * go out of whack in adjust_pool_surplus() as we don't modify
	 * the node values until we've gotten the hugepage and only the
	 * per-node value is checked there.
	 */
	spin_lock(&hugetlb_lock);
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
		spin_unlock(&hugetlb_lock);
		return NULL;
	} else {
		h->nr_huge_pages++;
		h->surplus_huge_pages++;
	}
	spin_unlock(&hugetlb_lock);

	if (nid == NUMA_NO_NODE)
		page = alloc_pages(htlb_alloc_mask(h)|__GFP_COMP|
				   __GFP_REPEAT|__GFP_NOWARN,
				   huge_page_order(h));
	else
		page = alloc_pages_exact_node(nid,
			htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
			__GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));

	if (page && arch_prepare_hugepage(page)) {
		__free_pages(page, huge_page_order(h));
		page = NULL;
	}

	spin_lock(&hugetlb_lock);
	if (page) {
		INIT_LIST_HEAD(&page->lru);
		r_nid = page_to_nid(page);
		set_compound_page_dtor(page, free_huge_page);
		set_hugetlb_cgroup(page, NULL);
		/*
		 * We incremented the global counters already
		 */
		h->nr_huge_pages_node[r_nid]++;
		h->surplus_huge_pages_node[r_nid]++;
		__count_vm_event(HTLB_BUDDY_PGALLOC);
	} else {
		h->nr_huge_pages--;
		h->surplus_huge_pages--;
		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
	}
	spin_unlock(&hugetlb_lock);

	return page;
}

/*
 * This allocation function is useful in the context where vma is irrelevant.
 * E.g. soft-offlining uses this function because it only cares about the
 * physical address of the error page.
 */
struct page *alloc_huge_page_node(struct hstate *h, int nid)
{
	struct page *page = NULL;

	spin_lock(&hugetlb_lock);
	if (h->free_huge_pages - h->resv_huge_pages > 0)
		page = dequeue_huge_page_node(h, nid);
	spin_unlock(&hugetlb_lock);

	if (!page)
		page = alloc_buddy_huge_page(h, nid);

	return page;
}

/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */
static int gather_surplus_pages(struct hstate *h, int delta)
{
	struct list_head surplus_list;
	struct page *page, *tmp;
	int ret, i;
	int needed, allocated;
	bool alloc_ok = true;

	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
	if (needed <= 0) {
		h->resv_huge_pages += delta;
		return 0;
	}

	allocated = 0;
	INIT_LIST_HEAD(&surplus_list);

	ret = -ENOMEM;
retry:
	spin_unlock(&hugetlb_lock);
	for (i = 0; i < needed; i++) {
		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
		if (!page) {
			alloc_ok = false;
			break;
		}
		list_add(&page->lru, &surplus_list);
	}
	allocated += i;

	/*
	 * After retaking hugetlb_lock, we need to recalculate 'needed'
	 * because either resv_huge_pages or free_huge_pages may have changed.
	 */
	spin_lock(&hugetlb_lock);
	needed = (h->resv_huge_pages + delta) -
			(h->free_huge_pages + allocated);
	if (needed > 0) {
		if (alloc_ok)
			goto retry;
		/*
		 * We were not able to allocate enough pages to
		 * satisfy the entire reservation so we free what
		 * we've allocated so far.
		 */
		goto free;
	}
	/*
	 * The surplus_list now contains _at_least_ the number of extra pages
	 * needed to accommodate the reservation.  Add the appropriate number
	 * of pages to the hugetlb pool and free the extras back to the buddy
	 * allocator.  Commit the entire reservation here to prevent another
	 * process from stealing the pages as they are added to the pool but
	 * before they are reserved.
	 */
	needed += allocated;
	h->resv_huge_pages += delta;
	ret = 0;

	/* Free the needed pages to the hugetlb pool */
	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
		if ((--needed) < 0)
			break;
		/*
		 * This page is now managed by the hugetlb allocator and has
		 * no users -- drop the buddy allocator's reference.
		 */
		put_page_testzero(page);
		VM_BUG_ON_PAGE(page_count(page), page);
		enqueue_huge_page(h, page);
	}
free:
	spin_unlock(&hugetlb_lock);

	/* Free unnecessary surplus pages to the buddy allocator */
	list_for_each_entry_safe(page, tmp, &surplus_list, lru)
		put_page(page);
	spin_lock(&hugetlb_lock);

	return ret;
}

/*
 * When releasing a hugetlb pool reservation, any surplus pages that were
 * allocated to satisfy the reservation must be explicitly freed if they were
 * never used.
 * Called with hugetlb_lock held.
 */
static void return_unused_surplus_pages(struct hstate *h,
					unsigned long unused_resv_pages)
{
	unsigned long nr_pages;

	/* Uncommit the reservation */
	h->resv_huge_pages -= unused_resv_pages;

	/* Cannot return gigantic pages currently */
	if (hstate_is_gigantic(h))
		return;

	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);

	/*
	 * We want to release as many surplus pages as possible, spread
	 * evenly across all nodes with memory. Iterate across these nodes
	 * until we can no longer free unreserved surplus pages. This occurs
	 * when the nodes with surplus pages have no free pages.
	 * free_pool_huge_page() will balance the freed pages across the
	 * on-line nodes with memory and will handle the hstate accounting.
	 */
	while (nr_pages--) {
		if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
			break;
		cond_resched_lock(&hugetlb_lock);
	}
}

/*
 * Determine if the huge page at addr within the vma has an associated
 * reservation.  Where it does not we will need to logically increase
 * reservation and actually increase subpool usage before an allocation
 * can occur.  Where any new reservation would be required the
 * reservation change is prepared, but not committed.  Once the page
 * has been allocated from the subpool and instantiated the change should
 * be committed via vma_commit_reservation.  No action is required on
 * failure.
 */
static long vma_needs_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct resv_map *resv;
	pgoff_t idx;
	long chg;

	resv = vma_resv_map(vma);
	if (!resv)
		return 1;

	idx = vma_hugecache_offset(h, vma, addr);
	chg = region_chg(resv, idx, idx + 1);

	if (vma->vm_flags & VM_MAYSHARE)
		return chg;
	else
		return chg < 0 ? chg : 0;
}
static void vma_commit_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct resv_map *resv;
	pgoff_t idx;

	resv = vma_resv_map(vma);
	if (!resv)
		return;

	idx = vma_hugecache_offset(h, vma, addr);
	region_add(resv, idx, idx + 1);
}

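/*
 * Editor's note on the return values above (not in the original source):
 * for a VM_MAYSHARE vma the result is the raw region_chg() count -- 0 when
 * the offset already has a region, 1 when a new reservation must be taken.
 * For a private mapping the reserve was taken at mmap() time, so a positive
 * region count is folded to 0 and only a negative error value is passed
 * through.
 */
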
static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr, int avoid_reserve)
{
	struct hugepage_subpool *spool = subpool_vma(vma);
	struct hstate *h = hstate_vma(vma);
	struct page *page;
	long chg;
	int ret, idx;
	struct hugetlb_cgroup *h_cg;

	idx = hstate_index(h);
	/*
	 * Processes that did not create the mapping will have no
	 * reserves and will not have accounted against subpool
	 * limit. Check that the subpool limit can be made before
	 * satisfying the allocation.  MAP_NORESERVE mappings may also
	 * need pages and subpool limit allocated if no reserve
	 * mapping overlaps.
	 */
	chg = vma_needs_reservation(h, vma, addr);
	if (chg < 0)
		return ERR_PTR(-ENOMEM);
	if (chg || avoid_reserve)
		if (hugepage_subpool_get_pages(spool, 1) < 0)
			return ERR_PTR(-ENOSPC);

	ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
	if (ret)
		goto out_subpool_put;

	spin_lock(&hugetlb_lock);
	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, chg);
	if (!page) {
		spin_unlock(&hugetlb_lock);
		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
		if (!page)
			goto out_uncharge_cgroup;

		spin_lock(&hugetlb_lock);
		list_move(&page->lru, &h->hugepage_activelist);
		/* Fall through */
	}
	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
	spin_unlock(&hugetlb_lock);

	set_page_private(page, (unsigned long)spool);

	vma_commit_reservation(h, vma, addr);
	return page;

out_uncharge_cgroup:
	hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
out_subpool_put:
	if (chg || avoid_reserve)
		hugepage_subpool_put_pages(spool, 1);
	return ERR_PTR(-ENOSPC);
}

/*
 * alloc_huge_page()'s wrapper which simply returns the page if allocation
 * succeeds, otherwise NULL. This function is called from new_vma_page(),
 * where no ERR_VALUE is expected to be returned.
 */
struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve)
{
	struct page *page = alloc_huge_page(vma, addr, avoid_reserve);
	if (IS_ERR(page))
		page = NULL;
	return page;
}

int __weak alloc_bootmem_huge_page(struct hstate *h)
{
	struct huge_bootmem_page *m;
	int nr_nodes, node;

	for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
		void *addr;

		addr = memblock_virt_alloc_try_nid_nopanic(
				huge_page_size(h), huge_page_size(h),
				0, BOOTMEM_ALLOC_ACCESSIBLE, node);
		if (addr) {
			/*
			 * Use the beginning of the huge page to store the
			 * huge_bootmem_page struct (until gather_bootmem
			 * puts them into the mem_map).
			 */
			m = addr;
			goto found;
		}
	}
	return 0;

found:
	BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
	/* Put them into a private list first because mem_map is not up yet */
	list_add(&m->list, &huge_boot_pages);
	m->hstate = h;
	return 1;
}

static void __init prep_compound_huge_page(struct page *page, int order)
{
	if (unlikely(order > (MAX_ORDER - 1)))
		prep_compound_gigantic_page(page, order);
	else
		prep_compound_page(page, order);
}

/* Put bootmem huge pages into the standard lists after mem_map is up */
static void __init gather_bootmem_prealloc(void)
{
	struct huge_bootmem_page *m;

	list_for_each_entry(m, &huge_boot_pages, list) {
		struct hstate *h = m->hstate;
		struct page *page;

#ifdef CONFIG_HIGHMEM
		page = pfn_to_page(m->phys >> PAGE_SHIFT);
		memblock_free_late(__pa(m),
				   sizeof(struct huge_bootmem_page));
#else
		page = virt_to_page(m);
#endif
		WARN_ON(page_count(page) != 1);
		prep_compound_huge_page(page, h->order);
		WARN_ON(PageReserved(page));
		prep_new_huge_page(h, page, page_to_nid(page));
		/*
		 * If we had gigantic hugepages allocated at boot time, we need
		 * to restore the 'stolen' pages to totalram_pages in order to
		 * fix confusing memory reports from free(1) and other
		 * side-effects, like CommitLimit going negative.
		 */
		if (hstate_is_gigantic(h))
			adjust_managed_page_count(page, 1 << h->order);
	}
}

static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
{
	unsigned long i;

	for (i = 0; i < h->max_huge_pages; ++i) {
		if (hstate_is_gigantic(h)) {
			if (!alloc_bootmem_huge_page(h))
				break;
		} else if (!alloc_fresh_huge_page(h,
					 &node_states[N_MEMORY]))
			break;
	}
	h->max_huge_pages = i;
}

static void __init hugetlb_init_hstates(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (minimum_order > huge_page_order(h))
			minimum_order = huge_page_order(h);

		/* oversize hugepages were init'ed in early boot */
		if (!hstate_is_gigantic(h))
			hugetlb_hstate_alloc_pages(h);
	}
	VM_BUG_ON(minimum_order == UINT_MAX);
}

static char * __init memfmt(char *buf, unsigned long n)
{
	if (n >= (1UL << 30))
		sprintf(buf, "%lu GB", n >> 30);
	else if (n >= (1UL << 20))
		sprintf(buf, "%lu MB", n >> 20);
	else
		sprintf(buf, "%lu KB", n >> 10);
	return buf;
}

static void __init report_hugepages(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		char buf[32];
		pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
			memfmt(buf, huge_page_size(h)),
			h->free_huge_pages);
	}
}

#ifdef CONFIG_HIGHMEM
static void try_to_free_low(struct hstate *h, unsigned long count,
						nodemask_t *nodes_allowed)
{
	int i;

	if (hstate_is_gigantic(h))
		return;

	for_each_node_mask(i, *nodes_allowed) {
		struct page *page, *next;
		struct list_head *freel = &h->hugepage_freelists[i];
		list_for_each_entry_safe(page, next, freel, lru) {
			if (count >= h->nr_huge_pages)
				return;
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(h, page);
			h->free_huge_pages--;
			h->free_huge_pages_node[page_to_nid(page)]--;
		}
	}
}
#else
static inline void try_to_free_low(struct hstate *h, unsigned long count,
						nodemask_t *nodes_allowed)
{
}
#endif

/*
 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
 * balanced by operating on them in a round-robin fashion.
 * Returns 1 if an adjustment was made.
 */
static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
				int delta)
{
	int nr_nodes, node;

	VM_BUG_ON(delta != -1 && delta != 1);

	if (delta < 0) {
		for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
			if (h->surplus_huge_pages_node[node])
				goto found;
		}
	} else {
		for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
			if (h->surplus_huge_pages_node[node] <
					h->nr_huge_pages_node[node])
				goto found;
		}
	}
	return 0;

found:
	h->surplus_huge_pages += delta;
	h->surplus_huge_pages_node[node] += delta;
	return 1;
}

#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
						nodemask_t *nodes_allowed)
{
	unsigned long min_count, ret;

	if (hstate_is_gigantic(h) && !gigantic_page_supported())
		return h->max_huge_pages;

	/*
	 * Increase the pool size
	 * First take pages out of surplus state.  Then make up the
	 * remaining difference by allocating fresh huge pages.
	 *
	 * We might race with alloc_buddy_huge_page() here and be unable
	 * to convert a surplus huge page to a normal huge page. That is
	 * not critical, though, it just means the overall size of the
	 * pool might be one hugepage larger than it needs to be, but
	 * within all the constraints specified by the sysctls.
	 */
	spin_lock(&hugetlb_lock);
	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
		if (!adjust_pool_surplus(h, nodes_allowed, -1))
			break;
	}

	while (count > persistent_huge_pages(h)) {
		/*
		 * If this allocation races such that we no longer need the
		 * page, free_huge_page will handle it by freeing the page
		 * and reducing the surplus.
		 */
		spin_unlock(&hugetlb_lock);
		if (hstate_is_gigantic(h))
			ret = alloc_fresh_gigantic_page(h, nodes_allowed);
		else
			ret = alloc_fresh_huge_page(h, nodes_allowed);
		spin_lock(&hugetlb_lock);
		if (!ret)
			goto out;

		/* Bail for signals. Probably ctrl-c from user */
		if (signal_pending(current))
			goto out;
	}

	/*
	 * Decrease the pool size
	 * First return free pages to the buddy allocator (being careful
	 * to keep enough around to satisfy reservations).  Then place
	 * pages into surplus state as needed so the pool will shrink
	 * to the desired size as pages become free.
	 *
	 * By placing pages into the surplus state independent of the
	 * overcommit value, we are allowing the surplus pool size to
	 * exceed overcommit. There are few sane options here. Since
	 * alloc_buddy_huge_page() is checking the global counter,
	 * though, we'll note that we're not allowed to exceed surplus
	 * and won't grow the pool anywhere else. Not until one of the
	 * sysctls are changed, or the surplus pages go out of use.
	 */
	min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
	min_count = max(count, min_count);
	try_to_free_low(h, min_count, nodes_allowed);
	while (min_count < persistent_huge_pages(h)) {
		if (!free_pool_huge_page(h, nodes_allowed, 0))
			break;
		cond_resched_lock(&hugetlb_lock);
	}
	while (count < persistent_huge_pages(h)) {
		if (!adjust_pool_surplus(h, nodes_allowed, 1))
			break;
	}
out:
	ret = persistent_huge_pages(h);
	spin_unlock(&hugetlb_lock);
	return ret;
}
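
/*
 * Both the sysfs nr_hugepages attribute and the vm.nr_hugepages sysctl
 * reach this function through __nr_hugepages_store_common().  For example,
 * "echo 128 > /proc/sys/vm/nr_hugepages" arrives here with count == 128
 * and nodes_allowed == &node_states[N_MEMORY].
 */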

#define HSTATE_ATTR_RO(_name) \
	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

#define HSTATE_ATTR(_name) \
	static struct kobj_attribute _name##_attr = \
		__ATTR(_name, 0644, _name##_show, _name##_store)

static struct kobject *hugepages_kobj;
static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];

static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);

static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
{
	int i;

	for (i = 0; i < HUGE_MAX_HSTATE; i++)
		if (hstate_kobjs[i] == kobj) {
			if (nidp)
				*nidp = NUMA_NO_NODE;
			return &hstates[i];
		}

	return kobj_to_node_hstate(kobj, nidp);
}

static ssize_t nr_hugepages_show_common(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h;
	unsigned long nr_huge_pages;
	int nid;

	h = kobj_to_hstate(kobj, &nid);
	if (nid == NUMA_NO_NODE)
		nr_huge_pages = h->nr_huge_pages;
	else
		nr_huge_pages = h->nr_huge_pages_node[nid];

	return sprintf(buf, "%lu\n", nr_huge_pages);
}

static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
					   struct hstate *h, int nid,
					   unsigned long count, size_t len)
{
	int err;
	NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);

	if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
		err = -EINVAL;
		goto out;
	}

	if (nid == NUMA_NO_NODE) {
		/*
		 * global hstate attribute
		 */
		if (!(obey_mempolicy &&
				init_nodemask_of_mempolicy(nodes_allowed))) {
			NODEMASK_FREE(nodes_allowed);
			nodes_allowed = &node_states[N_MEMORY];
		}
	} else if (nodes_allowed) {
		/*
		 * per node hstate attribute: adjust count to global,
		 * but restrict alloc/free to the specified node.
		 */
		count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
		init_nodemask_of_node(nodes_allowed, nid);
	} else
		nodes_allowed = &node_states[N_MEMORY];

	h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);

	if (nodes_allowed != &node_states[N_MEMORY])
		NODEMASK_FREE(nodes_allowed);

	return len;
out:
	NODEMASK_FREE(nodes_allowed);
	return err;
}

static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
					 struct kobject *kobj, const char *buf,
					 size_t len)
{
	struct hstate *h;
	unsigned long count;
	int nid;
	int err;

	err = kstrtoul(buf, 10, &count);
	if (err)
		return err;

	h = kobj_to_hstate(kobj, &nid);
	return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
}

static ssize_t nr_hugepages_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	return nr_hugepages_show_common(kobj, attr, buf);
}

static ssize_t nr_hugepages_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t len)
{
	return nr_hugepages_store_common(false, kobj, buf, len);
}
HSTATE_ATTR(nr_hugepages);
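
/*
 * The attributes defined here appear under /sys/kernel/mm/hugepages/ in a
 * per-hstate directory named after h->name, e.g.
 * /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages on x86-64.  A
 * subset is also exposed per node (see the per-node attribute group
 * further down) under /sys/devices/system/node/nodeN/hugepages/.
 */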

#ifdef CONFIG_NUMA

/*
 * hstate attribute for optionally mempolicy-based constraint on persistent
 * huge page alloc/free.
 */
static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return nr_hugepages_show_common(kobj, attr, buf);
}

static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
	       struct kobj_attribute *attr, const char *buf, size_t len)
{
	return nr_hugepages_store_common(true, kobj, buf, len);
}
HSTATE_ATTR(nr_hugepages_mempolicy);
#endif

static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h = kobj_to_hstate(kobj, NULL);
	return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
}

static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int err;
	unsigned long input;
	struct hstate *h = kobj_to_hstate(kobj, NULL);

	if (hstate_is_gigantic(h))
		return -EINVAL;

	err = kstrtoul(buf, 10, &input);
	if (err)
		return err;

	spin_lock(&hugetlb_lock);
	h->nr_overcommit_huge_pages = input;
	spin_unlock(&hugetlb_lock);

	return count;
}
HSTATE_ATTR(nr_overcommit_hugepages);

static ssize_t free_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h;
	unsigned long free_huge_pages;
	int nid;

	h = kobj_to_hstate(kobj, &nid);
	if (nid == NUMA_NO_NODE)
		free_huge_pages = h->free_huge_pages;
	else
		free_huge_pages = h->free_huge_pages_node[nid];

	return sprintf(buf, "%lu\n", free_huge_pages);
}
HSTATE_ATTR_RO(free_hugepages);

static ssize_t resv_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h = kobj_to_hstate(kobj, NULL);
	return sprintf(buf, "%lu\n", h->resv_huge_pages);
}
HSTATE_ATTR_RO(resv_hugepages);

static ssize_t surplus_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h;
	unsigned long surplus_huge_pages;
	int nid;

	h = kobj_to_hstate(kobj, &nid);
	if (nid == NUMA_NO_NODE)
		surplus_huge_pages = h->surplus_huge_pages;
	else
		surplus_huge_pages = h->surplus_huge_pages_node[nid];

	return sprintf(buf, "%lu\n", surplus_huge_pages);
}
HSTATE_ATTR_RO(surplus_hugepages);

static struct attribute *hstate_attrs[] = {
	&nr_hugepages_attr.attr,
	&nr_overcommit_hugepages_attr.attr,
	&free_hugepages_attr.attr,
	&resv_hugepages_attr.attr,
	&surplus_hugepages_attr.attr,
#ifdef CONFIG_NUMA
	&nr_hugepages_mempolicy_attr.attr,
#endif
	NULL,
};

static struct attribute_group hstate_attr_group = {
	.attrs = hstate_attrs,
};

static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
				    struct kobject **hstate_kobjs,
				    struct attribute_group *hstate_attr_group)
{
	int retval;
	int hi = hstate_index(h);

	hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
	if (!hstate_kobjs[hi])
		return -ENOMEM;

	retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
	if (retval)
		kobject_put(hstate_kobjs[hi]);

	return retval;
}

static void __init hugetlb_sysfs_init(void)
{
	struct hstate *h;
	int err;

	hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
	if (!hugepages_kobj)
		return;

	for_each_hstate(h) {
		err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
					 hstate_kobjs, &hstate_attr_group);
		if (err)
			pr_err("Hugetlb: Unable to add hstate %s\n", h->name);
	}
}

#ifdef CONFIG_NUMA

/*
 * node_hstate/s - associate per node hstate attributes, via their kobjects,
 * with node devices in node_devices[] using a parallel array.  The array
 * index of a node device or _hstate == node id.
 * This is here to avoid any static dependency of the node device driver, in
 * the base kernel, on the hugetlb module.
 */
struct node_hstate {
	struct kobject		*hugepages_kobj;
	struct kobject		*hstate_kobjs[HUGE_MAX_HSTATE];
};
struct node_hstate node_hstates[MAX_NUMNODES];

/*
 * A subset of global hstate attributes for node devices
 */
static struct attribute *per_node_hstate_attrs[] = {
	&nr_hugepages_attr.attr,
	&free_hugepages_attr.attr,
	&surplus_hugepages_attr.attr,
	NULL,
};

static struct attribute_group per_node_hstate_attr_group = {
	.attrs = per_node_hstate_attrs,
};

/*
 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
 * Returns node id via non-NULL nidp.
 */
static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
{
	int nid;

	for (nid = 0; nid < nr_node_ids; nid++) {
		struct node_hstate *nhs = &node_hstates[nid];
		int i;
		for (i = 0; i < HUGE_MAX_HSTATE; i++)
			if (nhs->hstate_kobjs[i] == kobj) {
				if (nidp)
					*nidp = nid;
				return &hstates[i];
			}
	}

	BUG();
	return NULL;
}

/*
 * Unregister hstate attributes from a single node device.
 * No-op if no hstate attributes attached.
 */
static void hugetlb_unregister_node(struct node *node)
{
	struct hstate *h;
	struct node_hstate *nhs = &node_hstates[node->dev.id];

	if (!nhs->hugepages_kobj)
		return;		/* no hstate attributes */

	for_each_hstate(h) {
		int idx = hstate_index(h);
		if (nhs->hstate_kobjs[idx]) {
			kobject_put(nhs->hstate_kobjs[idx]);
			nhs->hstate_kobjs[idx] = NULL;
		}
	}

	kobject_put(nhs->hugepages_kobj);
	nhs->hugepages_kobj = NULL;
}

/*
 * hugetlb module exit:  unregister hstate attributes from node devices
 * that have them.
 */
static void hugetlb_unregister_all_nodes(void)
{
	int nid;

	/*
	 * disable node device registrations.
	 */
	register_hugetlbfs_with_node(NULL, NULL);

	/*
	 * remove hstate attributes from any nodes that have them.
	 */
	for (nid = 0; nid < nr_node_ids; nid++)
		hugetlb_unregister_node(node_devices[nid]);
}

/*
 * Register hstate attributes for a single node device.
 * No-op if attributes already registered.
 */
static void hugetlb_register_node(struct node *node)
{
	struct hstate *h;
	struct node_hstate *nhs = &node_hstates[node->dev.id];
	int err;

	if (nhs->hugepages_kobj)
		return;		/* already allocated */

	nhs->hugepages_kobj = kobject_create_and_add("hugepages",
							&node->dev.kobj);
	if (!nhs->hugepages_kobj)
		return;

	for_each_hstate(h) {
		err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
						nhs->hstate_kobjs,
						&per_node_hstate_attr_group);
		if (err) {
			pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
				h->name, node->dev.id);
			hugetlb_unregister_node(node);
			break;
		}
	}
}

/*
 * hugetlb init time:  register hstate attributes for all registered node
 * devices of nodes that have memory.  All on-line nodes should have
 * registered their associated device by this time.
 */
static void __init hugetlb_register_all_nodes(void)
{
	int nid;

	for_each_node_state(nid, N_MEMORY) {
		struct node *node = node_devices[nid];
		if (node->dev.id == nid)
			hugetlb_register_node(node);
	}

	/*
	 * Let the node device driver know we're here so it can
	 * [un]register hstate attributes on node hotplug.
	 */
	register_hugetlbfs_with_node(hugetlb_register_node,
				     hugetlb_unregister_node);
}
#else	/* !CONFIG_NUMA */

static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
{
	BUG();
	if (nidp)
		*nidp = -1;
	return NULL;
}

static void hugetlb_unregister_all_nodes(void) { }

static void hugetlb_register_all_nodes(void) { }

#endif

static void __exit hugetlb_exit(void)
{
	struct hstate *h;

	hugetlb_unregister_all_nodes();

	for_each_hstate(h) {
		kobject_put(hstate_kobjs[hstate_index(h)]);
	}

	kobject_put(hugepages_kobj);
	kfree(htlb_fault_mutex_table);
}
module_exit(hugetlb_exit);

static int __init hugetlb_init(void)
{
	int i;

	if (!hugepages_supported())
		return 0;

	if (!size_to_hstate(default_hstate_size)) {
		default_hstate_size = HPAGE_SIZE;
		if (!size_to_hstate(default_hstate_size))
			hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
	}
	default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
	if (default_hstate_max_huge_pages)
		default_hstate.max_huge_pages = default_hstate_max_huge_pages;

	hugetlb_init_hstates();
	gather_bootmem_prealloc();
	report_hugepages();

	hugetlb_sysfs_init();
	hugetlb_register_all_nodes();
	hugetlb_cgroup_file_init();

#ifdef CONFIG_SMP
	num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
#else
	num_fault_mutexes = 1;
#endif
	htlb_fault_mutex_table =
		kmalloc(sizeof(struct mutex) * num_fault_mutexes, GFP_KERNEL);
	BUG_ON(!htlb_fault_mutex_table);

	for (i = 0; i < num_fault_mutexes; i++)
		mutex_init(&htlb_fault_mutex_table[i]);
	return 0;
}
module_init(hugetlb_init);

/* Should be called on processing a hugepagesz=... option */
void __init hugetlb_add_hstate(unsigned order)
{
	struct hstate *h;
	unsigned long i;

	if (size_to_hstate(PAGE_SIZE << order)) {
		pr_warning("hugepagesz= specified twice, ignoring\n");
		return;
	}
	BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
	BUG_ON(order == 0);
	h = &hstates[hugetlb_max_hstate++];
	h->order = order;
	h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
	h->nr_huge_pages = 0;
	h->free_huge_pages = 0;
	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
	INIT_LIST_HEAD(&h->hugepage_activelist);
	h->next_nid_to_alloc = first_node(node_states[N_MEMORY]);
	h->next_nid_to_free = first_node(node_states[N_MEMORY]);
	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
					huge_page_size(h)/1024);

	parsed_hstate = h;
}

static int __init hugetlb_nrpages_setup(char *s)
{
	unsigned long *mhp;
	static unsigned long *last_mhp;

	/*
	 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter
	 * yet, so this hugepages= parameter goes to the "default hstate".
	 */
	if (!hugetlb_max_hstate)
		mhp = &default_hstate_max_huge_pages;
	else
		mhp = &parsed_hstate->max_huge_pages;

	if (mhp == last_mhp) {
		pr_warning("hugepages= specified twice without "
			   "interleaving hugepagesz=, ignoring\n");
		return 1;
	}

	if (sscanf(s, "%lu", mhp) <= 0)
		*mhp = 0;

	/*
	 * Global state is always initialized later in hugetlb_init.
	 * But we need to allocate >= MAX_ORDER hstates here early to still
	 * use the bootmem allocator.
	 */
	if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
		hugetlb_hstate_alloc_pages(parsed_hstate);

	last_mhp = mhp;

	return 1;
}
__setup("hugepages=", hugetlb_nrpages_setup);

static int __init hugetlb_default_setup(char *s)
{
	default_hstate_size = memparse(s, &s);
	return 1;
}
__setup("default_hugepagesz=", hugetlb_default_setup);
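
/*
 * Boot command-line example: on an x86-64 machine with 1 GB page support,
 * "default_hugepagesz=1G hugepagesz=1G hugepages=4" registers the 1 GB
 * hstate, pre-allocates four gigantic pages from bootmem (the
 * order >= MAX_ORDER case above), and makes that hstate the default.
 */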

static unsigned int cpuset_mems_nr(unsigned int *array)
{
	int node;
	unsigned int nr = 0;

	for_each_node_mask(node, cpuset_current_mems_allowed)
		nr += array[node];

	return nr;
}

#ifdef CONFIG_SYSCTL
static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
			 struct ctl_table *table, int write,
			 void __user *buffer, size_t *length, loff_t *ppos)
{
	struct hstate *h = &default_hstate;
	unsigned long tmp = h->max_huge_pages;
	int ret;

	if (!hugepages_supported())
		return -ENOTSUPP;

	table->data = &tmp;
	table->maxlen = sizeof(unsigned long);
	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
	if (ret)
		goto out;

	if (write)
		ret = __nr_hugepages_store_common(obey_mempolicy, h,
						  NUMA_NO_NODE, tmp, *length);
out:
	return ret;
}

int hugetlb_sysctl_handler(struct ctl_table *table, int write,
			  void __user *buffer, size_t *length, loff_t *ppos)
{

	return hugetlb_sysctl_handler_common(false, table, write,
							buffer, length, ppos);
}

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
			  void __user *buffer, size_t *length, loff_t *ppos)
{
	return hugetlb_sysctl_handler_common(true, table, write,
							buffer, length, ppos);
}
#endif /* CONFIG_NUMA */

int hugetlb_overcommit_handler(struct ctl_table *table, int write,
			void __user *buffer,
			size_t *length, loff_t *ppos)
{
	struct hstate *h = &default_hstate;
	unsigned long tmp;
	int ret;

	if (!hugepages_supported())
		return -ENOTSUPP;

	tmp = h->nr_overcommit_huge_pages;

	if (write && hstate_is_gigantic(h))
		return -EINVAL;

	table->data = &tmp;
	table->maxlen = sizeof(unsigned long);
	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
	if (ret)
		goto out;

	if (write) {
		spin_lock(&hugetlb_lock);
		h->nr_overcommit_huge_pages = tmp;
		spin_unlock(&hugetlb_lock);
	}
out:
	return ret;
}
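
/*
 * These handlers back the /proc/sys/vm/nr_hugepages,
 * nr_hugepages_mempolicy (CONFIG_NUMA only) and nr_overcommit_hugepages
 * entries.  For example, "sysctl vm.nr_overcommit_hugepages=16" permits up
 * to 16 surplus huge pages of the default size to be allocated on demand.
 */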

#endif /* CONFIG_SYSCTL */

void hugetlb_report_meminfo(struct seq_file *m)
{
	struct hstate *h = &default_hstate;
	if (!hugepages_supported())
		return;
	seq_printf(m,
			"HugePages_Total:   %5lu\n"
			"HugePages_Free:    %5lu\n"
			"HugePages_Rsvd:    %5lu\n"
			"HugePages_Surp:    %5lu\n"
			"Hugepagesize:   %8lu kB\n",
			h->nr_huge_pages,
			h->free_huge_pages,
			h->resv_huge_pages,
			h->surplus_huge_pages,
			1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
}
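
/*
 * This emits the hugetlb section of /proc/meminfo; with 64 free 2 MB
 * pages and no reservations it reads:
 *
 *   HugePages_Total:      64
 *   HugePages_Free:       64
 *   HugePages_Rsvd:        0
 *   HugePages_Surp:        0
 *   Hugepagesize:       2048 kB
 */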

int hugetlb_report_node_meminfo(int nid, char *buf)
{
	struct hstate *h = &default_hstate;
	if (!hugepages_supported())
		return 0;
	return sprintf(buf,
		"Node %d HugePages_Total: %5u\n"
		"Node %d HugePages_Free:  %5u\n"
		"Node %d HugePages_Surp:  %5u\n",
		nid, h->nr_huge_pages_node[nid],
		nid, h->free_huge_pages_node[nid],
		nid, h->surplus_huge_pages_node[nid]);
}

void hugetlb_show_meminfo(void)
{
	struct hstate *h;
	int nid;

	if (!hugepages_supported())
		return;

	for_each_node_state(nid, N_MEMORY)
		for_each_hstate(h)
			pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
				nid,
				h->nr_huge_pages_node[nid],
				h->free_huge_pages_node[nid],
				h->surplus_huge_pages_node[nid],
				1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units */
unsigned long hugetlb_total_pages(void)
{
	struct hstate *h;
	unsigned long nr_total_pages = 0;

	for_each_hstate(h)
		nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
	return nr_total_pages;
}

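/*
 * Example: a single 2 MB hstate holding 64 huge pages with a 4 kB base
 * page size contributes 64 * 512 = 32768 to the total.
 */
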
static int hugetlb_acct_memory(struct hstate *h, long delta)
{
	int ret = -ENOMEM;

	spin_lock(&hugetlb_lock);
	/*
	 * When cpuset is configured, it breaks the strict hugetlb page
	 * reservation as the accounting is done on a global variable. Such
	 * reservation is completely rubbish in the presence of cpuset because
	 * the reservation is not checked against page availability for the
	 * current cpuset. An application can still potentially be OOM'ed by
	 * the kernel for lack of free htlb pages in the cpuset that the task
	 * is in. Attempting to enforce strict accounting with cpuset is
	 * almost impossible (or too ugly) because cpusets are so fluid that
	 * a task or memory node can be dynamically moved between cpusets.
	 *
	 * The change of semantics for shared hugetlb mapping with cpuset is
	 * undesirable. However, in order to preserve some of the semantics,
	 * we fall back to check against current free page availability as
	 * a best attempt and hopefully to minimize the impact of changing
	 * semantics that cpuset has.
	 */
	if (delta > 0) {
		if (gather_surplus_pages(h, delta) < 0)
			goto out;

		if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
			return_unused_surplus_pages(h, delta);
			goto out;
		}
	}

	ret = 0;
	if (delta < 0)
		return_unused_surplus_pages(h, (unsigned long) -delta);

out:
	spin_unlock(&hugetlb_lock);
	return ret;
}

static void hugetlb_vm_op_open(struct vm_area_struct *vma)
{
	struct resv_map *resv = vma_resv_map(vma);

	/*
	 * This new VMA should share its siblings reservation map if present.
	 * The VMA will only ever have a valid reservation map pointer where
	 * it is being copied for another still existing VMA.  As that VMA
	 * has a reference to the reservation map it cannot disappear until
	 * after this open call completes.  It is therefore safe to take a
	 * new reference here without additional locking.
	 */
	if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
		kref_get(&resv->refs);
}

static void hugetlb_vm_op_close(struct vm_area_struct *vma)
{
	struct hstate *h = hstate_vma(vma);
	struct resv_map *resv = vma_resv_map(vma);
	struct hugepage_subpool *spool = subpool_vma(vma);
	unsigned long reserve, start, end;
	long gbl_reserve;

	if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
		return;

	start = vma_hugecache_offset(h, vma, vma->vm_start);
	end = vma_hugecache_offset(h, vma, vma->vm_end);

	reserve = (end - start) - region_count(resv, start, end);

	kref_put(&resv->refs, resv_map_release);

	if (reserve) {
		/*
		 * Decrement reserve counts.  The global reserve count may be
		 * adjusted if the subpool has a minimum size.
		 */
		gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
		hugetlb_acct_memory(h, -gbl_reserve);
	}
}

/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we
 * get this far.
 */
static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}

const struct vm_operations_struct hugetlb_vm_ops = {
	.fault = hugetlb_vm_op_fault,
	.open = hugetlb_vm_op_open,
	.close = hugetlb_vm_op_close,
};

static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;

	if (writable) {
		entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
					 vma->vm_page_prot)));
	} else {
		entry = huge_pte_wrprotect(mk_huge_pte(page,
					   vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);
	entry = arch_make_huge_pte(entry, vma, page, writable);

	return entry;
}

static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
		update_mmu_cache(vma, address, ptep);
}

static int is_hugetlb_entry_migration(pte_t pte)
{
	swp_entry_t swp;

	if (huge_pte_none(pte) || pte_present(pte))
		return 0;
	swp = pte_to_swp_entry(pte);
	if (non_swap_entry(swp) && is_migration_entry(swp))
		return 1;
	else
		return 0;
}

static int is_hugetlb_entry_hwpoisoned(pte_t pte)
{
	swp_entry_t swp;

	if (huge_pte_none(pte) || pte_present(pte))
		return 0;
	swp = pte_to_swp_entry(pte);
	if (non_swap_entry(swp) && is_hwpoison_entry(swp))
		return 1;
	else
		return 0;
}

int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr;
	int cow;
	struct hstate *h = hstate_vma(vma);
	unsigned long sz = huge_page_size(h);
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */
	int ret = 0;

	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	mmun_start = vma->vm_start;
	mmun_end = vma->vm_end;
	if (cow)
		mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);

	for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
		spinlock_t *src_ptl, *dst_ptl;
		src_pte = huge_pte_offset(src, addr);
		if (!src_pte)
			continue;
		dst_pte = huge_pte_alloc(dst, addr, sz);
		if (!dst_pte) {
			ret = -ENOMEM;
			break;
		}

		/* If the pagetables are shared don't copy or take references */
		if (dst_pte == src_pte)
			continue;

		dst_ptl = huge_pte_lock(h, dst, dst_pte);
		src_ptl = huge_pte_lockptr(h, src, src_pte);
		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
		entry = huge_ptep_get(src_pte);
		if (huge_pte_none(entry)) { /* skip none entry */
			;
		} else if (unlikely(is_hugetlb_entry_migration(entry) ||
				    is_hugetlb_entry_hwpoisoned(entry))) {
			swp_entry_t swp_entry = pte_to_swp_entry(entry);

			if (is_write_migration_entry(swp_entry) && cow) {
				/*
				 * COW mappings require pages in both
				 * parent and child to be set to read.
				 */
				make_migration_entry_read(&swp_entry);
				entry = swp_entry_to_pte(swp_entry);
				set_huge_pte_at(src, addr, src_pte, entry);
			}
			set_huge_pte_at(dst, addr, dst_pte, entry);
		} else {
			if (cow) {
				huge_ptep_set_wrprotect(src, addr, src_pte);
				mmu_notifier_invalidate_range(src, mmun_start,
								   mmun_end);
			}
			entry = huge_ptep_get(src_pte);
			ptepage = pte_page(entry);
			get_page(ptepage);
			page_dup_rmap(ptepage);
			set_huge_pte_at(dst, addr, dst_pte, entry);
		}
		spin_unlock(src_ptl);
		spin_unlock(dst_ptl);
	}

	if (cow)
		mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end);

	return ret;
}

void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
			    unsigned long start, unsigned long end,
			    struct page *ref_page)
{
	int force_flush = 0;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep;
	pte_t pte;
	spinlock_t *ptl;
	struct page *page;
	struct hstate *h = hstate_vma(vma);
	unsigned long sz = huge_page_size(h);
	const unsigned long mmun_start = start;	/* For mmu_notifiers */
	const unsigned long mmun_end   = end;	/* For mmu_notifiers */

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~huge_page_mask(h));
	BUG_ON(end & ~huge_page_mask(h));

	tlb_start_vma(tlb, vma);
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	address = start;
again:
	for (; address < end; address += sz) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;

		ptl = huge_pte_lock(h, mm, ptep);
		if (huge_pmd_unshare(mm, &address, ptep))
			goto unlock;

		pte = huge_ptep_get(ptep);
		if (huge_pte_none(pte))
			goto unlock;

		/*
		 * Migrating hugepage or HWPoisoned hugepage is already
		 * unmapped and its refcount is dropped, so just clear pte here.
		 */
		if (unlikely(!pte_present(pte))) {
			huge_pte_clear(mm, address, ptep);
			goto unlock;
		}

		page = pte_page(pte);
		/*
		 * If a reference page is supplied, it is because a specific
		 * page is being unmapped, not a range. Ensure the page we
		 * are about to unmap is the actual page of interest.
		 */
		if (ref_page) {
			if (page != ref_page)
				goto unlock;

			/*
			 * Mark the VMA as having unmapped its page so that
			 * future faults in this VMA will fail rather than
			 * looking like data was lost
			 */
			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
		}

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		tlb_remove_tlb_entry(tlb, ptep, address);
		if (huge_pte_dirty(pte))
			set_page_dirty(page);

		page_remove_rmap(page);
		force_flush = !__tlb_remove_page(tlb, page);
		if (force_flush) {
			address += sz;
			spin_unlock(ptl);
			break;
		}
		/* Bail out after unmapping reference page if supplied */
		if (ref_page) {
			spin_unlock(ptl);
			break;
		}
unlock:
		spin_unlock(ptl);
	}
	/*
	 * mmu_gather ran out of room to batch pages, we break out of
	 * the PTE lock to avoid doing the potential expensive TLB invalidate
	 * and page-free while holding it.
	 */
	if (force_flush) {
		force_flush = 0;
		tlb_flush_mmu(tlb);
		if (address < end && !ref_page)
			goto again;
	}
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
	tlb_end_vma(tlb, vma);
}

void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma, unsigned long start,
			  unsigned long end, struct page *ref_page)
{
	__unmap_hugepage_range(tlb, vma, start, end, ref_page);

	/*
	 * Clear this flag so that x86's huge_pmd_share page_table_shareable
	 * test will fail on a vma being torn down, and not grab a page table
	 * on its way out.  We're lucky that the flag has such an appropriate
	 * name, and can in fact be safely cleared here. We could clear it
	 * before the __unmap_hugepage_range above, but all that's necessary
	 * is to clear it before releasing the i_mmap_rwsem. This works
	 * because in the context this is called, the VMA is about to be
	 * destroyed and the i_mmap_rwsem is held.
	 */
	vma->vm_flags &= ~VM_MAYSHARE;
}

void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end, struct page *ref_page)
{
	struct mm_struct *mm;
	struct mmu_gather tlb;

	mm = vma->vm_mm;

	tlb_gather_mmu(&tlb, mm, start, end);
	__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
	tlb_finish_mmu(&tlb, start, end);
}

/*
 * This is called when the original mapper is failing to COW a MAP_PRIVATE
 * mapping it owns the reserve page for. The intention is to unmap the page
 * from other VMAs and let the children be SIGKILLed if they are faulting the
 * same region.
 */
static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
			      struct page *page, unsigned long address)
{
	struct hstate *h = hstate_vma(vma);
	struct vm_area_struct *iter_vma;
	struct address_space *mapping;
	pgoff_t pgoff;

	/*
	 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
	 * from page cache lookup which is in HPAGE_SIZE units.
	 */
	address = address & huge_page_mask(h);
	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
			vma->vm_pgoff;
	mapping = file_inode(vma->vm_file)->i_mapping;

	/*
	 * Take the mapping lock for the duration of the table walk. As
	 * this mapping should be shared between all the VMAs,
	 * __unmap_hugepage_range() is called as the lock is already held
	 */
	i_mmap_lock_write(mapping);
	vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
		/* Do not unmap the current VMA */
		if (iter_vma == vma)
			continue;

		/*
		 * Unmap the page from other VMAs without their own reserves.
		 * They get marked to be SIGKILLed if they fault in these
		 * areas. This is because a future no-page fault on this VMA
		 * could insert a zeroed page instead of the data existing
		 * from the time of fork. This would look like data corruption
		 */
		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
			unmap_hugepage_range(iter_vma, address,
					     address + huge_page_size(h), page);
	}
	i_mmap_unlock_write(mapping);
}

/*
 * Hugetlb_cow() should be called with page lock of the original hugepage held.
 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
 * cannot race with other handlers or page migration.
 * Keep the pte_same checks anyway to make transition from the mutex easier.
 */
static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, pte_t pte,
			struct page *pagecache_page, spinlock_t *ptl)
{
	struct hstate *h = hstate_vma(vma);
	struct page *old_page, *new_page;
	int ret = 0, outside_reserve = 0;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	old_page = pte_page(pte);

retry_avoidcopy:
	/* If no-one else is actually using this page, avoid the copy
	 * and just make the page writable */
	if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
		page_move_anon_rmap(old_page, vma, address);
		set_huge_ptep_writable(vma, address, ptep);
		return 0;
	}

	/*
	 * If the process that created a MAP_PRIVATE mapping is about to
	 * perform a COW due to a shared page count, attempt to satisfy
	 * the allocation without using the existing reserves. The pagecache
	 * page is used to determine if the reserve at this address was
	 * consumed or not. If reserves were used, a partial faulted mapping
	 * at the time of fork() could consume its reserves on COW instead
	 * of the full address range.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
			old_page != pagecache_page)
		outside_reserve = 1;

	page_cache_get(old_page);

	/*
	 * Drop page table lock as buddy allocator may be called. It will
	 * be acquired again before returning to the caller, as expected.
	 */
	spin_unlock(ptl);
	new_page = alloc_huge_page(vma, address, outside_reserve);

	if (IS_ERR(new_page)) {
		/*
		 * If a process owning a MAP_PRIVATE mapping fails to COW,
		 * it is due to references held by a child and an insufficient
		 * huge page pool. To guarantee the original mappers
		 * reliability, unmap the page from child processes. The child
		 * may get SIGKILLed if it later faults.
		 */
		if (outside_reserve) {
			page_cache_release(old_page);
			BUG_ON(huge_pte_none(pte));
			unmap_ref_private(mm, vma, old_page, address);
			BUG_ON(huge_pte_none(pte));
			spin_lock(ptl);
			ptep = huge_pte_offset(mm, address & huge_page_mask(h));
			if (likely(ptep &&
				   pte_same(huge_ptep_get(ptep), pte)))
				goto retry_avoidcopy;
			/*
			 * race occurs while re-acquiring page table
			 * lock, and our job is done.
			 */
			return 0;
		}

		ret = (PTR_ERR(new_page) == -ENOMEM) ?
			VM_FAULT_OOM : VM_FAULT_SIGBUS;
		goto out_release_old;
	}

	/*
	 * When the original hugepage is a shared one, it does not have
	 * anon_vma prepared.
	 */
	if (unlikely(anon_vma_prepare(vma))) {
		ret = VM_FAULT_OOM;
		goto out_release_all;
	}

	copy_user_huge_page(new_page, old_page, address, vma,
			    pages_per_huge_page(h));
	__SetPageUptodate(new_page);
	set_page_huge_active(new_page);

	mmun_start = address & huge_page_mask(h);
	mmun_end = mmun_start + huge_page_size(h);
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);

	/*
	 * Retake the page table lock to check for racing updates
	 * before the page tables are altered
	 */
	spin_lock(ptl);
	ptep = huge_pte_offset(mm, address & huge_page_mask(h));
	if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
		ClearPagePrivate(new_page);

		/* Break COW */
		huge_ptep_clear_flush(vma, address, ptep);
		mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
		set_huge_pte_at(mm, address, ptep,
				make_huge_pte(vma, new_page, 1));
		page_remove_rmap(old_page);
		hugepage_add_new_anon_rmap(new_page, vma, address);
		/* Make the old page be freed below */
		new_page = old_page;
	}
	spin_unlock(ptl);
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
out_release_all:
	page_cache_release(new_page);
out_release_old:
	page_cache_release(old_page);

	spin_lock(ptl); /* Caller expects lock to be held */
	return ret;
}

/* Return the pagecache page at a given address within a VMA */
static struct page *hugetlbfs_pagecache_page(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	struct address_space *mapping;
	pgoff_t idx;

	mapping = vma->vm_file->f_mapping;
	idx = vma_hugecache_offset(h, vma, address);

	return find_lock_page(mapping, idx);
}

/*
 * Return whether there is a pagecache page to back given address within VMA.
 * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
 */
static bool hugetlbfs_pagecache_present(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	struct address_space *mapping;
	pgoff_t idx;
	struct page *page;

	mapping = vma->vm_file->f_mapping;
	idx = vma_hugecache_offset(h, vma, address);

	page = find_get_page(mapping, idx);
	if (page)
		put_page(page);
	return page != NULL;
}

static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
			   struct address_space *mapping, pgoff_t idx,
			   unsigned long address, pte_t *ptep, unsigned int flags)
{
	struct hstate *h = hstate_vma(vma);
	int ret = VM_FAULT_SIGBUS;
	int anon_rmap = 0;
	unsigned long size;
	struct page *page;
	pte_t new_pte;
	spinlock_t *ptl;

	/*
	 * Currently, we are forced to kill the process in the event the
	 * original mapper has unmapped pages from the child due to a failed
	 * COW. Warn that such a situation has occurred as it may not be obvious
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
		pr_warning("PID %d killed due to inadequate hugepage pool\n",
			   current->pid);
		return ret;
	}

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		size = i_size_read(mapping->host) >> huge_page_shift(h);
		if (idx >= size)
			goto out;
		page = alloc_huge_page(vma, address, 0);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			if (ret == -ENOMEM)
				ret = VM_FAULT_OOM;
			else
				ret = VM_FAULT_SIGBUS;
			goto out;
		}
		clear_huge_page(page, address, pages_per_huge_page(h));
		__SetPageUptodate(page);
		set_page_huge_active(page);

		if (vma->vm_flags & VM_MAYSHARE) {
			int err;
			struct inode *inode = mapping->host;

			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
			if (err) {
				put_page(page);
				if (err == -EEXIST)
					goto retry;
				goto out;
			}
			ClearPagePrivate(page);

			spin_lock(&inode->i_lock);
			inode->i_blocks += blocks_per_huge_page(h);
			spin_unlock(&inode->i_lock);
		} else {
			lock_page(page);
			if (unlikely(anon_vma_prepare(vma))) {
				ret = VM_FAULT_OOM;
				goto backout_unlocked;
			}
			anon_rmap = 1;
		}
	} else {
		/*
		 * If a memory error occurs between mmap() and fault, the
		 * process may not have a hwpoisoned swap entry for the
		 * errored virtual address, so we need to block hugepage
		 * faults with a PG_hwpoison bit check.
		 */
		if (unlikely(PageHWPoison(page))) {
			ret = VM_FAULT_HWPOISON |
				VM_FAULT_SET_HINDEX(hstate_index(h));
			goto backout_unlocked;
		}
	}

	/*
	 * If we are going to COW a private mapping later, we examine the
	 * pending reservations for this page now. This will ensure that
	 * any allocations necessary to record that reservation occur outside
	 * the spinlock.
	 */
	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
		if (vma_needs_reservation(h, vma, address) < 0) {
			ret = VM_FAULT_OOM;
			goto backout_unlocked;
		}

	ptl = huge_pte_lockptr(h, mm, ptep);
	spin_lock(ptl);
	size = i_size_read(mapping->host) >> huge_page_shift(h);
	if (idx >= size)
		goto backout;

	ret = 0;
	if (!huge_pte_none(huge_ptep_get(ptep)))
		goto backout;

	if (anon_rmap) {
		ClearPagePrivate(page);
		hugepage_add_new_anon_rmap(page, vma, address);
	} else
		page_dup_rmap(page);
	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, address, ptep, new_pte);

	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
	}

	spin_unlock(ptl);
	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(ptl);
backout_unlocked:
	unlock_page(page);
	put_page(page);
	goto out;
}

#ifdef CONFIG_SMP
static u32 fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
			    struct vm_area_struct *vma,
			    struct address_space *mapping,
			    pgoff_t idx, unsigned long address)
{
	unsigned long key[2];
	u32 hash;

	if (vma->vm_flags & VM_SHARED) {
		key[0] = (unsigned long) mapping;
		key[1] = idx;
	} else {
		key[0] = (unsigned long) mm;
		key[1] = address >> huge_page_shift(h);
	}

	hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);

	return hash & (num_fault_mutexes - 1);
}
#else
/*
 * For uniprocessor systems we always use a single mutex, so just
 * return 0 and avoid the hashing overhead.
 */
static u32 fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
			    struct vm_area_struct *vma,
			    struct address_space *mapping,
			    pgoff_t idx, unsigned long address)
{
	return 0;
}
#endif
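
/*
 * Example of the keying above: two processes faulting on the same offset
 * of a shared hugetlbfs file hash to the same mutex, because the key is
 * (mapping, index), so only one of them instantiates the page; faults on
 * private mappings are keyed by (mm, huge-page-aligned address) instead.
 */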

int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags)
{
	pte_t *ptep, entry;
	spinlock_t *ptl;
	int ret;
	u32 hash;
	pgoff_t idx;
	struct page *page = NULL;
	struct page *pagecache_page = NULL;
	struct hstate *h = hstate_vma(vma);
	struct address_space *mapping;
	int need_wait_lock = 0;

	address &= huge_page_mask(h);

	ptep = huge_pte_offset(mm, address);
	if (ptep) {
		entry = huge_ptep_get(ptep);
		if (unlikely(is_hugetlb_entry_migration(entry))) {
			migration_entry_wait_huge(vma, mm, ptep);
			return 0;
		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
			return VM_FAULT_HWPOISON_LARGE |
				VM_FAULT_SET_HINDEX(hstate_index(h));
	}

	ptep = huge_pte_alloc(mm, address, huge_page_size(h));
	if (!ptep)
		return VM_FAULT_OOM;

	mapping = vma->vm_file->f_mapping;
	idx = vma_hugecache_offset(h, vma, address);

	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
	hash = fault_mutex_hash(h, mm, vma, mapping, idx, address);
	mutex_lock(&htlb_fault_mutex_table[hash]);

	entry = huge_ptep_get(ptep);
	if (huge_pte_none(entry)) {
		ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
		goto out_mutex;
	}

	ret = 0;

	/*
	 * entry could be a migration/hwpoison entry at this point, so this
	 * check prevents the kernel from going below assuming that we have
	 * an active hugepage in pagecache. This goto expects the 2nd page
	 * fault, and the is_hugetlb_entry_(migration|hwpoisoned) check will
	 * properly handle it.
	 */
	if (!pte_present(entry))
		goto out_mutex;

	/*
	 * If we are going to COW the mapping later, we examine the pending
	 * reservations for this page now. This will ensure that any
	 * allocations necessary to record that reservation occur outside the
	 * spinlock. For private mappings, we also lookup the pagecache
	 * page now as it is used to determine if a reservation has been
	 * consumed.
	 */
	if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
		if (vma_needs_reservation(h, vma, address) < 0) {
			ret = VM_FAULT_OOM;
			goto out_mutex;
		}

		if (!(vma->vm_flags & VM_MAYSHARE))
			pagecache_page = hugetlbfs_pagecache_page(h,
								vma, address);
	}

	ptl = huge_pte_lock(h, mm, ptep);

	/* Check for a racing update before calling hugetlb_cow */
	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
		goto out_ptl;

	/*
	 * hugetlb_cow() requires page locks of pte_page(entry) and
	 * pagecache_page, so here we need to take the former
	 * when page != pagecache_page or !pagecache_page.
	 */
	page = pte_page(entry);
	if (page != pagecache_page)
		if (!trylock_page(page)) {
			need_wait_lock = 1;
			goto out_ptl;
		}

	get_page(page);

	if (flags & FAULT_FLAG_WRITE) {
		if (!huge_pte_write(entry)) {
			ret = hugetlb_cow(mm, vma, address, ptep, entry,
					pagecache_page, ptl);
			goto out_put_page;
		}
		entry = huge_pte_mkdirty(entry);
	}
	entry = pte_mkyoung(entry);
	if (huge_ptep_set_access_flags(vma, address, ptep, entry,
						flags & FAULT_FLAG_WRITE))
		update_mmu_cache(vma, address, ptep);
out_put_page:
	if (page != pagecache_page)
		unlock_page(page);
	put_page(page);
out_ptl:
	spin_unlock(ptl);

	if (pagecache_page) {
		unlock_page(pagecache_page);
		put_page(pagecache_page);
	}
out_mutex:
	mutex_unlock(&htlb_fault_mutex_table[hash]);
	/*
	 * Generally it's safe to hold a refcount while waiting for the page
	 * lock. But here we only wait to defer the next page fault and avoid
	 * a busy loop; the page is not used after being unlocked before this
	 * fault returns, so we are safe from accessing a freed page even
	 * though we wait without taking a refcount.
	 */
	if (need_wait_lock)
		wait_on_page_locked(page);
	return ret;
}
3375
28a35716
ML
3376long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
3377 struct page **pages, struct vm_area_struct **vmas,
3378 unsigned long *position, unsigned long *nr_pages,
3379 long i, unsigned int flags)
63551ae0 3380{
d5d4b0aa
CK
3381 unsigned long pfn_offset;
3382 unsigned long vaddr = *position;
28a35716 3383 unsigned long remainder = *nr_pages;
a5516438 3384 struct hstate *h = hstate_vma(vma);
63551ae0 3385
63551ae0 3386 while (vaddr < vma->vm_end && remainder) {
4c887265 3387 pte_t *pte;
cb900f41 3388 spinlock_t *ptl = NULL;
2a15efc9 3389 int absent;
4c887265 3390 struct page *page;
63551ae0 3391
02057967
DR
3392 /*
3393 * If we have a pending SIGKILL, don't keep faulting pages and
3394 * potentially allocating memory.
3395 */
3396 if (unlikely(fatal_signal_pending(current))) {
3397 remainder = 0;
3398 break;
3399 }
3400
4c887265
AL
 3401	 /*
 3402	  * Some archs (sparc64, sh*) have multiple pte_ts for
 3403	  * each hugepage. We have to make sure we get the
 3404	  * first, for the page indexing below to work.
 3405	  *
 3406	  * Note that the page table lock is not held when pte is null.
 3407	  */
a5516438 3408 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
cb900f41
KS
3409 if (pte)
3410 ptl = huge_pte_lock(h, mm, pte);
2a15efc9
HD
3411 absent = !pte || huge_pte_none(huge_ptep_get(pte));
3412
3413 /*
3414 * When coredumping, it suits get_dump_page if we just return
3ae77f43
HD
3415 * an error where there's an empty slot with no huge pagecache
3416 * to back it. This way, we avoid allocating a hugepage, and
3417 * the sparse dumpfile avoids allocating disk blocks, but its
3418 * huge holes still show up with zeroes where they need to be.
2a15efc9 3419 */
3ae77f43
HD
3420 if (absent && (flags & FOLL_DUMP) &&
3421 !hugetlbfs_pagecache_present(h, vma, vaddr)) {
cb900f41
KS
3422 if (pte)
3423 spin_unlock(ptl);
2a15efc9
HD
3424 remainder = 0;
3425 break;
3426 }
63551ae0 3427
9cc3a5bd
NH
 3428	 /*
 3429	  * We need to call hugetlb_fault for both hugepages under migration
 3430	  * (in which case hugetlb_fault waits for the migration) and
 3431	  * hwpoisoned hugepages (in which case we need to prevent the
 3432	  * caller from accessing them). In order to do this, we use
 3433	  * is_swap_pte here instead of is_hugetlb_entry_migration and
 3434	  * is_hugetlb_entry_hwpoisoned. This is because it simply covers
 3435	  * both cases, and because we can't follow correct pages
 3436	  * directly from any kind of swap entry.
 3437	  */
3438 if (absent || is_swap_pte(huge_ptep_get(pte)) ||
106c992a
GS
3439 ((flags & FOLL_WRITE) &&
3440 !huge_pte_write(huge_ptep_get(pte)))) {
4c887265 3441 int ret;
63551ae0 3442
cb900f41
KS
3443 if (pte)
3444 spin_unlock(ptl);
2a15efc9
HD
3445 ret = hugetlb_fault(mm, vma, vaddr,
3446 (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
a89182c7 3447 if (!(ret & VM_FAULT_ERROR))
4c887265 3448 continue;
63551ae0 3449
4c887265 3450 remainder = 0;
4c887265
AL
3451 break;
3452 }
3453
a5516438 3454 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
7f2e9525 3455 page = pte_page(huge_ptep_get(pte));
d5d4b0aa 3456same_page:
d6692183 3457 if (pages) {
2a15efc9 3458 pages[i] = mem_map_offset(page, pfn_offset);
a0368d4e 3459 get_page_foll(pages[i]);
d6692183 3460 }
63551ae0
DG
3461
3462 if (vmas)
3463 vmas[i] = vma;
3464
3465 vaddr += PAGE_SIZE;
d5d4b0aa 3466 ++pfn_offset;
63551ae0
DG
3467 --remainder;
3468 ++i;
d5d4b0aa 3469 if (vaddr < vma->vm_end && remainder &&
a5516438 3470 pfn_offset < pages_per_huge_page(h)) {
d5d4b0aa
CK
3471 /*
3472 * We use pfn_offset to avoid touching the pageframes
3473 * of this compound page.
3474 */
3475 goto same_page;
3476 }
cb900f41 3477 spin_unlock(ptl);
63551ae0 3478 }
28a35716 3479 *nr_pages = remainder;
63551ae0
DG
3480 *position = vaddr;
3481
2a15efc9 3482 return i ? i : -EFAULT;
63551ae0 3483}
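
/*
 * Illustrative standalone sketch (hypothetical, not kernel code) of the
 * pfn_offset arithmetic used above: pfn_offset is the base-page index
 * within one huge page, so the same_page loop can hand out successive
 * subpages without re-walking the page tables. Assumes x86-64 sizes:
 * 4K base pages, 2M huge pages.
 */
#include <assert.h>

#define PAGE_SHIFT	12
#define HPAGE_SHIFT	21
#define HPAGE_MASK	(~((1UL << HPAGE_SHIFT) - 1))

static unsigned long pfn_offset_of(unsigned long vaddr)
{
	/* mirrors (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT */
	return (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
}

int main(void)
{
	assert(pfn_offset_of(0x200000UL) == 0);		/* huge page start */
	assert(pfn_offset_of(0x200000UL + 4096) == 1);	/* 2nd 4K subpage */
	assert(pfn_offset_of(0x3ff000UL) == 511);	/* last subpage of 2M */
	return 0;
}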
8f860591 3484
7da4d641 3485unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
8f860591
ZY
3486 unsigned long address, unsigned long end, pgprot_t newprot)
3487{
3488 struct mm_struct *mm = vma->vm_mm;
3489 unsigned long start = address;
3490 pte_t *ptep;
3491 pte_t pte;
a5516438 3492 struct hstate *h = hstate_vma(vma);
7da4d641 3493 unsigned long pages = 0;
8f860591
ZY
3494
3495 BUG_ON(address >= end);
3496 flush_cache_range(vma, address, end);
3497
a5338093 3498 mmu_notifier_invalidate_range_start(mm, start, end);
83cde9e8 3499 i_mmap_lock_write(vma->vm_file->f_mapping);
a5516438 3500 for (; address < end; address += huge_page_size(h)) {
cb900f41 3501 spinlock_t *ptl;
8f860591
ZY
3502 ptep = huge_pte_offset(mm, address);
3503 if (!ptep)
3504 continue;
cb900f41 3505 ptl = huge_pte_lock(h, mm, ptep);
7da4d641
PZ
3506 if (huge_pmd_unshare(mm, &address, ptep)) {
3507 pages++;
cb900f41 3508 spin_unlock(ptl);
39dde65c 3509 continue;
7da4d641 3510 }
a8bda28d
NH
3511 pte = huge_ptep_get(ptep);
3512 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
3513 spin_unlock(ptl);
3514 continue;
3515 }
3516 if (unlikely(is_hugetlb_entry_migration(pte))) {
3517 swp_entry_t entry = pte_to_swp_entry(pte);
3518
3519 if (is_write_migration_entry(entry)) {
3520 pte_t newpte;
3521
3522 make_migration_entry_read(&entry);
3523 newpte = swp_entry_to_pte(entry);
3524 set_huge_pte_at(mm, address, ptep, newpte);
3525 pages++;
3526 }
3527 spin_unlock(ptl);
3528 continue;
3529 }
3530 if (!huge_pte_none(pte)) {
8f860591 3531 pte = huge_ptep_get_and_clear(mm, address, ptep);
106c992a 3532 pte = pte_mkhuge(huge_pte_modify(pte, newprot));
be7517d6 3533 pte = arch_make_huge_pte(pte, vma, NULL, 0);
8f860591 3534 set_huge_pte_at(mm, address, ptep, pte);
7da4d641 3535 pages++;
8f860591 3536 }
cb900f41 3537 spin_unlock(ptl);
8f860591 3538 }
d833352a 3539 /*
c8c06efa 3540 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
d833352a 3541 * may have cleared our pud entry and done put_page on the page table:
c8c06efa 3542 * once we release i_mmap_rwsem, another task can do the final put_page
d833352a
MG
3543 * and that page table be reused and filled with junk.
3544 */
8f860591 3545 flush_tlb_range(vma, start, end);
34ee645e 3546 mmu_notifier_invalidate_range(mm, start, end);
83cde9e8 3547 i_mmap_unlock_write(vma->vm_file->f_mapping);
a5338093 3548 mmu_notifier_invalidate_range_end(mm, start, end);
7da4d641
PZ
3549
3550 return pages << h->order;
8f860591
ZY
3551}
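
/*
 * Userspace view of the above (a hypothetical example, not part of this
 * file): mprotect() on a hugetlb VMA ends up in
 * hugetlb_change_protection(), and the range must be hugepage-aligned.
 * Assumes a 2MB default hugepage size and an available pool.
 */
#include <stdio.h>
#include <sys/mman.h>

#define LEN (4UL * 1024 * 1024)	/* two 2MB hugepages */

int main(void)
{
	char *p = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	/* drop write permission on the first hugepage only */
	if (mprotect(p, 2UL * 1024 * 1024, PROT_READ))
		perror("mprotect");
	munmap(p, LEN);
	return 0;
}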
3552
a1e78772
MG
3553int hugetlb_reserve_pages(struct inode *inode,
3554 long from, long to,
5a6fe125 3555 struct vm_area_struct *vma,
ca16d140 3556 vm_flags_t vm_flags)
e4e574b7 3557{
17c9d12e 3558 long ret, chg;
a5516438 3559 struct hstate *h = hstate_inode(inode);
90481622 3560 struct hugepage_subpool *spool = subpool_inode(inode);
9119a41e 3561 struct resv_map *resv_map;
1c5ecae3 3562 long gbl_reserve;
e4e574b7 3563
17c9d12e
MG
3564 /*
3565 * Only apply hugepage reservation if asked. At fault time, an
3566 * attempt will be made for VM_NORESERVE to allocate a page
90481622 3567 * without using reserves
17c9d12e 3568 */
ca16d140 3569 if (vm_flags & VM_NORESERVE)
17c9d12e
MG
3570 return 0;
3571
a1e78772
MG
3572 /*
3573 * Shared mappings base their reservation on the number of pages that
3574 * are already allocated on behalf of the file. Private mappings need
3575 * to reserve the full area even if read-only as mprotect() may be
3576 * called to make the mapping read-write. Assume !vma is a shm mapping
3577 */
9119a41e 3578 if (!vma || vma->vm_flags & VM_MAYSHARE) {
4e35f483 3579 resv_map = inode_resv_map(inode);
9119a41e 3580
1406ec9b 3581 chg = region_chg(resv_map, from, to);
9119a41e
JK
3582
3583 } else {
3584 resv_map = resv_map_alloc();
17c9d12e
MG
3585 if (!resv_map)
3586 return -ENOMEM;
3587
a1e78772 3588 chg = to - from;
84afd99b 3589
17c9d12e
MG
3590 set_vma_resv_map(vma, resv_map);
3591 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
3592 }
3593
c50ac050
DH
3594 if (chg < 0) {
3595 ret = chg;
3596 goto out_err;
3597 }
8a630112 3598
1c5ecae3
MK
3599 /*
3600 * There must be enough pages in the subpool for the mapping. If
3601 * the subpool has a minimum size, there may be some global
3602 * reservations already in place (gbl_reserve).
3603 */
3604 gbl_reserve = hugepage_subpool_get_pages(spool, chg);
3605 if (gbl_reserve < 0) {
c50ac050
DH
3606 ret = -ENOSPC;
3607 goto out_err;
3608 }
5a6fe125
MG
3609
3610 /*
17c9d12e 3611	 * Check that enough hugepages are available for the reservation.
90481622 3612	 * Hand the pages back to the subpool if there are not.
5a6fe125 3613 */
1c5ecae3 3614 ret = hugetlb_acct_memory(h, gbl_reserve);
68842c9b 3615 if (ret < 0) {
1c5ecae3
MK
3616 /* put back original number of pages, chg */
3617 (void)hugepage_subpool_put_pages(spool, chg);
c50ac050 3618 goto out_err;
68842c9b 3619 }
17c9d12e
MG
3620
3621 /*
3622 * Account for the reservations made. Shared mappings record regions
3623 * that have reservations as they are shared by multiple VMAs.
3624 * When the last VMA disappears, the region map says how much
3625 * the reservation was and the page cache tells how much of
3626 * the reservation was consumed. Private mappings are per-VMA and
3627 * only the consumed reservations are tracked. When the VMA
3628 * disappears, the original reservation is the VMA size and the
3629 * consumed reservations are stored in the map. Hence, nothing
3630 * else has to be done for private mappings here
3631 */
f83a275d 3632 if (!vma || vma->vm_flags & VM_MAYSHARE)
1406ec9b 3633 region_add(resv_map, from, to);
a43a8c39 3634 return 0;
c50ac050 3635out_err:
f031dd27
JK
3636 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3637 kref_put(&resv_map->refs, resv_map_release);
c50ac050 3638 return ret;
a43a8c39
CK
3639}
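
/*
 * Sketch of the VM_NORESERVE case handled above (hypothetical userspace
 * example): with MAP_NORESERVE no hugepages are reserved at mmap() time,
 * so mmap() can succeed even when the pool is too small, and the process
 * may get SIGBUS on first touch instead. Assumes a 2MB default hugepage
 * size.
 */
#include <stdio.h>
#include <sys/mman.h>

#define LEN (2UL * 1024 * 1024)

int main(void)
{
	char *p = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB |
		       MAP_NORESERVE, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	p[0] = 1;	/* may SIGBUS if no hugepage is available now */
	munmap(p, LEN);
	return 0;
}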
3640
3641void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
3642{
a5516438 3643 struct hstate *h = hstate_inode(inode);
4e35f483 3644 struct resv_map *resv_map = inode_resv_map(inode);
9119a41e 3645 long chg = 0;
90481622 3646 struct hugepage_subpool *spool = subpool_inode(inode);
1c5ecae3 3647 long gbl_reserve;
45c682a6 3648
9119a41e 3649 if (resv_map)
1406ec9b 3650 chg = region_truncate(resv_map, offset);
45c682a6 3651 spin_lock(&inode->i_lock);
e4c6f8be 3652 inode->i_blocks -= (blocks_per_huge_page(h) * freed);
45c682a6
KC
3653 spin_unlock(&inode->i_lock);
3654
1c5ecae3
MK
3655 /*
3656 * If the subpool has a minimum size, the number of global
3657 * reservations to be released may be adjusted.
3658 */
3659 gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
3660 hugetlb_acct_memory(h, -gbl_reserve);
a43a8c39 3661}
93f70f90 3662
3212b535
SC
3663#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
3664static unsigned long page_table_shareable(struct vm_area_struct *svma,
3665 struct vm_area_struct *vma,
3666 unsigned long addr, pgoff_t idx)
3667{
3668 unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
3669 svma->vm_start;
3670 unsigned long sbase = saddr & PUD_MASK;
3671 unsigned long s_end = sbase + PUD_SIZE;
3672
3673 /* Allow segments to share if only one is marked locked */
3674 unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED;
3675 unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED;
3676
3677 /*
 3678	 * Match the virtual addresses, permissions and the alignment of the
3679 * page table page.
3680 */
3681 if (pmd_index(addr) != pmd_index(saddr) ||
3682 vm_flags != svm_flags ||
3683 sbase < svma->vm_start || svma->vm_end < s_end)
3684 return 0;
3685
3686 return saddr;
3687}
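
/*
 * Illustrative arithmetic for page_table_shareable() (a standalone
 * sketch, not kernel code): a sibling VMA can share a PMD page only if
 * the candidate address maps to the same file offset in both VMAs and
 * the whole PUD_SIZE-aligned region fits inside the sibling VMA.
 * Assumes x86-64 sizes: 4K pages, 1G PUD_SIZE; the addresses are made up.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PUD_SIZE	(1UL << 30)
#define PUD_MASK	(~(PUD_SIZE - 1))

int main(void)
{
	unsigned long idx = 0x40000;		/* faulting file page, 1G in */
	unsigned long svma_start = 0x7f0040000000UL, svma_pgoff = 0;

	/* same computation as saddr/sbase/s_end above */
	unsigned long saddr = ((idx - svma_pgoff) << PAGE_SHIFT) + svma_start;
	unsigned long sbase = saddr & PUD_MASK;

	printf("saddr=%#lx sbase=%#lx s_end=%#lx\n",
	       saddr, sbase, sbase + PUD_SIZE);
	return 0;
}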
3688
3689static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
3690{
3691 unsigned long base = addr & PUD_MASK;
3692 unsigned long end = base + PUD_SIZE;
3693
3694 /*
3695 * check on proper vm_flags and page table alignment
3696 */
3697 if (vma->vm_flags & VM_MAYSHARE &&
3698 vma->vm_start <= base && end <= vma->vm_end)
3699 return 1;
3700 return 0;
3701}
3702
3703/*
3704 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
3705 * and returns the corresponding pte. While this is not necessary for the
3706 * !shared pmd case because we can allocate the pmd later as well, it makes the
3707 * code much cleaner. pmd allocation is essential for the shared case because
c8c06efa 3708 * pud has to be populated inside the same i_mmap_rwsem section - otherwise
3212b535
SC
3709 * racing tasks could either miss the sharing (see huge_pte_offset) or select a
3710 * bad pmd for sharing.
3711 */
3712pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
3713{
3714 struct vm_area_struct *vma = find_vma(mm, addr);
3715 struct address_space *mapping = vma->vm_file->f_mapping;
3716 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
3717 vma->vm_pgoff;
3718 struct vm_area_struct *svma;
3719 unsigned long saddr;
3720 pte_t *spte = NULL;
3721 pte_t *pte;
cb900f41 3722 spinlock_t *ptl;
3212b535
SC
3723
3724 if (!vma_shareable(vma, addr))
3725 return (pte_t *)pmd_alloc(mm, pud, addr);
3726
83cde9e8 3727 i_mmap_lock_write(mapping);
3212b535
SC
3728 vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
3729 if (svma == vma)
3730 continue;
3731
3732 saddr = page_table_shareable(svma, vma, addr, idx);
3733 if (saddr) {
3734 spte = huge_pte_offset(svma->vm_mm, saddr);
3735 if (spte) {
dc6c9a35 3736 mm_inc_nr_pmds(mm);
3212b535
SC
3737 get_page(virt_to_page(spte));
3738 break;
3739 }
3740 }
3741 }
3742
3743 if (!spte)
3744 goto out;
3745
cb900f41
KS
3746 ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte);
3747 spin_lock(ptl);
dc6c9a35 3748 if (pud_none(*pud)) {
3212b535
SC
3749 pud_populate(mm, pud,
3750 (pmd_t *)((unsigned long)spte & PAGE_MASK));
dc6c9a35 3751 } else {
3212b535 3752 put_page(virt_to_page(spte));
dc6c9a35
KS
3753 mm_inc_nr_pmds(mm);
3754 }
cb900f41 3755 spin_unlock(ptl);
3212b535
SC
3756out:
3757 pte = (pte_t *)pmd_alloc(mm, pud, addr);
83cde9e8 3758 i_mmap_unlock_write(mapping);
3212b535
SC
3759 return pte;
3760}
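
/*
 * Userspace conditions under which the sharing above can trigger (an
 * illustrative sketch, not a guarantee): a MAP_SHARED hugetlb mapping
 * covering a whole PUD_SIZE (1GB on x86-64) region, faulted by two
 * processes. Sharing only happens if the VMA is PUD-aligned, so a
 * 1GB-aligned address hint is passed; assumes at least 512 free 2MB
 * hugepages.
 */
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

#define PUD_SIZE (1UL << 30)

int main(void)
{
	void *hint = (void *)(16UL * PUD_SIZE);	/* 1GB-aligned hint */
	char *p = mmap(hint, PUD_SIZE, PROT_READ | PROT_WRITE,
		       MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	p[0] = 1;		/* parent fault populates the PMD page */
	if (fork() == 0) {
		p[1] = 2;	/* child fault may reuse it via huge_pmd_share() */
		_exit(0);
	}
	wait(NULL);
	return 0;
}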
3761
3762/*
3763 * unmap huge page backed by shared pte.
3764 *
 3765 * The hugetlb pte page is refcounted at the time of mapping. If the pte is
 3766 * shared (indicated by page_count > 1), unmap is achieved by clearing the pud
 3767 * and decrementing the refcount. If count == 1, the pte page is not shared.
3768 *
cb900f41 3769 * called with page table lock held.
3212b535
SC
3770 *
3771 * returns: 1 successfully unmapped a shared pte page
3772 * 0 the underlying pte page is not shared, or it is the last user
3773 */
3774int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
3775{
3776 pgd_t *pgd = pgd_offset(mm, *addr);
3777 pud_t *pud = pud_offset(pgd, *addr);
3778
3779 BUG_ON(page_count(virt_to_page(ptep)) == 0);
3780 if (page_count(virt_to_page(ptep)) == 1)
3781 return 0;
3782
3783 pud_clear(pud);
3784 put_page(virt_to_page(ptep));
dc6c9a35 3785 mm_dec_nr_pmds(mm);
3212b535
SC
3786 *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
3787 return 1;
3788}
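
/*
 * Standalone sketch (illustrative, x86-64 numbers: HPAGE_SIZE = 2M,
 * PTRS_PER_PTE = 512, so one PMD page maps 1G) of the *addr rewind
 * above. Callers iterate with "address += huge_page_size()", so *addr
 * is set one hugepage before the end of the shared 1G region; the
 * caller's next increment lands exactly on the following region.
 */
#include <assert.h>

#define HPAGE_SIZE	(2UL << 20)
#define PTRS_PER_PTE	512
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long addr = 0x40200000UL;	/* inside a shared 1G region */

	addr = ALIGN(addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
	assert(addr == 0x80000000UL - HPAGE_SIZE);
	addr += HPAGE_SIZE;			/* caller's loop increment */
	assert(addr == 0x80000000UL);		/* next 1G region */
	return 0;
}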
9e5fc74c
SC
3789#define want_pmd_share() (1)
3790#else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
3791pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
3792{
3793 return NULL;
3794}
e81f2d22
ZZ
3795
3796int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
3797{
3798 return 0;
3799}
9e5fc74c 3800#define want_pmd_share() (0)
3212b535
SC
3801#endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
3802
9e5fc74c
SC
3803#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
3804pte_t *huge_pte_alloc(struct mm_struct *mm,
3805 unsigned long addr, unsigned long sz)
3806{
3807 pgd_t *pgd;
3808 pud_t *pud;
3809 pte_t *pte = NULL;
3810
3811 pgd = pgd_offset(mm, addr);
3812 pud = pud_alloc(mm, pgd, addr);
3813 if (pud) {
3814 if (sz == PUD_SIZE) {
3815 pte = (pte_t *)pud;
3816 } else {
3817 BUG_ON(sz != PMD_SIZE);
3818 if (want_pmd_share() && pud_none(*pud))
3819 pte = huge_pmd_share(mm, addr, pud);
3820 else
3821 pte = (pte_t *)pmd_alloc(mm, pud, addr);
3822 }
3823 }
3824 BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
3825
3826 return pte;
3827}
3828
3829pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
3830{
3831 pgd_t *pgd;
3832 pud_t *pud;
3833 pmd_t *pmd = NULL;
3834
3835 pgd = pgd_offset(mm, addr);
3836 if (pgd_present(*pgd)) {
3837 pud = pud_offset(pgd, addr);
3838 if (pud_present(*pud)) {
3839 if (pud_huge(*pud))
3840 return (pte_t *)pud;
3841 pmd = pmd_offset(pud, addr);
3842 }
3843 }
3844 return (pte_t *) pmd;
3845}
3846
61f77eda
NH
3847#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
3848
3849/*
 3850 * These functions are overridable if your architecture needs its own
3851 * behavior.
3852 */
3853struct page * __weak
3854follow_huge_addr(struct mm_struct *mm, unsigned long address,
3855 int write)
3856{
3857 return ERR_PTR(-EINVAL);
3858}
3859
3860struct page * __weak
9e5fc74c 3861follow_huge_pmd(struct mm_struct *mm, unsigned long address,
e66f17ff 3862 pmd_t *pmd, int flags)
9e5fc74c 3863{
e66f17ff
NH
3864 struct page *page = NULL;
3865 spinlock_t *ptl;
3866retry:
3867 ptl = pmd_lockptr(mm, pmd);
3868 spin_lock(ptl);
3869 /*
3870 * make sure that the address range covered by this pmd is not
 3871	 * unmapped by other threads.
3872 */
3873 if (!pmd_huge(*pmd))
3874 goto out;
3875 if (pmd_present(*pmd)) {
97534127 3876 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
e66f17ff
NH
3877 if (flags & FOLL_GET)
3878 get_page(page);
3879 } else {
3880 if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) {
3881 spin_unlock(ptl);
3882 __migration_entry_wait(mm, (pte_t *)pmd, ptl);
3883 goto retry;
3884 }
3885 /*
3886 * hwpoisoned entry is treated as no_page_table in
3887 * follow_page_mask().
3888 */
3889 }
3890out:
3891 spin_unlock(ptl);
9e5fc74c
SC
3892 return page;
3893}
3894
61f77eda 3895struct page * __weak
9e5fc74c 3896follow_huge_pud(struct mm_struct *mm, unsigned long address,
e66f17ff 3897 pud_t *pud, int flags)
9e5fc74c 3898{
e66f17ff
NH
3899 if (flags & FOLL_GET)
3900 return NULL;
9e5fc74c 3901
e66f17ff 3902 return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
9e5fc74c
SC
3903}
3904
d5bd9106
AK
3905#ifdef CONFIG_MEMORY_FAILURE
3906
93f70f90
NH
3907/*
 3908 * This function is called from the memory-failure code.
 3909 * Assume the caller holds the page lock of the head page.
3910 */
6de2b1aa 3911int dequeue_hwpoisoned_huge_page(struct page *hpage)
93f70f90
NH
3912{
3913 struct hstate *h = page_hstate(hpage);
3914 int nid = page_to_nid(hpage);
6de2b1aa 3915 int ret = -EBUSY;
93f70f90
NH
3916
3917 spin_lock(&hugetlb_lock);
7e1f049e
NH
3918 /*
3919 * Just checking !page_huge_active is not enough, because that could be
3920 * an isolated/hwpoisoned hugepage (which have >0 refcount).
3921 */
3922 if (!page_huge_active(hpage) && !page_count(hpage)) {
56f2fb14
NH
3923 /*
 3924		 * A hwpoisoned hugepage isn't linked to the activelist or freelist,
 3925		 * but a dangling hpage->lru can trigger list-debug warnings
3926 * (this happens when we call unpoison_memory() on it),
3927 * so let it point to itself with list_del_init().
3928 */
3929 list_del_init(&hpage->lru);
8c6c2ecb 3930 set_page_refcounted(hpage);
6de2b1aa
NH
3931 h->free_huge_pages--;
3932 h->free_huge_pages_node[nid]--;
3933 ret = 0;
3934 }
93f70f90 3935 spin_unlock(&hugetlb_lock);
6de2b1aa 3936 return ret;
93f70f90 3937}
6de2b1aa 3938#endif
31caf665
NH
3939
3940bool isolate_huge_page(struct page *page, struct list_head *list)
3941{
bcc54222
NH
3942 bool ret = true;
3943
309381fe 3944 VM_BUG_ON_PAGE(!PageHead(page), page);
31caf665 3945 spin_lock(&hugetlb_lock);
bcc54222
NH
3946 if (!page_huge_active(page) || !get_page_unless_zero(page)) {
3947 ret = false;
3948 goto unlock;
3949 }
3950 clear_page_huge_active(page);
31caf665 3951 list_move_tail(&page->lru, list);
bcc54222 3952unlock:
31caf665 3953 spin_unlock(&hugetlb_lock);
bcc54222 3954 return ret;
31caf665
NH
3955}
3956
3957void putback_active_hugepage(struct page *page)
3958{
309381fe 3959 VM_BUG_ON_PAGE(!PageHead(page), page);
31caf665 3960 spin_lock(&hugetlb_lock);
bcc54222 3961 set_page_huge_active(page);
31caf665
NH
3962 list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
3963 spin_unlock(&hugetlb_lock);
3964 put_page(page);
3965}
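
/*
 * One userspace path that reaches isolate_huge_page() and, on failure,
 * putback_active_hugepage(): migrating a faulted-in hugepage with
 * move_pages(2). Illustrative sketch; assumes a two-node NUMA system
 * and an available 2MB hugepage pool.
 */
#include <stdio.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

#define MPOL_MF_MOVE	(1 << 1)	/* from <numaif.h> */
#define LEN		(2UL * 1024 * 1024)

int main(void)
{
	char *p = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	p[0] = 1;			/* instantiate the hugepage */

	void *pages[1] = { p };
	int nodes[1] = { 1 };		/* migrate to node 1 */
	int status[1] = { -1 };
	long rc = syscall(SYS_move_pages, 0, 1UL, pages, nodes, status,
			  MPOL_MF_MOVE);
	printf("move_pages: rc=%ld status=%d\n", rc, status[0]);
	munmap(p, LEN);
	return 0;
}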