mm/hugetlb: add region_del() to delete a specific range of entries
/*
 * Generic hugetlb support.
 * (C) Nadia Yvette Chambers, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/page-isolation.h>
#include <linux/jhash.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include "internal.h"

int hugepages_treat_as_movable;

int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];
/*
 * Minimum page order among possible hugepage sizes, set to a proper value
 * at boot time.
 */
static unsigned int minimum_order __read_mostly = UINT_MAX;

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static unsigned long __initdata default_hstate_size;

/*
 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
 * free_huge_pages, and surplus_huge_pages.
 */
DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Serializes faults on the same logical page. This is used to
 * prevent spurious OOMs when the hugepage pool is fully utilized.
 */
static int num_fault_mutexes;
static struct mutex *htlb_fault_mutex_table ____cacheline_aligned_in_smp;

/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);

static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
{
        bool free = (spool->count == 0) && (spool->used_hpages == 0);

        spin_unlock(&spool->lock);

        /* If no pages are used, and no other handles to the subpool
         * remain, give up any reservations based on minimum size and
         * free the subpool */
        if (free) {
                if (spool->min_hpages != -1)
                        hugetlb_acct_memory(spool->hstate,
                                                -spool->min_hpages);
                kfree(spool);
        }
}

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
                                                long min_hpages)
{
        struct hugepage_subpool *spool;

        spool = kzalloc(sizeof(*spool), GFP_KERNEL);
        if (!spool)
                return NULL;

        spin_lock_init(&spool->lock);
        spool->count = 1;
        spool->max_hpages = max_hpages;
        spool->hstate = h;
        spool->min_hpages = min_hpages;

        if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
                kfree(spool);
                return NULL;
        }
        spool->rsv_hpages = min_hpages;

        return spool;
}

void hugepage_put_subpool(struct hugepage_subpool *spool)
{
        spin_lock(&spool->lock);
        BUG_ON(!spool->count);
        spool->count--;
        unlock_or_release_subpool(spool);
}
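
/*
 * Lifecycle sketch (editorial illustration; the caller shown is
 * hypothetical, though hugetlbfs follows this pattern at mount and
 * unmount time): a subpool is created holding one reference and is
 * freed only once both the reference count and used_hpages reach zero.
 *
 *        spool = hugepage_new_subpool(h, max_hpages, min_hpages);
 *        if (!spool)
 *                return -ENOMEM;
 *        ...
 *        hugepage_put_subpool(spool);
 */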

/*
 * Subpool accounting for allocating and reserving pages.
 * Return -ENOMEM if there are not enough resources to satisfy
 * the request. Otherwise, return the number of pages by which the
 * global pools must be adjusted (upward). The returned value may
 * only be different than the passed value (delta) in the case where
 * a subpool minimum size must be maintained.
 */
static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
                                        long delta)
{
        long ret = delta;

        if (!spool)
                return ret;

        spin_lock(&spool->lock);

        if (spool->max_hpages != -1) {          /* maximum size accounting */
                if ((spool->used_hpages + delta) <= spool->max_hpages)
                        spool->used_hpages += delta;
                else {
                        ret = -ENOMEM;
                        goto unlock_ret;
                }
        }

        if (spool->min_hpages != -1) {          /* minimum size accounting */
                if (delta > spool->rsv_hpages) {
                        /*
                         * Asking for more reserves than those already taken on
                         * behalf of subpool. Return difference.
                         */
                        ret = delta - spool->rsv_hpages;
                        spool->rsv_hpages = 0;
                } else {
                        ret = 0;        /* reserves already accounted for */
                        spool->rsv_hpages -= delta;
                }
        }

unlock_ret:
        spin_unlock(&spool->lock);
        return ret;
}
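
/*
 * Worked example (editorial illustration, not from the original source):
 * consider a subpool with min_hpages == 10, no maximum, and a fresh
 * reserve of rsv_hpages == 10. hugepage_subpool_get_pages(spool, 3) is
 * fully covered by the reserve: it returns 0 and leaves rsv_hpages == 7.
 * A subsequent call with delta == 9 exceeds the remaining reserve, so it
 * returns 9 - 7 == 2 (the pages the global pools must now provide) and
 * sets rsv_hpages to 0.
 */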

/*
 * Subpool accounting for freeing and unreserving pages.
 * Return the number of global page reservations that must be dropped.
 * The return value may only be different than the passed value (delta)
 * in the case where a subpool minimum size must be maintained.
 */
static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
                                        long delta)
{
        long ret = delta;

        if (!spool)
                return delta;

        spin_lock(&spool->lock);

        if (spool->max_hpages != -1)            /* maximum size accounting */
                spool->used_hpages -= delta;

        if (spool->min_hpages != -1) {          /* minimum size accounting */
                if (spool->rsv_hpages + delta <= spool->min_hpages)
                        ret = 0;
                else
                        ret = spool->rsv_hpages + delta - spool->min_hpages;

                spool->rsv_hpages += delta;
                if (spool->rsv_hpages > spool->min_hpages)
                        spool->rsv_hpages = spool->min_hpages;
        }

        /*
         * If hugetlbfs_put_super couldn't free spool due to an outstanding
         * quota reference, free it now.
         */
        unlock_or_release_subpool(spool);

        return ret;
}

static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
        return HUGETLBFS_SB(inode->i_sb)->spool;
}

static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
        return subpool_inode(file_inode(vma->vm_file));
}

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 * across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock. The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map. The from and to elements are huge page
 * indices into the associated mapping. from indicates the starting index
 * of the region. to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping. It is important to note that the to element
 * represents the first element past the end of the region. This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
        struct list_head link;
        long from;
        long to;
};

/*
 * Add the huge page range represented by [f, t) to the reserve
 * map. In the normal case, existing regions will be expanded
 * to accommodate the specified range. Sufficient regions should
 * exist for expansion due to the previous call to region_chg
 * with the same range. However, it is possible that region_del
 * could have been called after region_chg and modified the map
 * in such a way that no region exists to be expanded. In this
 * case, pull a region descriptor from the cache associated with
 * the map and use that for the new range.
 *
 * Return the number of new huge pages added to the map. This
 * number is greater than or equal to zero.
 */
static long region_add(struct resv_map *resv, long f, long t)
{
        struct list_head *head = &resv->regions;
        struct file_region *rg, *nrg, *trg;
        long add = 0;

        spin_lock(&resv->lock);
        /* Locate the region we are either in or before. */
        list_for_each_entry(rg, head, link)
                if (f <= rg->to)
                        break;

        /*
         * If no region exists which can be expanded to include the
         * specified range, the list must have been modified by an
         * interleaving call to region_del(). Pull a region descriptor
         * from the cache and use it for this range.
         */
        if (&rg->link == head || t < rg->from) {
                VM_BUG_ON(resv->region_cache_count <= 0);

                resv->region_cache_count--;
                nrg = list_first_entry(&resv->region_cache, struct file_region,
                                        link);
                list_del(&nrg->link);

                nrg->from = f;
                nrg->to = t;
                list_add(&nrg->link, rg->link.prev);

                add += t - f;
                goto out_locked;
        }

        /* Round our left edge to the current segment if it encloses us. */
        if (f > rg->from)
                f = rg->from;

        /* Check for and consume any regions we now overlap with. */
        nrg = rg;
        list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
                if (&rg->link == head)
                        break;
                if (rg->from > t)
                        break;

                /* If this area reaches higher, then extend our area to
                 * include it completely. If this is not the first area
                 * which we intend to reuse, free it. */
                if (rg->to > t)
                        t = rg->to;
                if (rg != nrg) {
                        /* Decrement return value by the deleted range.
                         * Another range will span this area so that by
                         * end of routine add will be >= zero
                         */
                        add -= (rg->to - rg->from);
                        list_del(&rg->link);
                        kfree(rg);
                }
        }

        add += (nrg->from - f);         /* Added to beginning of region */
        nrg->from = f;
        add += t - nrg->to;             /* Added to end of region */
        nrg->to = t;

out_locked:
        resv->adds_in_progress--;
        spin_unlock(&resv->lock);
        VM_BUG_ON(add < 0);
        return add;
}
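
/*
 * Worked example (editorial illustration): with existing regions
 * [0, 3) and [5, 8), region_add(resv, 2, 6) rounds its left edge down
 * to 0, absorbs [5, 8) (extending t to 8) and leaves the single region
 * [0, 8). The map went from covering 6 huge pages to covering 8, so
 * the return value is 2.
 */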

/*
 * Examine the existing reserve map and determine how many
 * huge pages in the specified range [f, t) are NOT currently
 * represented. This routine is called before a subsequent
 * call to region_add that will actually modify the reserve
 * map to add the specified range [f, t). region_chg does
 * not change the number of huge pages represented by the
 * map. However, if the existing regions in the map cannot
 * be expanded to represent the new range, a new file_region
 * structure is added to the map as a placeholder. This is
 * so that the subsequent region_add call will have all the
 * regions it needs and will not fail.
 *
 * Upon entry, region_chg will also examine the cache of region descriptors
 * associated with the map. If there are not enough descriptors cached, one
 * will be allocated for the in progress add operation.
 *
 * Returns the number of huge pages that need to be added to the existing
 * reservation map for the range [f, t). This number is greater than or
 * equal to zero. -ENOMEM is returned if a new file_region structure or
 * cache entry is needed and cannot be allocated.
 */
static long region_chg(struct resv_map *resv, long f, long t)
{
        struct list_head *head = &resv->regions;
        struct file_region *rg, *nrg = NULL;
        long chg = 0;

retry:
        spin_lock(&resv->lock);
retry_locked:
        resv->adds_in_progress++;

        /*
         * Check for sufficient descriptors in the cache to accommodate
         * the number of in progress add operations.
         */
        if (resv->adds_in_progress > resv->region_cache_count) {
                struct file_region *trg;

                VM_BUG_ON(resv->adds_in_progress - resv->region_cache_count > 1);
                /* Must drop lock to allocate a new descriptor. */
                resv->adds_in_progress--;
                spin_unlock(&resv->lock);

                trg = kmalloc(sizeof(*trg), GFP_KERNEL);
                if (!trg)
                        return -ENOMEM;

                spin_lock(&resv->lock);
                list_add(&trg->link, &resv->region_cache);
                resv->region_cache_count++;
                goto retry_locked;
        }

        /* Locate the region we are before or in. */
        list_for_each_entry(rg, head, link)
                if (f <= rg->to)
                        break;

        /* If we are below the current region then a new region is required.
         * Subtle: allocate a new region at the position, but make it zero
         * size such that we can guarantee to record the reservation. */
        if (&rg->link == head || t < rg->from) {
                if (!nrg) {
                        resv->adds_in_progress--;
                        spin_unlock(&resv->lock);
                        nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
                        if (!nrg)
                                return -ENOMEM;

                        nrg->from = f;
                        nrg->to = f;
                        INIT_LIST_HEAD(&nrg->link);
                        goto retry;
                }

                list_add(&nrg->link, rg->link.prev);
                chg = t - f;
                goto out_nrg;
        }

        /* Round our left edge to the current segment if it encloses us. */
        if (f > rg->from)
                f = rg->from;
        chg = t - f;

        /* Check for and consume any regions we now overlap with. */
        list_for_each_entry(rg, rg->link.prev, link) {
                if (&rg->link == head)
                        break;
                if (rg->from > t)
                        goto out;

                /* We overlap with this area, if it extends further than
                 * us then we must extend ourselves. Account for its
                 * existing reservation. */
                if (rg->to > t) {
                        chg += rg->to - t;
                        t = rg->to;
                }
                chg -= rg->to - rg->from;
        }

out:
        spin_unlock(&resv->lock);
        /* We already know we raced and no longer need the new region */
        kfree(nrg);
        return chg;
out_nrg:
        spin_unlock(&resv->lock);
        return chg;
}

/*
 * Abort the in progress add operation. The adds_in_progress field
 * of the resv_map keeps track of the operations in progress between
 * calls to region_chg and region_add. Operations are sometimes
 * aborted after the call to region_chg. In such cases, region_abort
 * is called to decrement the adds_in_progress counter.
 *
 * NOTE: The range arguments [f, t) are not needed or used in this
 * routine. They are kept to make reading the calling code easier as
 * arguments will match the associated region_chg call.
 */
static void region_abort(struct resv_map *resv, long f, long t)
{
        spin_lock(&resv->lock);
        VM_BUG_ON(!resv->region_cache_count);
        resv->adds_in_progress--;
        spin_unlock(&resv->lock);
}
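
/*
 * Usage sketch (editorial illustration; the error path shown is
 * hypothetical): callers pair region_chg() with either region_add()
 * on success or region_abort() on failure, keeping adds_in_progress
 * balanced:
 *
 *        chg = region_chg(resv, f, t);
 *        if (chg < 0)
 *                return chg;
 *        if (allocation_fails) {
 *                region_abort(resv, f, t);
 *                return -ENOSPC;
 *        }
 *        add = region_add(resv, f, t);
 */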

/*
 * Delete the specified range [f, t) from the reserve map. If the
 * t parameter is LONG_MAX, this indicates that ALL regions after f
 * should be deleted. Locate the regions which intersect [f, t)
 * and either trim, delete or split the existing regions.
 *
 * Returns the number of huge pages deleted from the reserve map.
 * In the normal case, the return value is zero or more. In the
 * case where a region must be split, a new region descriptor must
 * be allocated. If the allocation fails, -ENOMEM will be returned.
 * NOTE: If the parameter t == LONG_MAX, then we will never split
 * a region and possibly return -ENOMEM. Callers specifying
 * t == LONG_MAX do not need to check for -ENOMEM error.
 */
static long region_del(struct resv_map *resv, long f, long t)
{
        struct list_head *head = &resv->regions;
        struct file_region *rg, *trg;
        struct file_region *nrg = NULL;
        long del = 0;

retry:
        spin_lock(&resv->lock);
        list_for_each_entry_safe(rg, trg, head, link) {
                if (rg->to <= f)
                        continue;
                if (rg->from >= t)
                        break;

                if (f > rg->from && t < rg->to) { /* Must split region */
                        /*
                         * Check for an entry in the cache before dropping
                         * lock and attempting allocation.
                         */
                        if (!nrg &&
                            resv->region_cache_count > resv->adds_in_progress) {
                                nrg = list_first_entry(&resv->region_cache,
                                                        struct file_region,
                                                        link);
                                list_del(&nrg->link);
                                resv->region_cache_count--;
                        }

                        if (!nrg) {
                                spin_unlock(&resv->lock);
                                nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
                                if (!nrg)
                                        return -ENOMEM;
                                goto retry;
                        }

                        del += t - f;

                        /* New entry for end of split region */
                        nrg->from = t;
                        nrg->to = rg->to;
                        INIT_LIST_HEAD(&nrg->link);

                        /* Original entry is trimmed */
                        rg->to = f;

                        list_add(&nrg->link, &rg->link);
                        nrg = NULL;
                        break;
                }

                if (f <= rg->from && t >= rg->to) { /* Remove entire region */
                        del += rg->to - rg->from;
                        list_del(&rg->link);
                        kfree(rg);
                        continue;
                }

                if (f <= rg->from) {    /* Trim beginning of region */
                        del += t - rg->from;
                        rg->from = t;
                } else {                /* Trim end of region */
                        del += rg->to - f;
                        rg->to = f;
                }
        }

        spin_unlock(&resv->lock);
        kfree(nrg);
        return del;
}
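
/*
 * Worked example (editorial illustration): with a single region
 * [0, 8), region_del(resv, 2, 6) must split it. The original entry is
 * trimmed to [0, 2), a descriptor (taken from the cache when possible)
 * becomes [6, 8), and the return value is 4, the number of huge pages
 * removed from the map.
 */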

/*
 * Count and return the number of huge pages in the reserve map
 * that intersect with the range [f, t).
 */
static long region_count(struct resv_map *resv, long f, long t)
{
        struct list_head *head = &resv->regions;
        struct file_region *rg;
        long chg = 0;

        spin_lock(&resv->lock);
        /* Locate each segment we overlap with, and count that overlap. */
        list_for_each_entry(rg, head, link) {
                long seg_from;
                long seg_to;

                if (rg->to <= f)
                        continue;
                if (rg->from >= t)
                        break;

                seg_from = max(rg->from, f);
                seg_to = min(rg->to, t);

                chg += seg_to - seg_from;
        }
        spin_unlock(&resv->lock);

        return chg;
}
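
/*
 * Worked example (editorial illustration): with regions [0, 3) and
 * [5, 8), region_count(resv, 2, 6) sums the overlaps [2, 3) and
 * [5, 6) and returns 2.
 */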

/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
                        struct vm_area_struct *vma, unsigned long address)
{
        return ((address - vma->vm_start) >> huge_page_shift(h)) +
                        (vma->vm_pgoff >> huge_page_order(h));
}
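
/*
 * Worked example (editorial illustration, assuming 2MB huge pages on a
 * 4KB base page size): for a vma with vm_pgoff == 512 (file offset 2MB)
 * and an address 4MB past vm_start, the result is
 * (4MB >> 21) + (512 >> 9) == 2 + 1 == 3, i.e. the fourth huge page of
 * the file.
 */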

pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
                                     unsigned long address)
{
        return vma_hugecache_offset(hstate_vma(vma), vma, address);
}

/*
 * Return the size of the pages allocated when backing a VMA. In the majority
 * of cases this will be the same size as that used by the page table entries.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
        struct hstate *hstate;

        if (!is_vm_hugetlb_page(vma))
                return PAGE_SIZE;

        hstate = hstate_vma(vma);

        return 1UL << huge_page_shift(hstate);
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);

/*
 * Return the page size being used by the MMU to back a VMA. In the majority
 * of cases, the page size used by the kernel matches the MMU size. On
 * architectures where it differs, an architecture-specific version of this
 * function is required.
 */
#ifndef vma_mmu_pagesize
unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
        return vma_kernel_pagesize(vma);
}
#endif

/*
 * Flags for MAP_PRIVATE reservations. These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have their future faults succeed.
 *
 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held. It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping. A shared mapping has a region map associated
 * with the underlying file; this region map represents the backing file
 * pages which have ever had a reservation assigned, and it persists even
 * after the page is instantiated. A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it; this region map represents those offsets which have consumed
 * reservation, i.e. where pages have been instantiated.
 */
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
        return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
                                                        unsigned long value)
{
        vma->vm_private_data = (void *)value;
}

struct resv_map *resv_map_alloc(void)
{
        struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
        struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);

        if (!resv_map || !rg) {
                kfree(resv_map);
                kfree(rg);
                return NULL;
        }

        kref_init(&resv_map->refs);
        spin_lock_init(&resv_map->lock);
        INIT_LIST_HEAD(&resv_map->regions);

        resv_map->adds_in_progress = 0;

        INIT_LIST_HEAD(&resv_map->region_cache);
        list_add(&rg->link, &resv_map->region_cache);
        resv_map->region_cache_count = 1;

        return resv_map;
}
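
/*
 * Usage sketch (editorial illustration): a resv_map is reference
 * counted; the final kref_put() invokes resv_map_release() below,
 * which empties both the region list and the descriptor cache:
 *
 *        struct resv_map *resv = resv_map_alloc();
 *        if (!resv)
 *                return -ENOMEM;
 *        ...
 *        kref_put(&resv->refs, resv_map_release);
 */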

void resv_map_release(struct kref *ref)
{
        struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
        struct list_head *head = &resv_map->region_cache;
        struct file_region *rg, *trg;

        /* Clear out any active regions before we release the map. */
        region_del(resv_map, 0, LONG_MAX);

        /* ... and any entries left in the cache */
        list_for_each_entry_safe(rg, trg, head, link) {
                list_del(&rg->link);
                kfree(rg);
        }

        VM_BUG_ON(resv_map->adds_in_progress);

        kfree(resv_map);
}

static inline struct resv_map *inode_resv_map(struct inode *inode)
{
        return inode->i_mapping->private_data;
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
        if (vma->vm_flags & VM_MAYSHARE) {
                struct address_space *mapping = vma->vm_file->f_mapping;
                struct inode *inode = mapping->host;

                return inode_resv_map(inode);

        } else {
                return (struct resv_map *)(get_vma_private_data(vma) &
                                                        ~HPAGE_RESV_MASK);
        }
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
        VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

        set_vma_private_data(vma, (get_vma_private_data(vma) &
                                HPAGE_RESV_MASK) | (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
        VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

        set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);

        return (get_vma_private_data(vma) & flag) != 0;
}

/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
        if (!(vma->vm_flags & VM_MAYSHARE))
                vma->vm_private_data = (void *)0;
}

/* Returns true if the VMA has associated reserve pages */
static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
{
        if (vma->vm_flags & VM_NORESERVE) {
                /*
                 * This address is already reserved by another process
                 * (chg == 0), so we should decrement the reserved count.
                 * Without decrementing, the reserve count remains after
                 * releasing the inode, because this allocated page will go
                 * into the page cache and is regarded as coming from the
                 * reserved pool in the releasing step. Currently, we don't
                 * have any other solution to deal with this situation
                 * properly, so add a work-around here.
                 */
                if (vma->vm_flags & VM_MAYSHARE && chg == 0)
                        return true;
                else
                        return false;
        }

        /* Shared mappings always use reserves */
        if (vma->vm_flags & VM_MAYSHARE)
                return true;

        /*
         * Only the process that called mmap() has reserves for
         * private mappings.
         */
        if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
                return true;

        return false;
}

static void enqueue_huge_page(struct hstate *h, struct page *page)
{
        int nid = page_to_nid(page);
        list_move(&page->lru, &h->hugepage_freelists[nid]);
        h->free_huge_pages++;
        h->free_huge_pages_node[nid]++;
}

static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
{
        struct page *page;

        list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
                if (!is_migrate_isolate_page(page))
                        break;
        /*
         * if 'non-isolated free hugepage' not found on the list,
         * the allocation fails.
         */
        if (&h->hugepage_freelists[nid] == &page->lru)
                return NULL;
        list_move(&page->lru, &h->hugepage_activelist);
        set_page_refcounted(page);
        h->free_huge_pages--;
        h->free_huge_pages_node[nid]--;
        return page;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
        if (hugepages_treat_as_movable || hugepage_migration_supported(h))
                return GFP_HIGHUSER_MOVABLE;
        else
                return GFP_HIGHUSER;
}

static struct page *dequeue_huge_page_vma(struct hstate *h,
                                struct vm_area_struct *vma,
                                unsigned long address, int avoid_reserve,
                                long chg)
{
        struct page *page = NULL;
        struct mempolicy *mpol;
        nodemask_t *nodemask;
        struct zonelist *zonelist;
        struct zone *zone;
        struct zoneref *z;
        unsigned int cpuset_mems_cookie;

        /*
         * Child processes with MAP_PRIVATE mappings created by their parent
         * have no page reserves. This check ensures that reservations are
         * not "stolen". The child may still get SIGKILLed.
         */
        if (!vma_has_reserves(vma, chg) &&
                        h->free_huge_pages - h->resv_huge_pages == 0)
                goto err;

        /* If reserves cannot be used, ensure enough pages are in the pool */
        if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
                goto err;

retry_cpuset:
        cpuset_mems_cookie = read_mems_allowed_begin();
        zonelist = huge_zonelist(vma, address,
                                        htlb_alloc_mask(h), &mpol, &nodemask);

        for_each_zone_zonelist_nodemask(zone, z, zonelist,
                                                MAX_NR_ZONES - 1, nodemask) {
                if (cpuset_zone_allowed(zone, htlb_alloc_mask(h))) {
                        page = dequeue_huge_page_node(h, zone_to_nid(zone));
                        if (page) {
                                if (avoid_reserve)
                                        break;
                                if (!vma_has_reserves(vma, chg))
                                        break;

                                SetPagePrivate(page);
                                h->resv_huge_pages--;
                                break;
                        }
                }
        }

        mpol_cond_put(mpol);
        if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
                goto retry_cpuset;
        return page;

err:
        return NULL;
}

/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed. Ensure that we use an allowed
 * node for alloc or free.
 */
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
        nid = next_node(nid, *nodes_allowed);
        if (nid == MAX_NUMNODES)
                nid = first_node(*nodes_allowed);
        VM_BUG_ON(nid >= MAX_NUMNODES);

        return nid;
}

static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
        if (!node_isset(nid, *nodes_allowed))
                nid = next_node_allowed(nid, nodes_allowed);
        return nid;
}

/*
 * returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advance the
 * next node from which to allocate, handling wrap at end of node
 * mask.
 */
static int hstate_next_node_to_alloc(struct hstate *h,
                                        nodemask_t *nodes_allowed)
{
        int nid;

        VM_BUG_ON(!nodes_allowed);

        nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
        h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);

        return nid;
}

/*
 * helper for free_pool_huge_page() - return the previously saved
 * node ["this node"] from which to free a huge page. Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
        int nid;

        VM_BUG_ON(!nodes_allowed);

        nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
        h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

        return nid;
}

#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)          \
        for (nr_nodes = nodes_weight(*mask);                            \
                nr_nodes > 0 &&                                         \
                ((node = hstate_next_node_to_alloc(hs, mask)) || 1);    \
                nr_nodes--)

#define for_each_node_mask_to_free(hs, nr_nodes, node, mask)            \
        for (nr_nodes = nodes_weight(*mask);                            \
                nr_nodes > 0 &&                                         \
                ((node = hstate_next_node_to_free(hs, mask)) || 1);     \
                nr_nodes--)
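
/*
 * Usage sketch (editorial illustration, mirroring the callers further
 * down in this file): try each allowed node at most once, starting at
 * the saved round-robin position:
 *
 *        for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
 *                page = alloc_fresh_huge_page_node(h, node);
 *                if (page)
 *                        break;
 *        }
 */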

#if defined(CONFIG_CMA) && defined(CONFIG_X86_64)
static void destroy_compound_gigantic_page(struct page *page,
                                        unsigned long order)
{
        int i;
        int nr_pages = 1 << order;
        struct page *p = page + 1;

        for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
                __ClearPageTail(p);
                set_page_refcounted(p);
                p->first_page = NULL;
        }

        set_compound_order(page, 0);
        __ClearPageHead(page);
}

static void free_gigantic_page(struct page *page, unsigned order)
{
        free_contig_range(page_to_pfn(page), 1 << order);
}

static int __alloc_gigantic_page(unsigned long start_pfn,
                                unsigned long nr_pages)
{
        unsigned long end_pfn = start_pfn + nr_pages;
        return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
}

static bool pfn_range_valid_gigantic(unsigned long start_pfn,
                                unsigned long nr_pages)
{
        unsigned long i, end_pfn = start_pfn + nr_pages;
        struct page *page;

        for (i = start_pfn; i < end_pfn; i++) {
                if (!pfn_valid(i))
                        return false;

                page = pfn_to_page(i);

                if (PageReserved(page))
                        return false;

                if (page_count(page) > 0)
                        return false;

                if (PageHuge(page))
                        return false;
        }

        return true;
}

static bool zone_spans_last_pfn(const struct zone *zone,
                        unsigned long start_pfn, unsigned long nr_pages)
{
        unsigned long last_pfn = start_pfn + nr_pages - 1;
        return zone_spans_pfn(zone, last_pfn);
}

static struct page *alloc_gigantic_page(int nid, unsigned order)
{
        unsigned long nr_pages = 1 << order;
        unsigned long ret, pfn, flags;
        struct zone *z;

        z = NODE_DATA(nid)->node_zones;
        for (; z - NODE_DATA(nid)->node_zones < MAX_NR_ZONES; z++) {
                spin_lock_irqsave(&z->lock, flags);

                pfn = ALIGN(z->zone_start_pfn, nr_pages);
                while (zone_spans_last_pfn(z, pfn, nr_pages)) {
                        if (pfn_range_valid_gigantic(pfn, nr_pages)) {
                                /*
                                 * We release the zone lock here because
                                 * alloc_contig_range() will also lock the zone
                                 * at some point. If there's an allocation
                                 * spinning on this lock, it may win the race
                                 * and cause alloc_contig_range() to fail...
                                 */
                                spin_unlock_irqrestore(&z->lock, flags);
                                ret = __alloc_gigantic_page(pfn, nr_pages);
                                if (!ret)
                                        return pfn_to_page(pfn);
                                spin_lock_irqsave(&z->lock, flags);
                        }
                        pfn += nr_pages;
                }

                spin_unlock_irqrestore(&z->lock, flags);
        }

        return NULL;
}

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
static void prep_compound_gigantic_page(struct page *page, unsigned long order);

static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid)
{
        struct page *page;

        page = alloc_gigantic_page(nid, huge_page_order(h));
        if (page) {
                prep_compound_gigantic_page(page, huge_page_order(h));
                prep_new_huge_page(h, page, nid);
        }

        return page;
}

static int alloc_fresh_gigantic_page(struct hstate *h,
                                nodemask_t *nodes_allowed)
{
        struct page *page = NULL;
        int nr_nodes, node;

        for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
                page = alloc_fresh_gigantic_page_node(h, node);
                if (page)
                        return 1;
        }

        return 0;
}

static inline bool gigantic_page_supported(void) { return true; }
#else
static inline bool gigantic_page_supported(void) { return false; }
static inline void free_gigantic_page(struct page *page, unsigned order) { }
static inline void destroy_compound_gigantic_page(struct page *page,
                                                unsigned long order) { }
static inline int alloc_fresh_gigantic_page(struct hstate *h,
                                        nodemask_t *nodes_allowed) { return 0; }
#endif

static void update_and_free_page(struct hstate *h, struct page *page)
{
        int i;

        if (hstate_is_gigantic(h) && !gigantic_page_supported())
                return;

        h->nr_huge_pages--;
        h->nr_huge_pages_node[page_to_nid(page)]--;
        for (i = 0; i < pages_per_huge_page(h); i++) {
                page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
                                1 << PG_referenced | 1 << PG_dirty |
                                1 << PG_active | 1 << PG_private |
                                1 << PG_writeback);
        }
        VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
        set_compound_page_dtor(page, NULL);
        set_page_refcounted(page);
        if (hstate_is_gigantic(h)) {
                destroy_compound_gigantic_page(page, huge_page_order(h));
                free_gigantic_page(page, huge_page_order(h));
        } else {
                __free_pages(page, huge_page_order(h));
        }
}

struct hstate *size_to_hstate(unsigned long size)
{
        struct hstate *h;

        for_each_hstate(h) {
                if (huge_page_size(h) == size)
                        return h;
        }
        return NULL;
}

/*
 * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
 * to hstate->hugepage_activelist.)
 *
 * This function can be called for tail pages, but never returns true for them.
 */
bool page_huge_active(struct page *page)
{
        VM_BUG_ON_PAGE(!PageHuge(page), page);
        return PageHead(page) && PagePrivate(&page[1]);
}

/* never called for tail page */
static void set_page_huge_active(struct page *page)
{
        VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
        SetPagePrivate(&page[1]);
}

static void clear_page_huge_active(struct page *page)
{
        VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
        ClearPagePrivate(&page[1]);
}

void free_huge_page(struct page *page)
{
        /*
         * Can't pass hstate in here because it is called from the
         * compound page destructor.
         */
        struct hstate *h = page_hstate(page);
        int nid = page_to_nid(page);
        struct hugepage_subpool *spool =
                (struct hugepage_subpool *)page_private(page);
        bool restore_reserve;

        set_page_private(page, 0);
        page->mapping = NULL;
        BUG_ON(page_count(page));
        BUG_ON(page_mapcount(page));
        restore_reserve = PagePrivate(page);
        ClearPagePrivate(page);

        /*
         * A return code of zero implies that the subpool will be under its
         * minimum size if the reservation is not restored after the page
         * is freed. Therefore, force the restore_reserve operation.
         */
        if (hugepage_subpool_put_pages(spool, 1) == 0)
                restore_reserve = true;

        spin_lock(&hugetlb_lock);
        clear_page_huge_active(page);
        hugetlb_cgroup_uncharge_page(hstate_index(h),
                                        pages_per_huge_page(h), page);
        if (restore_reserve)
                h->resv_huge_pages++;

        if (h->surplus_huge_pages_node[nid]) {
                /* remove the page from active list */
                list_del(&page->lru);
                update_and_free_page(h, page);
                h->surplus_huge_pages--;
                h->surplus_huge_pages_node[nid]--;
        } else {
                arch_clear_hugepage_flags(page);
                enqueue_huge_page(h, page);
        }
        spin_unlock(&hugetlb_lock);
}

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
        INIT_LIST_HEAD(&page->lru);
        set_compound_page_dtor(page, free_huge_page);
        spin_lock(&hugetlb_lock);
        set_hugetlb_cgroup(page, NULL);
        h->nr_huge_pages++;
        h->nr_huge_pages_node[nid]++;
        spin_unlock(&hugetlb_lock);
        put_page(page); /* free it into the hugepage allocator */
}

static void prep_compound_gigantic_page(struct page *page, unsigned long order)
{
        int i;
        int nr_pages = 1 << order;
        struct page *p = page + 1;

        /* we rely on prep_new_huge_page to set the destructor */
        set_compound_order(page, order);
        __SetPageHead(page);
        __ClearPageReserved(page);
        for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
                /*
                 * For gigantic hugepages allocated through bootmem at
                 * boot, it's safer to be consistent with the not-gigantic
                 * hugepages and clear the PG_reserved bit from all tail pages
                 * too. Otherwise drivers using get_user_pages() to access tail
                 * pages may get the reference counting wrong if they see
                 * PG_reserved set on a tail page (despite the head page not
                 * having PG_reserved set). Enforcing this consistency between
                 * head and tail pages allows drivers to optimize away a check
                 * on the head page when they need to know if put_page() is
                 * needed after get_user_pages().
                 */
                __ClearPageReserved(p);
                set_page_count(p, 0);
                p->first_page = page;
                /* Make sure p->first_page is always valid for PageTail() */
                smp_wmb();
                __SetPageTail(p);
        }
}

/*
 * PageHuge() only returns true for hugetlbfs pages, but not for normal or
 * transparent huge pages. See the PageTransHuge() documentation for more
 * details.
 */
int PageHuge(struct page *page)
{
        if (!PageCompound(page))
                return 0;

        page = compound_head(page);
        return get_compound_page_dtor(page) == free_huge_page;
}
EXPORT_SYMBOL_GPL(PageHuge);

/*
 * PageHeadHuge() only returns true for hugetlbfs head page, but not for
 * normal or transparent huge pages.
 */
int PageHeadHuge(struct page *page_head)
{
        if (!PageHead(page_head))
                return 0;

        return get_compound_page_dtor(page_head) == free_huge_page;
}

pgoff_t __basepage_index(struct page *page)
{
        struct page *page_head = compound_head(page);
        pgoff_t index = page_index(page_head);
        unsigned long compound_idx;

        if (!PageHuge(page_head))
                return page_index(page);

        if (compound_order(page_head) >= MAX_ORDER)
                compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
        else
                compound_idx = page - page_head;

        return (index << compound_order(page_head)) + compound_idx;
}
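
/*
 * Worked example (editorial illustration, assuming a 2MB huge page
 * built from 512 base pages): for a head page at huge page index 3,
 * the tail page at offset 12 within it maps to base page index
 * (3 << 9) + 12 == 1548.
 */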

static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
{
        struct page *page;

        page = alloc_pages_exact_node(nid,
                htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
                                                __GFP_REPEAT|__GFP_NOWARN,
                huge_page_order(h));
        if (page) {
                prep_new_huge_page(h, page, nid);
        }

        return page;
}

static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
{
        struct page *page;
        int nr_nodes, node;
        int ret = 0;

        for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
                page = alloc_fresh_huge_page_node(h, node);
                if (page) {
                        ret = 1;
                        break;
                }
        }

        if (ret)
                count_vm_event(HTLB_BUDDY_PGALLOC);
        else
                count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);

        return ret;
}

/*
 * Free huge page from pool from next node to free.
 * Attempt to keep persistent huge pages more or less
 * balanced over allowed nodes.
 * Called with hugetlb_lock locked.
 */
static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
                                                         bool acct_surplus)
{
        int nr_nodes, node;
        int ret = 0;

        for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
                /*
                 * If we're returning unused surplus pages, only examine
                 * nodes with surplus pages.
                 */
                if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
                    !list_empty(&h->hugepage_freelists[node])) {
                        struct page *page =
                                list_entry(h->hugepage_freelists[node].next,
                                          struct page, lru);
                        list_del(&page->lru);
                        h->free_huge_pages--;
                        h->free_huge_pages_node[node]--;
                        if (acct_surplus) {
                                h->surplus_huge_pages--;
                                h->surplus_huge_pages_node[node]--;
                        }
                        update_and_free_page(h, page);
                        ret = 1;
                        break;
                }
        }

        return ret;
}

/*
 * Dissolve a given free hugepage into free buddy pages. This function does
 * nothing for in-use (including surplus) hugepages.
 */
static void dissolve_free_huge_page(struct page *page)
{
        spin_lock(&hugetlb_lock);
        if (PageHuge(page) && !page_count(page)) {
                struct hstate *h = page_hstate(page);
                int nid = page_to_nid(page);
                list_del(&page->lru);
                h->free_huge_pages--;
                h->free_huge_pages_node[nid]--;
                update_and_free_page(h, page);
        }
        spin_unlock(&hugetlb_lock);
}

/*
 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
 * make specified memory blocks removable from the system.
 * Note that start_pfn should be aligned with (minimum) hugepage size.
 */
void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long pfn;

        if (!hugepages_supported())
                return;

        VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << minimum_order));
        for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order)
                dissolve_free_huge_page(pfn_to_page(pfn));
}

static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
{
        struct page *page;
        unsigned int r_nid;

        if (hstate_is_gigantic(h))
                return NULL;

        /*
         * Assume we will successfully allocate the surplus page to
         * prevent racing processes from causing the surplus to exceed
         * overcommit
         *
         * This however introduces a different race, where a process B
         * tries to grow the static hugepage pool while alloc_pages() is
         * called by process A. B will only examine the per-node
         * counters in determining if surplus huge pages can be
         * converted to normal huge pages in adjust_pool_surplus(). A
         * won't be able to increment the per-node counter, until the
         * lock is dropped by B, but B doesn't drop hugetlb_lock until
         * no more huge pages can be converted from surplus to normal
         * state (and doesn't try to convert again). Thus, we have a
         * case where a surplus huge page exists, the pool is grown, and
         * the surplus huge page still exists after, even though it
         * should just have been converted to a normal huge page. This
         * does not leak memory, though, as the hugepage will be freed
         * once it is out of use. It also does not allow the counters to
         * go out of whack in adjust_pool_surplus() as we don't modify
         * the node values until we've gotten the hugepage and only the
         * per-node value is checked there.
         */
        spin_lock(&hugetlb_lock);
        if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
                spin_unlock(&hugetlb_lock);
                return NULL;
        } else {
                h->nr_huge_pages++;
                h->surplus_huge_pages++;
        }
        spin_unlock(&hugetlb_lock);

        if (nid == NUMA_NO_NODE)
                page = alloc_pages(htlb_alloc_mask(h)|__GFP_COMP|
                                   __GFP_REPEAT|__GFP_NOWARN,
                                   huge_page_order(h));
        else
                page = alloc_pages_exact_node(nid,
                        htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
                        __GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));

        spin_lock(&hugetlb_lock);
        if (page) {
                INIT_LIST_HEAD(&page->lru);
                r_nid = page_to_nid(page);
                set_compound_page_dtor(page, free_huge_page);
                set_hugetlb_cgroup(page, NULL);
                /*
                 * We incremented the global counters already
                 */
                h->nr_huge_pages_node[r_nid]++;
                h->surplus_huge_pages_node[r_nid]++;
                __count_vm_event(HTLB_BUDDY_PGALLOC);
        } else {
                h->nr_huge_pages--;
                h->surplus_huge_pages--;
                __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
        }
        spin_unlock(&hugetlb_lock);

        return page;
}

/*
 * This allocation function is useful in the context where vma is irrelevant.
 * E.g. soft-offlining uses this function because it only cares about the
 * physical address of the error page.
 */
struct page *alloc_huge_page_node(struct hstate *h, int nid)
{
        struct page *page = NULL;

        spin_lock(&hugetlb_lock);
        if (h->free_huge_pages - h->resv_huge_pages > 0)
                page = dequeue_huge_page_node(h, nid);
        spin_unlock(&hugetlb_lock);

        if (!page)
                page = alloc_buddy_huge_page(h, nid);

        return page;
}

/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */
static int gather_surplus_pages(struct hstate *h, int delta)
{
        struct list_head surplus_list;
        struct page *page, *tmp;
        int ret, i;
        int needed, allocated;
        bool alloc_ok = true;

        needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
        if (needed <= 0) {
                h->resv_huge_pages += delta;
                return 0;
        }

        allocated = 0;
        INIT_LIST_HEAD(&surplus_list);

        ret = -ENOMEM;
retry:
        spin_unlock(&hugetlb_lock);
        for (i = 0; i < needed; i++) {
                page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
                if (!page) {
                        alloc_ok = false;
                        break;
                }
                list_add(&page->lru, &surplus_list);
        }
        allocated += i;

        /*
         * After retaking hugetlb_lock, we need to recalculate 'needed'
         * because either resv_huge_pages or free_huge_pages may have changed.
         */
        spin_lock(&hugetlb_lock);
        needed = (h->resv_huge_pages + delta) -
                        (h->free_huge_pages + allocated);
        if (needed > 0) {
                if (alloc_ok)
                        goto retry;
                /*
                 * We were not able to allocate enough pages to
                 * satisfy the entire reservation so we free what
                 * we've allocated so far.
                 */
                goto free;
        }
        /*
         * The surplus_list now contains _at_least_ the number of extra pages
         * needed to accommodate the reservation. Add the appropriate number
         * of pages to the hugetlb pool and free the extras back to the buddy
         * allocator. Commit the entire reservation here to prevent another
         * process from stealing the pages as they are added to the pool but
         * before they are reserved.
         */
        needed += allocated;
        h->resv_huge_pages += delta;
        ret = 0;

        /* Free the needed pages to the hugetlb pool */
        list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
                if ((--needed) < 0)
                        break;
                /*
                 * This page is now managed by the hugetlb allocator and has
                 * no users -- drop the buddy allocator's reference.
                 */
                put_page_testzero(page);
                VM_BUG_ON_PAGE(page_count(page), page);
                enqueue_huge_page(h, page);
        }
free:
        spin_unlock(&hugetlb_lock);

        /* Free unnecessary surplus pages to the buddy allocator */
        list_for_each_entry_safe(page, tmp, &surplus_list, lru)
                put_page(page);
        spin_lock(&hugetlb_lock);

        return ret;
}
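
/*
 * Worked example (editorial illustration): with resv_huge_pages == 10,
 * free_huge_pages == 8 and delta == 3, gather_surplus_pages() must
 * allocate needed = (10 + 3) - 8 = 5 surplus pages. If another thread
 * frees 2 pages while the lock is dropped (free_huge_pages == 10), the
 * recalculated needed is (10 + 3) - (10 + 5) = -2, so three of the new
 * pages are enqueued into the pool and the two extras go back to the
 * buddy allocator.
 */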

/*
 * When releasing a hugetlb pool reservation, any surplus pages that were
 * allocated to satisfy the reservation must be explicitly freed if they were
 * never used.
 * Called with hugetlb_lock held.
 */
static void return_unused_surplus_pages(struct hstate *h,
                                        unsigned long unused_resv_pages)
{
        unsigned long nr_pages;

        /* Uncommit the reservation */
        h->resv_huge_pages -= unused_resv_pages;

        /* Cannot return gigantic pages currently */
        if (hstate_is_gigantic(h))
                return;

        nr_pages = min(unused_resv_pages, h->surplus_huge_pages);

        /*
         * We want to release as many surplus pages as possible, spread
         * evenly across all nodes with memory. Iterate across these nodes
         * until we can no longer free unreserved surplus pages. This occurs
         * when the nodes with surplus pages have no free pages.
         * free_pool_huge_page() will balance the freed pages across the
         * on-line nodes with memory and will handle the hstate accounting.
         */
        while (nr_pages--) {
                if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
                        break;
                cond_resched_lock(&hugetlb_lock);
        }
}


/*
 * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
 * are used by the huge page allocation routines to manage reservations.
 *
 * vma_needs_reservation is called to determine if the huge page at addr
 * within the vma has an associated reservation. If a reservation is
 * needed, the value 1 is returned. The caller is then responsible for
 * managing the global reservation and subpool usage counts. After
 * the huge page has been allocated, vma_commit_reservation is called
 * to add the page to the reservation map. If the page allocation fails,
 * the reservation must be ended instead of committed. vma_end_reservation
 * is called in such cases.
 *
 * In the normal case, vma_commit_reservation returns the same value
 * as the preceding vma_needs_reservation call. The only time this
 * is not the case is if a reserve map was changed between calls. It
 * is the responsibility of the caller to notice the difference and
 * take appropriate action.
 */
enum vma_resv_mode {
        VMA_NEEDS_RESV,
        VMA_COMMIT_RESV,
        VMA_END_RESV,
};
static long __vma_reservation_common(struct hstate *h,
                                struct vm_area_struct *vma, unsigned long addr,
                                enum vma_resv_mode mode)
{
        struct resv_map *resv;
        pgoff_t idx;
        long ret;

        resv = vma_resv_map(vma);
        if (!resv)
                return 1;

        idx = vma_hugecache_offset(h, vma, addr);
        switch (mode) {
        case VMA_NEEDS_RESV:
                ret = region_chg(resv, idx, idx + 1);
                break;
        case VMA_COMMIT_RESV:
                ret = region_add(resv, idx, idx + 1);
                break;
        case VMA_END_RESV:
                region_abort(resv, idx, idx + 1);
                ret = 0;
                break;
        default:
                BUG();
        }

        if (vma->vm_flags & VM_MAYSHARE)
                return ret;
        else
                return ret < 0 ? ret : 0;
}

static long vma_needs_reservation(struct hstate *h,
                        struct vm_area_struct *vma, unsigned long addr)
{
        return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
}

static long vma_commit_reservation(struct hstate *h,
                        struct vm_area_struct *vma, unsigned long addr)
{
        return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
}

static void vma_end_reservation(struct hstate *h,
                        struct vm_area_struct *vma, unsigned long addr)
{
        (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
}
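
/*
 * Usage sketch (editorial illustration, mirroring alloc_huge_page()
 * below; the allocation step is elided):
 *
 *        chg = vma_needs_reservation(h, vma, addr);
 *        if (chg < 0)
 *                return ERR_PTR(-ENOMEM);
 *        page = ...;
 *        if (!page) {
 *                vma_end_reservation(h, vma, addr);
 *                return ERR_PTR(-ENOSPC);
 *        }
 *        commit = vma_commit_reservation(h, vma, addr);
 */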

static struct page *alloc_huge_page(struct vm_area_struct *vma,
                                    unsigned long addr, int avoid_reserve)
{
        struct hugepage_subpool *spool = subpool_vma(vma);
        struct hstate *h = hstate_vma(vma);
        struct page *page;
        long chg, commit;
        int ret, idx;
        struct hugetlb_cgroup *h_cg;

        idx = hstate_index(h);
        /*
         * Processes that did not create the mapping will have no
         * reserves and will not have accounted against the subpool
         * limit. Check that the subpool limit can be made before
         * satisfying the allocation. MAP_NORESERVE mappings may also
         * need pages and the subpool limit allocated if no reserve
         * mapping overlaps.
         */
        chg = vma_needs_reservation(h, vma, addr);
        if (chg < 0)
                return ERR_PTR(-ENOMEM);
        if (chg || avoid_reserve)
                if (hugepage_subpool_get_pages(spool, 1) < 0) {
                        vma_end_reservation(h, vma, addr);
                        return ERR_PTR(-ENOSPC);
                }

        ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
        if (ret)
                goto out_subpool_put;

        spin_lock(&hugetlb_lock);
        page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, chg);
        if (!page) {
                spin_unlock(&hugetlb_lock);
                page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
                if (!page)
                        goto out_uncharge_cgroup;

                spin_lock(&hugetlb_lock);
                list_move(&page->lru, &h->hugepage_activelist);
                /* Fall through */
        }
        hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
        spin_unlock(&hugetlb_lock);

        set_page_private(page, (unsigned long)spool);

        commit = vma_commit_reservation(h, vma, addr);
        if (unlikely(chg > commit)) {
                /*
                 * The page was added to the reservation map between
                 * vma_needs_reservation and vma_commit_reservation.
                 * This indicates a race with hugetlb_reserve_pages.
                 * Adjust for the subpool count incremented above AND
                 * in hugetlb_reserve_pages for the same page. Also,
                 * the reservation count added in hugetlb_reserve_pages
                 * no longer applies.
                 */
                long rsv_adjust;

                rsv_adjust = hugepage_subpool_put_pages(spool, 1);
                hugetlb_acct_memory(h, -rsv_adjust);
        }
        return page;

out_uncharge_cgroup:
        hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
out_subpool_put:
        if (chg || avoid_reserve)
                hugepage_subpool_put_pages(spool, 1);
        vma_end_reservation(h, vma, addr);
        return ERR_PTR(-ENOSPC);
}

/*
 * alloc_huge_page()'s wrapper which simply returns the page if allocation
 * succeeds, otherwise NULL. This function is called from new_vma_page(),
 * where no ERR value is expected to be returned.
 */
struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
                                unsigned long addr, int avoid_reserve)
{
        struct page *page = alloc_huge_page(vma, addr, avoid_reserve);
        if (IS_ERR(page))
                page = NULL;
        return page;
}

int __weak alloc_bootmem_huge_page(struct hstate *h)
{
        struct huge_bootmem_page *m;
        int nr_nodes, node;

        for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
                void *addr;

                addr = memblock_virt_alloc_try_nid_nopanic(
                                huge_page_size(h), huge_page_size(h),
                                0, BOOTMEM_ALLOC_ACCESSIBLE, node);
                if (addr) {
                        /*
                         * Use the beginning of the huge page to store the
                         * huge_bootmem_page struct (until gather_bootmem
                         * puts them into the mem_map).
                         */
                        m = addr;
                        goto found;
                }
        }
        return 0;

found:
        BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
        /* Put them into a private list first because mem_map is not up yet */
        list_add(&m->list, &huge_boot_pages);
        m->hstate = h;
        return 1;
}

static void __init prep_compound_huge_page(struct page *page, int order)
{
        if (unlikely(order > (MAX_ORDER - 1)))
                prep_compound_gigantic_page(page, order);
        else
                prep_compound_page(page, order);
}

/* Put bootmem huge pages into the standard lists after mem_map is up */
static void __init gather_bootmem_prealloc(void)
{
        struct huge_bootmem_page *m;

        list_for_each_entry(m, &huge_boot_pages, list) {
                struct hstate *h = m->hstate;
                struct page *page;

#ifdef CONFIG_HIGHMEM
                page = pfn_to_page(m->phys >> PAGE_SHIFT);
                memblock_free_late(__pa(m),
                                   sizeof(struct huge_bootmem_page));
#else
                page = virt_to_page(m);
#endif
                WARN_ON(page_count(page) != 1);
                prep_compound_huge_page(page, h->order);
                WARN_ON(PageReserved(page));
                prep_new_huge_page(h, page, page_to_nid(page));
                /*
                 * If we had gigantic hugepages allocated at boot time, we need
                 * to restore the 'stolen' pages to totalram_pages in order to
                 * fix confusing memory reports from free(1) and other
                 * side effects, like CommitLimit going negative.
                 */
                if (hstate_is_gigantic(h))
                        adjust_managed_page_count(page, 1 << h->order);
        }
}
1856
1857static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
1858{
1859 unsigned long i;
1860
1861 for (i = 0; i < h->max_huge_pages; ++i) {
1862 if (hstate_is_gigantic(h)) {
1863 if (!alloc_bootmem_huge_page(h))
1864 break;
1865 } else if (!alloc_fresh_huge_page(h,
1866 &node_states[N_MEMORY]))
1867 break;
1868 }
1869 h->max_huge_pages = i;
1870}
1871
1872static void __init hugetlb_init_hstates(void)
1873{
1874 struct hstate *h;
1875
1876 for_each_hstate(h) {
1877 if (minimum_order > huge_page_order(h))
1878 minimum_order = huge_page_order(h);
1879
1880 /* oversize hugepages were init'ed in early boot */
1881 if (!hstate_is_gigantic(h))
1882 hugetlb_hstate_alloc_pages(h);
1883 }
1884 VM_BUG_ON(minimum_order == UINT_MAX);
1885}
1886
1887static char * __init memfmt(char *buf, unsigned long n)
1888{
1889 if (n >= (1UL << 30))
1890 sprintf(buf, "%lu GB", n >> 30);
1891 else if (n >= (1UL << 20))
1892 sprintf(buf, "%lu MB", n >> 20);
1893 else
1894 sprintf(buf, "%lu KB", n >> 10);
1895 return buf;
1896}
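
/*
 * Editor's note: a quick worked example of memfmt() (illustrative values,
 * not from the original file):
 *
 *	char buf[32];
 *	memfmt(buf, 1UL << 30);		buf == "1 GB"
 *	memfmt(buf, 2UL << 20);		buf == "2 MB"
 *	memfmt(buf, 64UL << 10);	buf == "64 KB"
 */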
1897
1898static void __init report_hugepages(void)
1899{
1900 struct hstate *h;
1901
1902 for_each_hstate(h) {
1903 char buf[32];
1904 pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
1905 memfmt(buf, huge_page_size(h)),
1906 h->free_huge_pages);
1907 }
1908}
1909
1910#ifdef CONFIG_HIGHMEM
1911static void try_to_free_low(struct hstate *h, unsigned long count,
1912 nodemask_t *nodes_allowed)
1913{
1914 int i;
1915
1916 if (hstate_is_gigantic(h))
1917 return;
1918
1919 for_each_node_mask(i, *nodes_allowed) {
1920 struct page *page, *next;
1921 struct list_head *freel = &h->hugepage_freelists[i];
1922 list_for_each_entry_safe(page, next, freel, lru) {
1923 if (count >= h->nr_huge_pages)
1924 return;
1925 if (PageHighMem(page))
1926 continue;
1927 list_del(&page->lru);
1928 update_and_free_page(h, page);
1929 h->free_huge_pages--;
1930 h->free_huge_pages_node[page_to_nid(page)]--;
1931 }
1932 }
1933}
1934#else
1935static inline void try_to_free_low(struct hstate *h, unsigned long count,
1936 nodemask_t *nodes_allowed)
1937{
1938}
1939#endif
1940
1941/*
1942 * Increment or decrement surplus_huge_pages. Keep node-specific counters
1943 * balanced by operating on them in a round-robin fashion.
1944 * Returns 1 if an adjustment was made.
1945 */
1946static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
1947 int delta)
1948{
1949 int nr_nodes, node;
1950
1951 VM_BUG_ON(delta != -1 && delta != 1);
1952
1953 if (delta < 0) {
1954 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1955 if (h->surplus_huge_pages_node[node])
1956 goto found;
1957 }
1958 } else {
1959 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1960 if (h->surplus_huge_pages_node[node] <
1961 h->nr_huge_pages_node[node])
1962 goto found;
1963 }
1964 }
1965 return 0;
1966
1967found:
1968 h->surplus_huge_pages += delta;
1969 h->surplus_huge_pages_node[node] += delta;
1970 return 1;
1971}
1972
1973#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
1974static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
1975 nodemask_t *nodes_allowed)
1976{
1977 unsigned long min_count, ret;
1978
1979 if (hstate_is_gigantic(h) && !gigantic_page_supported())
1980 return h->max_huge_pages;
1981
1982 /*
1983 * Increase the pool size
1984 * First take pages out of surplus state. Then make up the
1985 * remaining difference by allocating fresh huge pages.
1986 *
1987 * We might race with alloc_buddy_huge_page() here and be unable
1988 * to convert a surplus huge page to a normal huge page. That is
1989 * not critical, though, it just means the overall size of the
1990 * pool might be one hugepage larger than it needs to be, but
1991 * within all the constraints specified by the sysctls.
1992 */
1993 spin_lock(&hugetlb_lock);
1994 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
1995 if (!adjust_pool_surplus(h, nodes_allowed, -1))
1996 break;
1997 }
1998
1999 while (count > persistent_huge_pages(h)) {
2000 /*
2001 * If this allocation races such that we no longer need the
2002 * page, free_huge_page will handle it by freeing the page
2003 * and reducing the surplus.
2004 */
2005 spin_unlock(&hugetlb_lock);
2006 if (hstate_is_gigantic(h))
2007 ret = alloc_fresh_gigantic_page(h, nodes_allowed);
2008 else
2009 ret = alloc_fresh_huge_page(h, nodes_allowed);
2010 spin_lock(&hugetlb_lock);
2011 if (!ret)
2012 goto out;
2013
2014 /* Bail for signals. Probably ctrl-c from user */
2015 if (signal_pending(current))
2016 goto out;
2017 }
2018
2019 /*
2020 * Decrease the pool size
2021 * First return free pages to the buddy allocator (being careful
2022 * to keep enough around to satisfy reservations). Then place
2023 * pages into surplus state as needed so the pool will shrink
2024 * to the desired size as pages become free.
2025 *
2026 * By placing pages into the surplus state independent of the
2027 * overcommit value, we are allowing the surplus pool size to
2028 * exceed overcommit. There are few sane options here. Since
2029 * alloc_buddy_huge_page() is checking the global counter,
2030 * though, we'll note that we're not allowed to exceed surplus
2031 * and won't grow the pool anywhere else. Not until one of the
2032 * sysctls is changed, or the surplus pages go out of use.
2033 */
2034 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
2035 min_count = max(count, min_count);
2036 try_to_free_low(h, min_count, nodes_allowed);
2037 while (min_count < persistent_huge_pages(h)) {
2038 if (!free_pool_huge_page(h, nodes_allowed, 0))
2039 break;
2040 cond_resched_lock(&hugetlb_lock);
2041 }
2042 while (count < persistent_huge_pages(h)) {
2043 if (!adjust_pool_surplus(h, nodes_allowed, 1))
2044 break;
2045 }
2046out:
2047 ret = persistent_huge_pages(h);
2048 spin_unlock(&hugetlb_lock);
2049 return ret;
2050}
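
/*
 * Editor's worked example for set_max_huge_pages() (illustrative numbers,
 * not from the source): with nr_huge_pages = 10, surplus_huge_pages = 2,
 * free_huge_pages = 4 and resv_huge_pages = 1, persistent_huge_pages() is
 * 10 - 2 = 8. Writing count = 5 skips the surplus-conversion and
 * allocation loops (5 < 8), then computes min_count = 1 + 10 - 4 = 7,
 * clamped to max(5, 7) = 7. The free loop releases 8 - 7 = 1 page back to
 * the buddy allocator, and the final loop moves 7 - 5 = 2 pages into
 * surplus state so they are freed later as they become unused.
 */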
2051
2052#define HSTATE_ATTR_RO(_name) \
2053 static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
2054
2055#define HSTATE_ATTR(_name) \
2056 static struct kobj_attribute _name##_attr = \
2057 __ATTR(_name, 0644, _name##_show, _name##_store)
2058
2059static struct kobject *hugepages_kobj;
2060static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2061
2062static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
2063
2064static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
2065{
2066 int i;
2067
2068 for (i = 0; i < HUGE_MAX_HSTATE; i++)
2069 if (hstate_kobjs[i] == kobj) {
2070 if (nidp)
2071 *nidp = NUMA_NO_NODE;
2072 return &hstates[i];
2073 }
2074
2075 return kobj_to_node_hstate(kobj, nidp);
2076}
2077
2078static ssize_t nr_hugepages_show_common(struct kobject *kobj,
2079 struct kobj_attribute *attr, char *buf)
2080{
2081 struct hstate *h;
2082 unsigned long nr_huge_pages;
2083 int nid;
2084
2085 h = kobj_to_hstate(kobj, &nid);
2086 if (nid == NUMA_NO_NODE)
2087 nr_huge_pages = h->nr_huge_pages;
2088 else
2089 nr_huge_pages = h->nr_huge_pages_node[nid];
2090
2091 return sprintf(buf, "%lu\n", nr_huge_pages);
2092}
2093
2094static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
2095 struct hstate *h, int nid,
2096 unsigned long count, size_t len)
2097{
2098 int err;
2099 NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
2100
2101 if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
2102 err = -EINVAL;
2103 goto out;
2104 }
2105
2106 if (nid == NUMA_NO_NODE) {
2107 /*
2108 * global hstate attribute
2109 */
2110 if (!(obey_mempolicy &&
2111 init_nodemask_of_mempolicy(nodes_allowed))) {
2112 NODEMASK_FREE(nodes_allowed);
2113 nodes_allowed = &node_states[N_MEMORY];
2114 }
2115 } else if (nodes_allowed) {
2116 /*
2117 * per node hstate attribute: adjust count to global,
2118 * but restrict alloc/free to the specified node.
2119 */
2120 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
2121 init_nodemask_of_node(nodes_allowed, nid);
2122 } else
2123 nodes_allowed = &node_states[N_MEMORY];
2124
2125 h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
2126
2127 if (nodes_allowed != &node_states[N_MEMORY])
2128 NODEMASK_FREE(nodes_allowed);
2129
2130 return len;
2131out:
2132 NODEMASK_FREE(nodes_allowed);
2133 return err;
2134}
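
/*
 * Editor's worked example for the per node case (illustrative numbers):
 * writing 4 to node 2's nr_hugepages with h->nr_huge_pages == 10 and
 * h->nr_huge_pages_node[2] == 1 passes count = 4 + (10 - 1) = 13 to
 * set_max_huge_pages(), with nodes_allowed restricted to node 2, so the
 * global pool grows by exactly the 3 pages added on that node.
 */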
2135
2136static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
2137 struct kobject *kobj, const char *buf,
2138 size_t len)
2139{
2140 struct hstate *h;
2141 unsigned long count;
2142 int nid;
2143 int err;
2144
2145 err = kstrtoul(buf, 10, &count);
2146 if (err)
2147 return err;
2148
2149 h = kobj_to_hstate(kobj, &nid);
2150 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
2151}
2152
2153static ssize_t nr_hugepages_show(struct kobject *kobj,
2154 struct kobj_attribute *attr, char *buf)
2155{
2156 return nr_hugepages_show_common(kobj, attr, buf);
2157}
2158
2159static ssize_t nr_hugepages_store(struct kobject *kobj,
2160 struct kobj_attribute *attr, const char *buf, size_t len)
2161{
2162 return nr_hugepages_store_common(false, kobj, buf, len);
2163}
2164HSTATE_ATTR(nr_hugepages);
2165
2166#ifdef CONFIG_NUMA
2167
2168/*
2169 * hstate attribute for optionally mempolicy-based constraint on persistent
2170 * huge page alloc/free.
2171 */
2172static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
2173 struct kobj_attribute *attr, char *buf)
2174{
2175 return nr_hugepages_show_common(kobj, attr, buf);
2176}
2177
2178static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
2179 struct kobj_attribute *attr, const char *buf, size_t len)
2180{
2181 return nr_hugepages_store_common(true, kobj, buf, len);
2182}
2183HSTATE_ATTR(nr_hugepages_mempolicy);
2184#endif
2185
2186
2187static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
2188 struct kobj_attribute *attr, char *buf)
2189{
2190 struct hstate *h = kobj_to_hstate(kobj, NULL);
2191 return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
2192}
2193
2194static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
2195 struct kobj_attribute *attr, const char *buf, size_t count)
2196{
2197 int err;
2198 unsigned long input;
2199 struct hstate *h = kobj_to_hstate(kobj, NULL);
2200
2201 if (hstate_is_gigantic(h))
2202 return -EINVAL;
2203
2204 err = kstrtoul(buf, 10, &input);
2205 if (err)
2206 return err;
2207
2208 spin_lock(&hugetlb_lock);
2209 h->nr_overcommit_huge_pages = input;
2210 spin_unlock(&hugetlb_lock);
2211
2212 return count;
2213}
2214HSTATE_ATTR(nr_overcommit_hugepages);
2215
2216static ssize_t free_hugepages_show(struct kobject *kobj,
2217 struct kobj_attribute *attr, char *buf)
2218{
2219 struct hstate *h;
2220 unsigned long free_huge_pages;
2221 int nid;
2222
2223 h = kobj_to_hstate(kobj, &nid);
2224 if (nid == NUMA_NO_NODE)
2225 free_huge_pages = h->free_huge_pages;
2226 else
2227 free_huge_pages = h->free_huge_pages_node[nid];
2228
2229 return sprintf(buf, "%lu\n", free_huge_pages);
2230}
2231HSTATE_ATTR_RO(free_hugepages);
2232
2233static ssize_t resv_hugepages_show(struct kobject *kobj,
2234 struct kobj_attribute *attr, char *buf)
2235{
2236 struct hstate *h = kobj_to_hstate(kobj, NULL);
2237 return sprintf(buf, "%lu\n", h->resv_huge_pages);
2238}
2239HSTATE_ATTR_RO(resv_hugepages);
2240
2241static ssize_t surplus_hugepages_show(struct kobject *kobj,
2242 struct kobj_attribute *attr, char *buf)
2243{
2244 struct hstate *h;
2245 unsigned long surplus_huge_pages;
2246 int nid;
2247
2248 h = kobj_to_hstate(kobj, &nid);
2249 if (nid == NUMA_NO_NODE)
2250 surplus_huge_pages = h->surplus_huge_pages;
2251 else
2252 surplus_huge_pages = h->surplus_huge_pages_node[nid];
2253
2254 return sprintf(buf, "%lu\n", surplus_huge_pages);
2255}
2256HSTATE_ATTR_RO(surplus_hugepages);
2257
2258static struct attribute *hstate_attrs[] = {
2259 &nr_hugepages_attr.attr,
2260 &nr_overcommit_hugepages_attr.attr,
2261 &free_hugepages_attr.attr,
2262 &resv_hugepages_attr.attr,
2263 &surplus_hugepages_attr.attr,
2264#ifdef CONFIG_NUMA
2265 &nr_hugepages_mempolicy_attr.attr,
2266#endif
2267 NULL,
2268};
2269
2270static struct attribute_group hstate_attr_group = {
2271 .attrs = hstate_attrs,
2272};
2273
2274static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
2275 struct kobject **hstate_kobjs,
2276 struct attribute_group *hstate_attr_group)
2277{
2278 int retval;
2279 int hi = hstate_index(h);
2280
2281 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
2282 if (!hstate_kobjs[hi])
2283 return -ENOMEM;
2284
2285 retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
2286 if (retval)
2287 kobject_put(hstate_kobjs[hi]);
2288
2289 return retval;
2290}
2291
2292static void __init hugetlb_sysfs_init(void)
2293{
2294 struct hstate *h;
2295 int err;
2296
2297 hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
2298 if (!hugepages_kobj)
2299 return;
2300
2301 for_each_hstate(h) {
2302 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
2303 hstate_kobjs, &hstate_attr_group);
2304 if (err)
2305 pr_err("Hugetlb: Unable to add hstate %s\n", h->name);
2306 }
2307}
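
/*
 * Editor's note: the resulting sysfs layout looks like the following
 * (2 MB hstate shown; illustrative):
 *
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/nr_overcommit_hugepages
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/free_hugepages
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/resv_hugepages
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/surplus_hugepages
 */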
2308
2309#ifdef CONFIG_NUMA
2310
2311/*
2312 * node_hstate/s - associate per node hstate attributes, via their kobjects,
2313 * with node devices in node_devices[] using a parallel array. The array
2314 * index of a node device (or its node_hstate) equals the node id.
2315 * This is here to avoid any static dependency of the node device driver, in
2316 * the base kernel, on the hugetlb module.
2317 */
2318struct node_hstate {
2319 struct kobject *hugepages_kobj;
2320 struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2321};
2322struct node_hstate node_hstates[MAX_NUMNODES];
2323
2324/*
2325 * A subset of global hstate attributes for node devices
2326 */
2327static struct attribute *per_node_hstate_attrs[] = {
2328 &nr_hugepages_attr.attr,
2329 &free_hugepages_attr.attr,
2330 &surplus_hugepages_attr.attr,
2331 NULL,
2332};
2333
2334static struct attribute_group per_node_hstate_attr_group = {
2335 .attrs = per_node_hstate_attrs,
2336};
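
/*
 * Editor's note: this subset appears under each node device, e.g.
 * (illustrative paths for node 0 and a 2 MB hstate):
 *
 *	/sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages
 *	/sys/devices/system/node/node0/hugepages/hugepages-2048kB/free_hugepages
 *	/sys/devices/system/node/node0/hugepages/hugepages-2048kB/surplus_hugepages
 */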
2337
2338/*
2339 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
2340 * Returns node id via non-NULL nidp.
2341 */
2342static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2343{
2344 int nid;
2345
2346 for (nid = 0; nid < nr_node_ids; nid++) {
2347 struct node_hstate *nhs = &node_hstates[nid];
2348 int i;
2349 for (i = 0; i < HUGE_MAX_HSTATE; i++)
2350 if (nhs->hstate_kobjs[i] == kobj) {
2351 if (nidp)
2352 *nidp = nid;
2353 return &hstates[i];
2354 }
2355 }
2356
2357 BUG();
2358 return NULL;
2359}
2360
2361/*
2362 * Unregister hstate attributes from a single node device.
2363 * No-op if no hstate attributes attached.
2364 */
2365static void hugetlb_unregister_node(struct node *node)
2366{
2367 struct hstate *h;
2368 struct node_hstate *nhs = &node_hstates[node->dev.id];
2369
2370 if (!nhs->hugepages_kobj)
2371 return; /* no hstate attributes */
2372
2373 for_each_hstate(h) {
2374 int idx = hstate_index(h);
2375 if (nhs->hstate_kobjs[idx]) {
2376 kobject_put(nhs->hstate_kobjs[idx]);
2377 nhs->hstate_kobjs[idx] = NULL;
2378 }
2379 }
2380
2381 kobject_put(nhs->hugepages_kobj);
2382 nhs->hugepages_kobj = NULL;
2383}
2384
2385/*
2386 * hugetlb module exit: unregister hstate attributes from node devices
2387 * that have them.
2388 */
2389static void hugetlb_unregister_all_nodes(void)
2390{
2391 int nid;
2392
2393 /*
2394 * disable node device registrations.
2395 */
2396 register_hugetlbfs_with_node(NULL, NULL);
2397
2398 /*
2399 * remove hstate attributes from any nodes that have them.
2400 */
2401 for (nid = 0; nid < nr_node_ids; nid++)
2402 hugetlb_unregister_node(node_devices[nid]);
2403}
2404
2405/*
2406 * Register hstate attributes for a single node device.
2407 * No-op if attributes already registered.
2408 */
2409static void hugetlb_register_node(struct node *node)
2410{
2411 struct hstate *h;
2412 struct node_hstate *nhs = &node_hstates[node->dev.id];
2413 int err;
2414
2415 if (nhs->hugepages_kobj)
2416 return; /* already allocated */
2417
2418 nhs->hugepages_kobj = kobject_create_and_add("hugepages",
2419 &node->dev.kobj);
2420 if (!nhs->hugepages_kobj)
2421 return;
2422
2423 for_each_hstate(h) {
2424 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
2425 nhs->hstate_kobjs,
2426 &per_node_hstate_attr_group);
2427 if (err) {
2428 pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
2429 h->name, node->dev.id);
2430 hugetlb_unregister_node(node);
2431 break;
2432 }
2433 }
2434}
2435
2436/*
2437 * hugetlb init time: register hstate attributes for all registered node
2438 * devices of nodes that have memory. All on-line nodes should have
2439 * registered their associated device by this time.
2440 */
2441static void __init hugetlb_register_all_nodes(void)
2442{
2443 int nid;
2444
2445 for_each_node_state(nid, N_MEMORY) {
2446 struct node *node = node_devices[nid];
2447 if (node->dev.id == nid)
2448 hugetlb_register_node(node);
2449 }
2450
2451 /*
2452 * Let the node device driver know we're here so it can
2453 * [un]register hstate attributes on node hotplug.
2454 */
2455 register_hugetlbfs_with_node(hugetlb_register_node,
2456 hugetlb_unregister_node);
2457}
2458#else /* !CONFIG_NUMA */
2459
2460static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2461{
2462 BUG();
2463 if (nidp)
2464 *nidp = -1;
2465 return NULL;
2466}
2467
2468static void hugetlb_unregister_all_nodes(void) { }
2469
2470static void hugetlb_register_all_nodes(void) { }
2471
2472#endif
2473
2474static void __exit hugetlb_exit(void)
2475{
2476 struct hstate *h;
2477
2478 hugetlb_unregister_all_nodes();
2479
2480 for_each_hstate(h) {
2481 kobject_put(hstate_kobjs[hstate_index(h)]);
2482 }
2483
2484 kobject_put(hugepages_kobj);
2485 kfree(htlb_fault_mutex_table);
2486}
2487module_exit(hugetlb_exit);
2488
2489static int __init hugetlb_init(void)
2490{
2491 int i;
2492
2493 if (!hugepages_supported())
2494 return 0;
2495
2496 if (!size_to_hstate(default_hstate_size)) {
2497 default_hstate_size = HPAGE_SIZE;
2498 if (!size_to_hstate(default_hstate_size))
2499 hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
2500 }
2501 default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
2502 if (default_hstate_max_huge_pages)
2503 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
2504
2505 hugetlb_init_hstates();
2506 gather_bootmem_prealloc();
2507 report_hugepages();
2508
2509 hugetlb_sysfs_init();
2510 hugetlb_register_all_nodes();
2511 hugetlb_cgroup_file_init();
2512
2513#ifdef CONFIG_SMP
2514 num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
2515#else
2516 num_fault_mutexes = 1;
2517#endif
2518 htlb_fault_mutex_table =
2519 kmalloc(sizeof(struct mutex) * num_fault_mutexes, GFP_KERNEL);
2520 BUG_ON(!htlb_fault_mutex_table);
2521
2522 for (i = 0; i < num_fault_mutexes; i++)
2523 mutex_init(&htlb_fault_mutex_table[i]);
2524 return 0;
2525}
2526module_init(hugetlb_init);
2527
2528/* Should be called when processing a hugepagesz=... option */
2529void __init hugetlb_add_hstate(unsigned order)
2530{
2531 struct hstate *h;
2532 unsigned long i;
2533
2534 if (size_to_hstate(PAGE_SIZE << order)) {
2535 pr_warning("hugepagesz= specified twice, ignoring\n");
2536 return;
2537 }
2538 BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
2539 BUG_ON(order == 0);
2540 h = &hstates[hugetlb_max_hstate++];
2541 h->order = order;
2542 h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
2543 h->nr_huge_pages = 0;
2544 h->free_huge_pages = 0;
2545 for (i = 0; i < MAX_NUMNODES; ++i)
2546 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
2547 INIT_LIST_HEAD(&h->hugepage_activelist);
2548 h->next_nid_to_alloc = first_node(node_states[N_MEMORY]);
2549 h->next_nid_to_free = first_node(node_states[N_MEMORY]);
2550 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
2551 huge_page_size(h)/1024);
2552
2553 parsed_hstate = h;
2554}
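
/*
 * Editor's sketch: architectures are expected to call hugetlb_add_hstate()
 * from their own hugepagesz= handler after validating the size, roughly
 * along the lines of the x86 handler (simplified; cpu_has_gbpages is an
 * x86-specific capability check assumed here, not part of this file):
 */
static __init int setup_hugepagesz(char *opt)
{
	unsigned long ps = memparse(opt, &opt);

	if (ps == PMD_SIZE) {
		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	} else if (ps == PUD_SIZE && cpu_has_gbpages) {
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	} else {
		pr_err("hugepagesz: Unsupported page size %lu M\n",
		       ps >> 20);
		return 0;
	}
	return 1;
}
__setup("hugepagesz=", setup_hugepagesz);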
2555
2556static int __init hugetlb_nrpages_setup(char *s)
2557{
2558 unsigned long *mhp;
2559 static unsigned long *last_mhp;
2560
2561 /*
2562 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
2563 * so this hugepages= parameter goes to the "default hstate".
2564 */
2565 if (!hugetlb_max_hstate)
2566 mhp = &default_hstate_max_huge_pages;
2567 else
2568 mhp = &parsed_hstate->max_huge_pages;
2569
2570 if (mhp == last_mhp) {
2571 pr_warning("hugepages= specified twice without "
2572 "interleaving hugepagesz=, ignoring\n");
2573 return 1;
2574 }
2575
2576 if (sscanf(s, "%lu", mhp) <= 0)
2577 *mhp = 0;
2578
2579 /*
2580 * Global state is always initialized later in hugetlb_init.
2581 * But hstates of order >= MAX_ORDER need their pages allocated here,
2582 * early, while the bootmem allocator is still usable.
2583 */
2584 if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
2585 hugetlb_hstate_alloc_pages(parsed_hstate);
2586
2587 last_mhp = mhp;
2588
2589 return 1;
2590}
2591__setup("hugepages=", hugetlb_nrpages_setup);
2592
2593static int __init hugetlb_default_setup(char *s)
2594{
2595 default_hstate_size = memparse(s, &s);
2596 return 1;
2597}
2598__setup("default_hugepagesz=", hugetlb_default_setup);
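
/*
 * Editor's note: an illustrative boot command line combining the three
 * options parsed above:
 *
 *	hugepagesz=1G hugepages=2 hugepagesz=2M hugepages=512 default_hugepagesz=1G
 *
 * Each hugepages= applies to the most recently parsed hugepagesz=, and the
 * 1G pool is allocated from bootmem right at parse time because its order
 * meets or exceeds MAX_ORDER.
 */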
2599
2600static unsigned int cpuset_mems_nr(unsigned int *array)
2601{
2602 int node;
2603 unsigned int nr = 0;
2604
2605 for_each_node_mask(node, cpuset_current_mems_allowed)
2606 nr += array[node];
2607
2608 return nr;
2609}
2610
2611#ifdef CONFIG_SYSCTL
2612static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
2613 struct ctl_table *table, int write,
2614 void __user *buffer, size_t *length, loff_t *ppos)
2615{
2616 struct hstate *h = &default_hstate;
2617 unsigned long tmp = h->max_huge_pages;
2618 int ret;
2619
2620 if (!hugepages_supported())
2621 return -ENOTSUPP;
2622
2623 table->data = &tmp;
2624 table->maxlen = sizeof(unsigned long);
2625 ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2626 if (ret)
2627 goto out;
2628
2629 if (write)
2630 ret = __nr_hugepages_store_common(obey_mempolicy, h,
2631 NUMA_NO_NODE, tmp, *length);
2632out:
2633 return ret;
2634}
2635
2636int hugetlb_sysctl_handler(struct ctl_table *table, int write,
2637 void __user *buffer, size_t *length, loff_t *ppos)
2638{
2639
2640 return hugetlb_sysctl_handler_common(false, table, write,
2641 buffer, length, ppos);
2642}
2643
2644#ifdef CONFIG_NUMA
2645int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
2646 void __user *buffer, size_t *length, loff_t *ppos)
2647{
2648 return hugetlb_sysctl_handler_common(true, table, write,
2649 buffer, length, ppos);
2650}
2651#endif /* CONFIG_NUMA */
2652
2653int hugetlb_overcommit_handler(struct ctl_table *table, int write,
2654 void __user *buffer,
2655 size_t *length, loff_t *ppos)
2656{
2657 struct hstate *h = &default_hstate;
2658 unsigned long tmp;
2659 int ret;
2660
2661 if (!hugepages_supported())
2662 return -ENOTSUPP;
2663
2664 tmp = h->nr_overcommit_huge_pages;
2665
2666 if (write && hstate_is_gigantic(h))
2667 return -EINVAL;
2668
2669 table->data = &tmp;
2670 table->maxlen = sizeof(unsigned long);
2671 ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2672 if (ret)
2673 goto out;
2674
2675 if (write) {
2676 spin_lock(&hugetlb_lock);
2677 h->nr_overcommit_huge_pages = tmp;
2678 spin_unlock(&hugetlb_lock);
2679 }
2680out:
2681 return ret;
2682}
2683
2684#endif /* CONFIG_SYSCTL */
2685
2686void hugetlb_report_meminfo(struct seq_file *m)
2687{
2688 struct hstate *h = &default_hstate;
2689 if (!hugepages_supported())
2690 return;
2691 seq_printf(m,
2692 "HugePages_Total: %5lu\n"
2693 "HugePages_Free: %5lu\n"
2694 "HugePages_Rsvd: %5lu\n"
2695 "HugePages_Surp: %5lu\n"
2696 "Hugepagesize: %8lu kB\n",
2697 h->nr_huge_pages,
2698 h->free_huge_pages,
2699 h->resv_huge_pages,
2700 h->surplus_huge_pages,
2701 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2702}
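
/*
 * Editor's note: sample /proc/meminfo output produced by the function
 * above (illustrative values):
 *
 *	HugePages_Total:      20
 *	HugePages_Free:       18
 *	HugePages_Rsvd:        2
 *	HugePages_Surp:        0
 *	Hugepagesize:       2048 kB
 */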
2703
2704int hugetlb_report_node_meminfo(int nid, char *buf)
2705{
2706 struct hstate *h = &default_hstate;
2707 if (!hugepages_supported())
2708 return 0;
2709 return sprintf(buf,
2710 "Node %d HugePages_Total: %5u\n"
2711 "Node %d HugePages_Free: %5u\n"
2712 "Node %d HugePages_Surp: %5u\n",
2713 nid, h->nr_huge_pages_node[nid],
2714 nid, h->free_huge_pages_node[nid],
2715 nid, h->surplus_huge_pages_node[nid]);
2716}
2717
2718void hugetlb_show_meminfo(void)
2719{
2720 struct hstate *h;
2721 int nid;
2722
2723 if (!hugepages_supported())
2724 return;
2725
2726 for_each_node_state(nid, N_MEMORY)
2727 for_each_hstate(h)
2728 pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
2729 nid,
2730 h->nr_huge_pages_node[nid],
2731 h->free_huge_pages_node[nid],
2732 h->surplus_huge_pages_node[nid],
2733 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2734}
2735
2736/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
2737unsigned long hugetlb_total_pages(void)
2738{
2739 struct hstate *h;
2740 unsigned long nr_total_pages = 0;
2741
2742 for_each_hstate(h)
2743 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
2744 return nr_total_pages;
2745}
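
/*
 * Editor's worked example (assuming a 4 kB PAGE_SIZE): a 2 MB hstate with
 * nr_huge_pages == 512 contributes 512 * 512 = 262144 PAGE_SIZE pages,
 * i.e. 1 GB of memory.
 */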
2746
2747static int hugetlb_acct_memory(struct hstate *h, long delta)
2748{
2749 int ret = -ENOMEM;
2750
2751 spin_lock(&hugetlb_lock);
2752 /*
2753 * When cpuset is configured, it breaks the strict hugetlb page
2754 * reservation as the accounting is done on a global variable. Such
2755 * reservation is completely rubbish in the presence of cpuset because
2756 * the reservation is not checked against page availability for the
2757 * current cpuset. An application can still potentially be OOM'ed by
2758 * the kernel for lack of free hugetlb pages in the cpuset that the
2759 * task is in. Attempting to enforce strict accounting with cpuset is
2760 * almost impossible (or too ugly) because cpuset is so fluid that a
2761 * task or memory node can be dynamically moved between cpusets.
2762 *
2763 * The change of semantics for shared hugetlb mapping with cpuset is
2764 * undesirable. However, in order to preserve some of the semantics,
2765 * we fall back to checking against the current free page availability
2766 * as a best attempt, hopefully minimizing the impact of the changed
2767 * semantics that cpuset introduces.
2768 */
2769 if (delta > 0) {
2770 if (gather_surplus_pages(h, delta) < 0)
2771 goto out;
2772
2773 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
2774 return_unused_surplus_pages(h, delta);
2775 goto out;
2776 }
2777 }
2778
2779 ret = 0;
2780 if (delta < 0)
2781 return_unused_surplus_pages(h, (unsigned long) -delta);
2782
2783out:
2784 spin_unlock(&hugetlb_lock);
2785 return ret;
2786}
2787
2788static void hugetlb_vm_op_open(struct vm_area_struct *vma)
2789{
2790 struct resv_map *resv = vma_resv_map(vma);
2791
2792 /*
2793 * This new VMA should share its sibling's reservation map if present.
2794 * The VMA will only ever have a valid reservation map pointer where
2795 * it is being copied for another still existing VMA. As that VMA
2796 * has a reference to the reservation map it cannot disappear until
2797 * after this open call completes. It is therefore safe to take a
2798 * new reference here without additional locking.
2799 */
2800 if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
2801 kref_get(&resv->refs);
2802}
2803
2804static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2805{
2806 struct hstate *h = hstate_vma(vma);
2807 struct resv_map *resv = vma_resv_map(vma);
2808 struct hugepage_subpool *spool = subpool_vma(vma);
2809 unsigned long reserve, start, end;
2810 long gbl_reserve;
2811
2812 if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
2813 return;
2814
2815 start = vma_hugecache_offset(h, vma, vma->vm_start);
2816 end = vma_hugecache_offset(h, vma, vma->vm_end);
2817
2818 reserve = (end - start) - region_count(resv, start, end);
2819
2820 kref_put(&resv->refs, resv_map_release);
2821
2822 if (reserve) {
2823 /*
2824 * Decrement reserve counts. The global reserve count may be
2825 * adjusted if the subpool has a minimum size.
2826 */
2827 gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
2828 hugetlb_acct_memory(h, -gbl_reserve);
2829 }
2830}
2831
2832/*
2833 * We cannot handle pagefaults against hugetlb pages at all. They cause
2834 * handle_mm_fault() to try to instantiate regular-sized pages in the
2835 * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we
2836 * get this far.
2837 */
2838static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2839{
2840 BUG();
2841 return 0;
2842}
2843
2844const struct vm_operations_struct hugetlb_vm_ops = {
2845 .fault = hugetlb_vm_op_fault,
2846 .open = hugetlb_vm_op_open,
2847 .close = hugetlb_vm_op_close,
2848};
2849
2850static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
2851 int writable)
2852{
2853 pte_t entry;
2854
2855 if (writable) {
2856 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
2857 vma->vm_page_prot)));
2858 } else {
2859 entry = huge_pte_wrprotect(mk_huge_pte(page,
2860 vma->vm_page_prot));
2861 }
2862 entry = pte_mkyoung(entry);
2863 entry = pte_mkhuge(entry);
2864 entry = arch_make_huge_pte(entry, vma, page, writable);
2865
2866 return entry;
2867}
2868
2869static void set_huge_ptep_writable(struct vm_area_struct *vma,
2870 unsigned long address, pte_t *ptep)
2871{
2872 pte_t entry;
2873
2874 entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
2875 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
2876 update_mmu_cache(vma, address, ptep);
2877}
2878
2879static int is_hugetlb_entry_migration(pte_t pte)
2880{
2881 swp_entry_t swp;
2882
2883 if (huge_pte_none(pte) || pte_present(pte))
2884 return 0;
2885 swp = pte_to_swp_entry(pte);
2886 if (non_swap_entry(swp) && is_migration_entry(swp))
2887 return 1;
2888 else
2889 return 0;
2890}
2891
2892static int is_hugetlb_entry_hwpoisoned(pte_t pte)
2893{
2894 swp_entry_t swp;
2895
2896 if (huge_pte_none(pte) || pte_present(pte))
2897 return 0;
2898 swp = pte_to_swp_entry(pte);
2899 if (non_swap_entry(swp) && is_hwpoison_entry(swp))
2900 return 1;
2901 else
2902 return 0;
2903}
2904
2905int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
2906 struct vm_area_struct *vma)
2907{
2908 pte_t *src_pte, *dst_pte, entry;
2909 struct page *ptepage;
2910 unsigned long addr;
2911 int cow;
2912 struct hstate *h = hstate_vma(vma);
2913 unsigned long sz = huge_page_size(h);
2914 unsigned long mmun_start; /* For mmu_notifiers */
2915 unsigned long mmun_end; /* For mmu_notifiers */
2916 int ret = 0;
2917
2918 cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
2919
2920 mmun_start = vma->vm_start;
2921 mmun_end = vma->vm_end;
2922 if (cow)
2923 mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);
2924
2925 for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
2926 spinlock_t *src_ptl, *dst_ptl;
2927 src_pte = huge_pte_offset(src, addr);
2928 if (!src_pte)
2929 continue;
2930 dst_pte = huge_pte_alloc(dst, addr, sz);
2931 if (!dst_pte) {
2932 ret = -ENOMEM;
2933 break;
2934 }
2935
2936 /* If the pagetables are shared don't copy or take references */
2937 if (dst_pte == src_pte)
2938 continue;
2939
2940 dst_ptl = huge_pte_lock(h, dst, dst_pte);
2941 src_ptl = huge_pte_lockptr(h, src, src_pte);
2942 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
2943 entry = huge_ptep_get(src_pte);
2944 if (huge_pte_none(entry)) { /* skip none entry */
2945 ;
2946 } else if (unlikely(is_hugetlb_entry_migration(entry) ||
2947 is_hugetlb_entry_hwpoisoned(entry))) {
2948 swp_entry_t swp_entry = pte_to_swp_entry(entry);
2949
2950 if (is_write_migration_entry(swp_entry) && cow) {
2951 /*
2952 * COW mappings require pages in both
2953 * parent and child to be set to read.
2954 */
2955 make_migration_entry_read(&swp_entry);
2956 entry = swp_entry_to_pte(swp_entry);
2957 set_huge_pte_at(src, addr, src_pte, entry);
2958 }
2959 set_huge_pte_at(dst, addr, dst_pte, entry);
2960 } else {
2961 if (cow) {
2962 huge_ptep_set_wrprotect(src, addr, src_pte);
2963 mmu_notifier_invalidate_range(src, mmun_start,
2964 mmun_end);
2965 }
2966 entry = huge_ptep_get(src_pte);
2967 ptepage = pte_page(entry);
2968 get_page(ptepage);
2969 page_dup_rmap(ptepage);
2970 set_huge_pte_at(dst, addr, dst_pte, entry);
2971 }
2972 spin_unlock(src_ptl);
2973 spin_unlock(dst_ptl);
2974 }
2975
2976 if (cow)
2977 mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end);
2978
2979 return ret;
2980}
2981
2982void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
2983 unsigned long start, unsigned long end,
2984 struct page *ref_page)
2985{
2986 int force_flush = 0;
2987 struct mm_struct *mm = vma->vm_mm;
2988 unsigned long address;
2989 pte_t *ptep;
2990 pte_t pte;
2991 spinlock_t *ptl;
2992 struct page *page;
2993 struct hstate *h = hstate_vma(vma);
2994 unsigned long sz = huge_page_size(h);
2995 const unsigned long mmun_start = start; /* For mmu_notifiers */
2996 const unsigned long mmun_end = end; /* For mmu_notifiers */
2997
2998 WARN_ON(!is_vm_hugetlb_page(vma));
2999 BUG_ON(start & ~huge_page_mask(h));
3000 BUG_ON(end & ~huge_page_mask(h));
3001
3002 tlb_start_vma(tlb, vma);
3003 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3004 address = start;
3005again:
3006 for (; address < end; address += sz) {
3007 ptep = huge_pte_offset(mm, address);
3008 if (!ptep)
3009 continue;
3010
3011 ptl = huge_pte_lock(h, mm, ptep);
3012 if (huge_pmd_unshare(mm, &address, ptep))
3013 goto unlock;
3014
3015 pte = huge_ptep_get(ptep);
3016 if (huge_pte_none(pte))
3017 goto unlock;
3018
3019 /*
3020 * A migrating or HWPoisoned hugepage is already unmapped and its
3021 * refcount is dropped, so just clear the pte here.
3022 */
3023 if (unlikely(!pte_present(pte))) {
3024 huge_pte_clear(mm, address, ptep);
3025 goto unlock;
3026 }
3027
3028 page = pte_page(pte);
3029 /*
3030 * If a reference page is supplied, it is because a specific
3031 * page is being unmapped, not a range. Ensure the page we
3032 * are about to unmap is the actual page of interest.
3033 */
3034 if (ref_page) {
3035 if (page != ref_page)
3036 goto unlock;
3037
3038 /*
3039 * Mark the VMA as having unmapped its page so that
3040 * future faults in this VMA will fail rather than
3041 * looking like data was lost
3042 */
3043 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
3044 }
3045
3046 pte = huge_ptep_get_and_clear(mm, address, ptep);
3047 tlb_remove_tlb_entry(tlb, ptep, address);
3048 if (huge_pte_dirty(pte))
3049 set_page_dirty(page);
3050
3051 page_remove_rmap(page);
3052 force_flush = !__tlb_remove_page(tlb, page);
3053 if (force_flush) {
3054 address += sz;
3055 spin_unlock(ptl);
3056 break;
3057 }
3058 /* Bail out after unmapping reference page if supplied */
3059 if (ref_page) {
3060 spin_unlock(ptl);
3061 break;
3062 }
3063unlock:
3064 spin_unlock(ptl);
3065 }
3066 /*
3067 * mmu_gather ran out of room to batch pages, so we break out of
3068 * the PTE lock to avoid doing the potentially expensive TLB invalidate
3069 * and page-free while holding it.
3070 */
3071 if (force_flush) {
3072 force_flush = 0;
3073 tlb_flush_mmu(tlb);
3074 if (address < end && !ref_page)
3075 goto again;
3076 }
3077 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3078 tlb_end_vma(tlb, vma);
3079}
3080
3081void __unmap_hugepage_range_final(struct mmu_gather *tlb,
3082 struct vm_area_struct *vma, unsigned long start,
3083 unsigned long end, struct page *ref_page)
3084{
3085 __unmap_hugepage_range(tlb, vma, start, end, ref_page);
3086
3087 /*
3088 * Clear this flag so that x86's huge_pmd_share page_table_shareable
3089 * test will fail on a vma being torn down, and not grab a page table
3090 * on its way out. We're lucky that the flag has such an appropriate
3091 * name, and can in fact be safely cleared here. We could clear it
3092 * before the __unmap_hugepage_range above, but all that's necessary
3093 * is to clear it before releasing the i_mmap_rwsem. This works
3094 * because in the context this is called, the VMA is about to be
3095 * destroyed and the i_mmap_rwsem is held.
3096 */
3097 vma->vm_flags &= ~VM_MAYSHARE;
3098}
3099
3100void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
3101 unsigned long end, struct page *ref_page)
3102{
3103 struct mm_struct *mm;
3104 struct mmu_gather tlb;
3105
3106 mm = vma->vm_mm;
3107
3108 tlb_gather_mmu(&tlb, mm, start, end);
3109 __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
3110 tlb_finish_mmu(&tlb, start, end);
3111}
3112
3113/*
3114 * This is called when the original mapper is failing to COW a MAP_PRIVATE
3115 * mapping it owns the reserve page for. The intention is to unmap the page
3116 * from other VMAs and let the children be SIGKILLed if they are faulting the
3117 * same region.
3118 */
3119static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
3120 struct page *page, unsigned long address)
3121{
3122 struct hstate *h = hstate_vma(vma);
3123 struct vm_area_struct *iter_vma;
3124 struct address_space *mapping;
3125 pgoff_t pgoff;
3126
3127 /*
3128 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
3129 * from page cache lookup which is in HPAGE_SIZE units.
3130 */
3131 address = address & huge_page_mask(h);
3132 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
3133 vma->vm_pgoff;
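	/*
	 * Editor's worked example (assuming a 4 kB PAGE_SIZE and a 2 MB
	 * hstate): an address 4 MB past vm_start gives
	 * pgoff = vma->vm_pgoff + 1024, i.e. the offset stays in
	 * PAGE_SIZE units here.
	 */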
3134 mapping = file_inode(vma->vm_file)->i_mapping;
3135
3136 /*
3137 * Take the mapping lock for the duration of the table walk. As
3138 * this mapping should be shared between all the VMAs,
3139 * __unmap_hugepage_range() is called as the lock is already held
3140 */
3141 i_mmap_lock_write(mapping);
3142 vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
3143 /* Do not unmap the current VMA */
3144 if (iter_vma == vma)
3145 continue;
3146
3147 /*
3148 * Unmap the page from other VMAs without their own reserves.
3149 * They get marked to be SIGKILLed if they fault in these
3150 * areas. This is because a future no-page fault on this VMA
3151 * could insert a zeroed page instead of the data existing
3152 * from the time of fork. This would look like data corruption
3153 */
3154 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
3155 unmap_hugepage_range(iter_vma, address,
3156 address + huge_page_size(h), page);
3157 }
3158 i_mmap_unlock_write(mapping);
3159}
3160
3161/*
3162 * Hugetlb_cow() should be called with page lock of the original hugepage held.
3163 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
3164 * cannot race with other handlers or page migration.
3165 * Keep the pte_same checks anyway to make transition from the mutex easier.
3166 */
3167static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
3168 unsigned long address, pte_t *ptep, pte_t pte,
3169 struct page *pagecache_page, spinlock_t *ptl)
3170{
3171 struct hstate *h = hstate_vma(vma);
3172 struct page *old_page, *new_page;
3173 int ret = 0, outside_reserve = 0;
3174 unsigned long mmun_start; /* For mmu_notifiers */
3175 unsigned long mmun_end; /* For mmu_notifiers */
3176
3177 old_page = pte_page(pte);
3178
3179retry_avoidcopy:
3180 /* If no-one else is actually using this page, avoid the copy
3181 * and just make the page writable */
3182 if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
3183 page_move_anon_rmap(old_page, vma, address);
3184 set_huge_ptep_writable(vma, address, ptep);
3185 return 0;
3186 }
3187
3188 /*
3189 * If the process that created a MAP_PRIVATE mapping is about to
3190 * perform a COW due to a shared page count, attempt to satisfy
3191 * the allocation without using the existing reserves. The pagecache
3192 * page is used to determine if the reserve at this address was
3193 * consumed or not. If reserves were used, a partial faulted mapping
3194 * at the time of fork() could consume its reserves on COW instead
3195 * of the full address range.
3196 */
3197 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
3198 old_page != pagecache_page)
3199 outside_reserve = 1;
3200
3201 page_cache_get(old_page);
3202
3203 /*
3204 * Drop page table lock as buddy allocator may be called. It will
3205 * be acquired again before returning to the caller, as expected.
3206 */
3207 spin_unlock(ptl);
3208 new_page = alloc_huge_page(vma, address, outside_reserve);
3209
3210 if (IS_ERR(new_page)) {
3211 /*
3212 * If a process owning a MAP_PRIVATE mapping fails to COW,
3213 * it is due to references held by a child and an insufficient
3214 * huge page pool. To guarantee the original mapper's
3215 * reliability, unmap the page from child processes. The child
3216 * may get SIGKILLed if it later faults.
3217 */
3218 if (outside_reserve) {
3219 page_cache_release(old_page);
3220 BUG_ON(huge_pte_none(pte));
3221 unmap_ref_private(mm, vma, old_page, address);
3222 BUG_ON(huge_pte_none(pte));
3223 spin_lock(ptl);
3224 ptep = huge_pte_offset(mm, address & huge_page_mask(h));
3225 if (likely(ptep &&
3226 pte_same(huge_ptep_get(ptep), pte)))
3227 goto retry_avoidcopy;
3228 /*
3229 * A race occurred while re-acquiring the page table
3230 * lock; our job is done.
3231 */
3232 return 0;
3233 }
3234
3235 ret = (PTR_ERR(new_page) == -ENOMEM) ?
3236 VM_FAULT_OOM : VM_FAULT_SIGBUS;
3237 goto out_release_old;
3238 }
3239
3240 /*
3241 * When the original hugepage is a shared one, it does not have
3242 * an anon_vma prepared.
3243 */
3244 if (unlikely(anon_vma_prepare(vma))) {
3245 ret = VM_FAULT_OOM;
3246 goto out_release_all;
3247 }
3248
3249 copy_user_huge_page(new_page, old_page, address, vma,
3250 pages_per_huge_page(h));
3251 __SetPageUptodate(new_page);
3252 set_page_huge_active(new_page);
3253
3254 mmun_start = address & huge_page_mask(h);
3255 mmun_end = mmun_start + huge_page_size(h);
3256 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3257
3258 /*
3259 * Retake the page table lock to check for racing updates
3260 * before the page tables are altered
3261 */
3262 spin_lock(ptl);
3263 ptep = huge_pte_offset(mm, address & huge_page_mask(h));
3264 if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
3265 ClearPagePrivate(new_page);
3266
3267 /* Break COW */
3268 huge_ptep_clear_flush(vma, address, ptep);
3269 mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
3270 set_huge_pte_at(mm, address, ptep,
3271 make_huge_pte(vma, new_page, 1));
3272 page_remove_rmap(old_page);
3273 hugepage_add_new_anon_rmap(new_page, vma, address);
3274 /* Make the old page be freed below */
3275 new_page = old_page;
3276 }
3277 spin_unlock(ptl);
3278 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3279out_release_all:
3280 page_cache_release(new_page);
3281out_release_old:
3282 page_cache_release(old_page);
3283
3284 spin_lock(ptl); /* Caller expects lock to be held */
3285 return ret;
3286}
3287
3288/* Return the pagecache page at a given address within a VMA */
3289static struct page *hugetlbfs_pagecache_page(struct hstate *h,
3290 struct vm_area_struct *vma, unsigned long address)
3291{
3292 struct address_space *mapping;
3293 pgoff_t idx;
3294
3295 mapping = vma->vm_file->f_mapping;
3296 idx = vma_hugecache_offset(h, vma, address);
3297
3298 return find_lock_page(mapping, idx);
3299}
3300
3301/*
3302 * Return whether there is a pagecache page to back the given address within the VMA.
3303 * The caller, follow_hugetlb_page(), holds page_table_lock, so we cannot lock_page.
3304 */
3305static bool hugetlbfs_pagecache_present(struct hstate *h,
3306 struct vm_area_struct *vma, unsigned long address)
3307{
3308 struct address_space *mapping;
3309 pgoff_t idx;
3310 struct page *page;
3311
3312 mapping = vma->vm_file->f_mapping;
3313 idx = vma_hugecache_offset(h, vma, address);
3314
3315 page = find_get_page(mapping, idx);
3316 if (page)
3317 put_page(page);
3318 return page != NULL;
3319}
3320
3321static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
3322 struct address_space *mapping, pgoff_t idx,
3323 unsigned long address, pte_t *ptep, unsigned int flags)
3324{
3325 struct hstate *h = hstate_vma(vma);
3326 int ret = VM_FAULT_SIGBUS;
3327 int anon_rmap = 0;
3328 unsigned long size;
3329 struct page *page;
3330 pte_t new_pte;
3331 spinlock_t *ptl;
3332
3333 /*
3334 * Currently, we are forced to kill the process in the event the
3335 * original mapper has unmapped pages from the child due to a failed
3336 * COW. Warn that such a situation has occurred as it may not be obvious
3337 */
3338 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
3339 pr_warning("PID %d killed due to inadequate hugepage pool\n",
3340 current->pid);
3341 return ret;
3342 }
3343
3344 /*
3345 * Use page lock to guard against racing truncation
3346 * before we get page_table_lock.
3347 */
3348retry:
3349 page = find_lock_page(mapping, idx);
3350 if (!page) {
3351 size = i_size_read(mapping->host) >> huge_page_shift(h);
3352 if (idx >= size)
3353 goto out;
3354 page = alloc_huge_page(vma, address, 0);
3355 if (IS_ERR(page)) {
3356 ret = PTR_ERR(page);
3357 if (ret == -ENOMEM)
3358 ret = VM_FAULT_OOM;
3359 else
3360 ret = VM_FAULT_SIGBUS;
3361 goto out;
3362 }
3363 clear_huge_page(page, address, pages_per_huge_page(h));
3364 __SetPageUptodate(page);
3365 set_page_huge_active(page);
3366
3367 if (vma->vm_flags & VM_MAYSHARE) {
3368 int err;
3369 struct inode *inode = mapping->host;
3370
3371 err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
3372 if (err) {
3373 put_page(page);
3374 if (err == -EEXIST)
3375 goto retry;
3376 goto out;
3377 }
3378 ClearPagePrivate(page);
3379
3380 spin_lock(&inode->i_lock);
3381 inode->i_blocks += blocks_per_huge_page(h);
3382 spin_unlock(&inode->i_lock);
3383 } else {
3384 lock_page(page);
3385 if (unlikely(anon_vma_prepare(vma))) {
3386 ret = VM_FAULT_OOM;
3387 goto backout_unlocked;
3388 }
3389 anon_rmap = 1;
3390 }
3391 } else {
3392 /*
3393 * If a memory error occurs between mmap() and fault, some processes
3394 * don't have a hwpoisoned swap entry for the errored virtual address.
3395 * So we need to block hugepage faults with a PG_hwpoison bit check.
3396 */
3397 if (unlikely(PageHWPoison(page))) {
3398 ret = VM_FAULT_HWPOISON |
3399 VM_FAULT_SET_HINDEX(hstate_index(h));
3400 goto backout_unlocked;
3401 }
3402 }
3403
3404 /*
3405 * If we are going to COW a private mapping later, we examine the
3406 * pending reservations for this page now. This will ensure that
3407 * any allocations necessary to record that reservation occur outside
3408 * the spinlock.
3409 */
3410 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3411 if (vma_needs_reservation(h, vma, address) < 0) {
3412 ret = VM_FAULT_OOM;
3413 goto backout_unlocked;
3414 }
3415 /* Just decrements count, does not deallocate */
3416 vma_end_reservation(h, vma, address);
3417 }
3418
3419 ptl = huge_pte_lockptr(h, mm, ptep);
3420 spin_lock(ptl);
3421 size = i_size_read(mapping->host) >> huge_page_shift(h);
3422 if (idx >= size)
3423 goto backout;
3424
3425 ret = 0;
3426 if (!huge_pte_none(huge_ptep_get(ptep)))
3427 goto backout;
3428
3429 if (anon_rmap) {
3430 ClearPagePrivate(page);
3431 hugepage_add_new_anon_rmap(page, vma, address);
3432 } else
3433 page_dup_rmap(page);
3434 new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
3435 && (vma->vm_flags & VM_SHARED)));
3436 set_huge_pte_at(mm, address, ptep, new_pte);
3437
3438 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3439 /* Optimization, do the COW without a second fault */
3440 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
3441 }
3442
3443 spin_unlock(ptl);
3444 unlock_page(page);
3445out:
3446 return ret;
3447
3448backout:
3449 spin_unlock(ptl);
3450backout_unlocked:
3451 unlock_page(page);
3452 put_page(page);
3453 goto out;
3454}
3455
3456#ifdef CONFIG_SMP
3457static u32 fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3458 struct vm_area_struct *vma,
3459 struct address_space *mapping,
3460 pgoff_t idx, unsigned long address)
3461{
3462 unsigned long key[2];
3463 u32 hash;
3464
3465 if (vma->vm_flags & VM_SHARED) {
3466 key[0] = (unsigned long) mapping;
3467 key[1] = idx;
3468 } else {
3469 key[0] = (unsigned long) mm;
3470 key[1] = address >> huge_page_shift(h);
3471 }
3472
3473 hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
3474
3475 return hash & (num_fault_mutexes - 1);
3476}
3477#else
3478/*
3479 * For uniprocessor systems we always use a single mutex, so just
3480 * return 0 and avoid the hashing overhead.
3481 */
3482static u32 fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3483 struct vm_area_struct *vma,
3484 struct address_space *mapping,
3485 pgoff_t idx, unsigned long address)
3486{
3487 return 0;
3488}
3489#endif
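
/*
 * Editor's note: callers serialize on the hashed mutex like so (this
 * mirrors the use in hugetlb_fault() below):
 *
 *	hash = fault_mutex_hash(h, mm, vma, mapping, idx, address);
 *	mutex_lock(&htlb_fault_mutex_table[hash]);
 *	... handle the fault ...
 *	mutex_unlock(&htlb_fault_mutex_table[hash]);
 */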
3490
3491int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3492 unsigned long address, unsigned int flags)
3493{
3494 pte_t *ptep, entry;
3495 spinlock_t *ptl;
3496 int ret;
3497 u32 hash;
3498 pgoff_t idx;
3499 struct page *page = NULL;
3500 struct page *pagecache_page = NULL;
3501 struct hstate *h = hstate_vma(vma);
3502 struct address_space *mapping;
3503 int need_wait_lock = 0;
3504
3505 address &= huge_page_mask(h);
3506
3507 ptep = huge_pte_offset(mm, address);
3508 if (ptep) {
3509 entry = huge_ptep_get(ptep);
3510 if (unlikely(is_hugetlb_entry_migration(entry))) {
3511 migration_entry_wait_huge(vma, mm, ptep);
3512 return 0;
3513 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
3514 return VM_FAULT_HWPOISON_LARGE |
3515 VM_FAULT_SET_HINDEX(hstate_index(h));
3516 }
3517
3518 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
3519 if (!ptep)
3520 return VM_FAULT_OOM;
3521
3522 mapping = vma->vm_file->f_mapping;
3523 idx = vma_hugecache_offset(h, vma, address);
3524
3525 /*
3526 * Serialize hugepage allocation and instantiation, so that we don't
3527 * get spurious allocation failures if two CPUs race to instantiate
3528 * the same page in the page cache.
3529 */
3530 hash = fault_mutex_hash(h, mm, vma, mapping, idx, address);
3531 mutex_lock(&htlb_fault_mutex_table[hash]);
3532
3533 entry = huge_ptep_get(ptep);
3534 if (huge_pte_none(entry)) {
3535 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
3536 goto out_mutex;
3537 }
3538
3539 ret = 0;
3540
3541 /*
3542 * entry could be a migration/hwpoison entry at this point, so this
3543 * check prevents the kernel from going below assuming that we have
3544 * an active hugepage in the pagecache. The goto defers handling to a
3545 * second page fault, where the is_hugetlb_entry_(migration|hwpoisoned)
3546 * checks will properly handle it.
3547 */
3548 if (!pte_present(entry))
3549 goto out_mutex;
3550
3551 /*
3552 * If we are going to COW the mapping later, we examine the pending
3553 * reservations for this page now. This will ensure that any
3554 * allocations necessary to record that reservation occur outside the
3555 * spinlock. For private mappings, we also lookup the pagecache
3556 * page now as it is used to determine if a reservation has been
3557 * consumed.
3558 */
3559 if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
3560 if (vma_needs_reservation(h, vma, address) < 0) {
3561 ret = VM_FAULT_OOM;
3562 goto out_mutex;
3563 }
3564 /* Just decrements count, does not deallocate */
3565 vma_end_reservation(h, vma, address);
3566
3567 if (!(vma->vm_flags & VM_MAYSHARE))
3568 pagecache_page = hugetlbfs_pagecache_page(h,
3569 vma, address);
3570 }
3571
3572 ptl = huge_pte_lock(h, mm, ptep);
3573
3574 /* Check for a racing update before calling hugetlb_cow */
3575 if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
3576 goto out_ptl;
3577
3578 /*
3579 * hugetlb_cow() requires page locks of pte_page(entry) and
3580 * pagecache_page, so here we need to take the former one
3581 * when page != pagecache_page or !pagecache_page.
3582 */
3583 page = pte_page(entry);
3584 if (page != pagecache_page)
3585 if (!trylock_page(page)) {
3586 need_wait_lock = 1;
3587 goto out_ptl;
3588 }
3589
3590 get_page(page);
3591
3592 if (flags & FAULT_FLAG_WRITE) {
3593 if (!huge_pte_write(entry)) {
3594 ret = hugetlb_cow(mm, vma, address, ptep, entry,
3595 pagecache_page, ptl);
3596 goto out_put_page;
3597 }
3598 entry = huge_pte_mkdirty(entry);
3599 }
3600 entry = pte_mkyoung(entry);
3601 if (huge_ptep_set_access_flags(vma, address, ptep, entry,
3602 flags & FAULT_FLAG_WRITE))
3603 update_mmu_cache(vma, address, ptep);
3604out_put_page:
3605 if (page != pagecache_page)
3606 unlock_page(page);
3607 put_page(page);
3608out_ptl:
3609 spin_unlock(ptl);
3610
3611 if (pagecache_page) {
3612 unlock_page(pagecache_page);
3613 put_page(pagecache_page);
3614 }
3615out_mutex:
3616 mutex_unlock(&htlb_fault_mutex_table[hash]);
3617 /*
3618 * Generally it's safe to hold a refcount while waiting on a page lock. But
3619 * here we just wait to defer the next page fault and avoid a busy loop; the
3620 * page is not used after being unlocked and before the current page fault
3621 * returns. So we are safe from accessing a freed page even if we wait
3622 * here without taking a refcount.
3623 */
3624 if (need_wait_lock)
3625 wait_on_page_locked(page);
3626 return ret;
3627}
3628
3629long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
3630 struct page **pages, struct vm_area_struct **vmas,
3631 unsigned long *position, unsigned long *nr_pages,
3632 long i, unsigned int flags)
3633{
3634 unsigned long pfn_offset;
3635 unsigned long vaddr = *position;
3636 unsigned long remainder = *nr_pages;
3637 struct hstate *h = hstate_vma(vma);
3638
3639 while (vaddr < vma->vm_end && remainder) {
3640 pte_t *pte;
3641 spinlock_t *ptl = NULL;
3642 int absent;
3643 struct page *page;
3644
3645 /*
3646 * If we have a pending SIGKILL, don't keep faulting pages and
3647 * potentially allocating memory.
3648 */
3649 if (unlikely(fatal_signal_pending(current))) {
3650 remainder = 0;
3651 break;
3652 }
3653
3654 /*
3655 * Some archs (sparc64, sh*) have multiple pte_t entries for
3656 * each hugepage. We have to make sure we get the
3657 * first one, for the page indexing below to work.
3658 *
3659 * Note that page table lock is not held when pte is null.
3660 */
3661 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
3662 if (pte)
3663 ptl = huge_pte_lock(h, mm, pte);
3664 absent = !pte || huge_pte_none(huge_ptep_get(pte));
3665
3666 /*
3667 * When coredumping, it suits get_dump_page if we just return
3668 * an error where there's an empty slot with no huge pagecache
3669 * to back it. This way, we avoid allocating a hugepage, and
3670 * the sparse dumpfile avoids allocating disk blocks, but its
3671 * huge holes still show up with zeroes where they need to be.
3672 */
3673 if (absent && (flags & FOLL_DUMP) &&
3674 !hugetlbfs_pagecache_present(h, vma, vaddr)) {
3675 if (pte)
3676 spin_unlock(ptl);
3677 remainder = 0;
3678 break;
3679 }
3680
3681 /*
3682 * We need to call hugetlb_fault for both hugepages under migration
3683 * (in which case hugetlb_fault waits for the migration) and
3684 * hwpoisoned hugepages (in which case we need to prevent the
3685 * caller from accessing them). To do this, we use is_swap_pte
3686 * here instead of is_hugetlb_entry_migration and
3687 * is_hugetlb_entry_hwpoisoned, because it simply covers
3688 * both cases and because we can't follow correct pages
3689 * directly from any kind of swap entry.
3690 */
3691 if (absent || is_swap_pte(huge_ptep_get(pte)) ||
3692 ((flags & FOLL_WRITE) &&
3693 !huge_pte_write(huge_ptep_get(pte)))) {
3694 int ret;
3695
3696 if (pte)
3697 spin_unlock(ptl);
3698 ret = hugetlb_fault(mm, vma, vaddr,
3699 (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
3700 if (!(ret & VM_FAULT_ERROR))
3701 continue;
3702
3703 remainder = 0;
3704 break;
3705 }
3706
3707 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
3708 page = pte_page(huge_ptep_get(pte));
3709same_page:
3710 if (pages) {
3711 pages[i] = mem_map_offset(page, pfn_offset);
3712 get_page_foll(pages[i]);
3713 }
3714
3715 if (vmas)
3716 vmas[i] = vma;
3717
3718 vaddr += PAGE_SIZE;
3719 ++pfn_offset;
3720 --remainder;
3721 ++i;
3722 if (vaddr < vma->vm_end && remainder &&
3723 pfn_offset < pages_per_huge_page(h)) {
3724 /*
3725 * We use pfn_offset to avoid touching the pageframes
3726 * of this compound page.
3727 */
3728 goto same_page;
3729 }
3730 spin_unlock(ptl);
3731 }
3732 *nr_pages = remainder;
3733 *position = vaddr;
3734
3735 return i ? i : -EFAULT;
3736}
3737
3738unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
3739 unsigned long address, unsigned long end, pgprot_t newprot)
3740{
3741 struct mm_struct *mm = vma->vm_mm;
3742 unsigned long start = address;
3743 pte_t *ptep;
3744 pte_t pte;
3745 struct hstate *h = hstate_vma(vma);
3746 unsigned long pages = 0;
3747
3748 BUG_ON(address >= end);
3749 flush_cache_range(vma, address, end);
3750
3751 mmu_notifier_invalidate_range_start(mm, start, end);
3752 i_mmap_lock_write(vma->vm_file->f_mapping);
3753 for (; address < end; address += huge_page_size(h)) {
3754 spinlock_t *ptl;
3755 ptep = huge_pte_offset(mm, address);
3756 if (!ptep)
3757 continue;
3758 ptl = huge_pte_lock(h, mm, ptep);
3759 if (huge_pmd_unshare(mm, &address, ptep)) {
3760 pages++;
3761 spin_unlock(ptl);
3762 continue;
3763 }
3764 pte = huge_ptep_get(ptep);
3765 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
3766 spin_unlock(ptl);
3767 continue;
3768 }
3769 if (unlikely(is_hugetlb_entry_migration(pte))) {
3770 swp_entry_t entry = pte_to_swp_entry(pte);
3771
3772 if (is_write_migration_entry(entry)) {
3773 pte_t newpte;
3774
3775 make_migration_entry_read(&entry);
3776 newpte = swp_entry_to_pte(entry);
3777 set_huge_pte_at(mm, address, ptep, newpte);
3778 pages++;
3779 }
3780 spin_unlock(ptl);
3781 continue;
3782 }
3783 if (!huge_pte_none(pte)) {
3784 pte = huge_ptep_get_and_clear(mm, address, ptep);
3785 pte = pte_mkhuge(huge_pte_modify(pte, newprot));
3786 pte = arch_make_huge_pte(pte, vma, NULL, 0);
3787 set_huge_pte_at(mm, address, ptep, pte);
3788 pages++;
3789 }
3790 spin_unlock(ptl);
3791 }
3792 /*
3793 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
3794 * may have cleared our pud entry and done put_page on the page table:
3795 * once we release i_mmap_rwsem, another task can do the final put_page
3796 * and that page table be reused and filled with junk.
3797 */
3798 flush_tlb_range(vma, start, end);
3799 mmu_notifier_invalidate_range(mm, start, end);
3800 i_mmap_unlock_write(vma->vm_file->f_mapping);
3801 mmu_notifier_invalidate_range_end(mm, start, end);
3802
3803 return pages << h->order;
3804}
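
hugetlb_change_protection() is reached from the generic mprotect() path when the VMA is backed by hugetlb, and the value it returns is in base pages (pages << h->order). A hedged userspace demonstration of that path; MAP_HUGETLB and mprotect() are real interfaces, but the 2 MiB size is an assumption about the default hstate, and the hugepage pool must already be configured (e.g. via vm.nr_hugepages):

/* Sketch: mprotect() on a MAP_HUGETLB region ends up in
 * hugetlb_change_protection(). Requires a configured hugepage pool,
 * e.g. echo 1 > /proc/sys/vm/nr_hugepages. */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 2 * 1024 * 1024;	/* assumes 2 MiB default huge size */
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)");
		return 1;
	}
	memset(p, 0xaa, len);		/* fault the huge page in */

	/* Whole-hugepage granularity: address and length must be
	 * multiples of the huge page size. */
	if (mprotect(p, len, PROT_READ))
		perror("mprotect");
	else
		printf("huge mapping is now read-only\n");

	munmap(p, len);
	return 0;
}
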
3805
3806int hugetlb_reserve_pages(struct inode *inode,
3807 long from, long to,
3808 struct vm_area_struct *vma,
3809 vm_flags_t vm_flags)
3810{
3811 long ret, chg;
3812 struct hstate *h = hstate_inode(inode);
3813 struct hugepage_subpool *spool = subpool_inode(inode);
3814 struct resv_map *resv_map;
3815 long gbl_reserve;
3816
3817 /*
3818 * Only apply hugepage reservation if asked. At fault time, an
3819 * attempt will be made for VM_NORESERVE to allocate a page
3820 * without using reserves
3821 */
3822 if (vm_flags & VM_NORESERVE)
3823 return 0;
3824
3825 /*
3826 * Shared mappings base their reservation on the number of pages that
3827 * are already allocated on behalf of the file. Private mappings need
3828 * to reserve the full area even if read-only as mprotect() may be
3829 * called to make the mapping read-write. Assume !vma is a shm mapping
3830 */
3831 if (!vma || vma->vm_flags & VM_MAYSHARE) {
3832 resv_map = inode_resv_map(inode);
3833
3834 chg = region_chg(resv_map, from, to);
3835
3836 } else {
3837 resv_map = resv_map_alloc();
3838 if (!resv_map)
3839 return -ENOMEM;
3840
3841 chg = to - from;
3842
3843 set_vma_resv_map(vma, resv_map);
3844 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
3845 }
3846
3847 if (chg < 0) {
3848 ret = chg;
3849 goto out_err;
3850 }
3851
3852 /*
3853 * There must be enough pages in the subpool for the mapping. If
3854 * the subpool has a minimum size, there may be some global
3855 * reservations already in place (gbl_reserve).
3856 */
3857 gbl_reserve = hugepage_subpool_get_pages(spool, chg);
3858 if (gbl_reserve < 0) {
3859 ret = -ENOSPC;
3860 goto out_err;
3861 }
3862
3863 /*
3864 * Check that enough hugepages are available for the reservation.
3865 * Hand the pages back to the subpool if there are not enough.
3866 */
3867 ret = hugetlb_acct_memory(h, gbl_reserve);
3868 if (ret < 0) {
3869 /* put back original number of pages, chg */
3870 (void)hugepage_subpool_put_pages(spool, chg);
3871 goto out_err;
3872 }
3873
3874 /*
3875 * Account for the reservations made. Shared mappings record regions
3876 * that have reservations as they are shared by multiple VMAs.
3877 * When the last VMA disappears, the region map says how much
3878 * the reservation was and the page cache tells how much of
3879 * the reservation was consumed. Private mappings are per-VMA and
3880 * only the consumed reservations are tracked. When the VMA
3881 * disappears, the original reservation is the VMA size and the
3882 * consumed reservations are stored in the map. Hence, nothing
3883 * else has to be done for private mappings here
3884 */
3885 if (!vma || vma->vm_flags & VM_MAYSHARE) {
3886 long add = region_add(resv_map, from, to);
3887
3888 if (unlikely(chg > add)) {
3889 /*
3890 * pages in this range were added to the reserve
3891 * map between region_chg and region_add. This
3892 * indicates a race with alloc_huge_page. Adjust
3893 * the subpool and reserve counts modified above
3894 * based on the difference.
3895 */
3896 long rsv_adjust;
3897
3898 rsv_adjust = hugepage_subpool_put_pages(spool,
3899 chg - add);
3900 hugetlb_acct_memory(h, -rsv_adjust);
3901 }
3902 }
3903 return 0;
3904out_err:
3905 if (!vma || vma->vm_flags & VM_MAYSHARE)
3906 region_abort(resv_map, from, to);
3907 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3908 kref_put(&resv_map->refs, resv_map_release);
3909 return ret;
3910}
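
For shared mappings this reservation is taken at mmap() time, well before any fault, so it can be observed from userspace as a jump in HugePages_Rsvd. A sketch, assuming a configured pool of default-sized (here 2 MiB) huge pages:

/* Sketch: observe the reservation taken by hugetlb_reserve_pages()
 * at mmap() time via HugePages_Rsvd in /proc/meminfo. */
#include <stdio.h>
#include <sys/mman.h>

static long rsvd_pages(void)
{
	char line[128];
	long val = -1;
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f)
		return -1;
	while (fgets(line, sizeof(line), f))
		if (sscanf(line, "HugePages_Rsvd: %ld", &val) == 1)
			break;
	fclose(f);
	return val;
}

int main(void)
{
	size_t len = 2 * 1024 * 1024;
	long before = rsvd_pages();
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	printf("HugePages_Rsvd: %ld -> %ld\n", before, rsvd_pages());
	munmap(p, len);
	return 0;
}
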
3911
3912void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
3913{
3914 struct hstate *h = hstate_inode(inode);
3915 struct resv_map *resv_map = inode_resv_map(inode);
3916 long chg = 0;
3917 struct hugepage_subpool *spool = subpool_inode(inode);
3918 long gbl_reserve;
3919
3920 if (resv_map)
3921 chg = region_del(resv_map, offset, LONG_MAX);
3922 spin_lock(&inode->i_lock);
3923 inode->i_blocks -= (blocks_per_huge_page(h) * freed);
3924 spin_unlock(&inode->i_lock);
3925
3926 /*
3927 * If the subpool has a minimum size, the number of global
3928 * reservations to be released may be adjusted.
3929 */
3930 gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
3931 hugetlb_acct_memory(h, -gbl_reserve);
3932}
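
Here chg is the number of pages region_del() removed from the reserve map, while freed counts the pages actually released from the page cache; the difference is the unconsumed reservation handed back to the subpool and the global pool. A toy calculation with made-up numbers:

#include <stdio.h>

int main(void)
{
	long chg = 10;	/* reserve-map entries deleted by region_del() */
	long freed = 7;	/* pages truncated out of the page cache */

	/* 3 reservations were never consumed by a fault; they are
	 * returned via hugepage_subpool_put_pages()/hugetlb_acct_memory() */
	printf("unconsumed reservations returned: %ld\n", chg - freed);
	return 0;
}
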
3933
3934#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
3935static unsigned long page_table_shareable(struct vm_area_struct *svma,
3936 struct vm_area_struct *vma,
3937 unsigned long addr, pgoff_t idx)
3938{
3939 unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
3940 svma->vm_start;
3941 unsigned long sbase = saddr & PUD_MASK;
3942 unsigned long s_end = sbase + PUD_SIZE;
3943
3944 /* Allow segments to share even if only one is marked locked */
3945 unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED;
3946 unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED;
3947
3948 /*
3949 * Match the virtual addresses, permissions, and the alignment of
3950 * the page table page.
3951 */
3952 if (pmd_index(addr) != pmd_index(saddr) ||
3953 vm_flags != svm_flags ||
3954 sbase < svma->vm_start || svma->vm_end < s_end)
3955 return 0;
3956
3957 return saddr;
3958}
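
A stand-alone model of the address math above: translate the faulting file offset (idx) into the candidate VMA's address space and require that the whole PUD range fits inside it. This is a sketch; PUD_SIZE of 1 GiB is an x86-64 assumption and the VMA layout is made up for illustration:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PUD_SIZE	(1UL << 30)	/* assumed x86-64: 1 GiB */
#define PUD_MASK	(~(PUD_SIZE - 1))

struct toy_vma { unsigned long vm_start, vm_end, vm_pgoff; };

/* same computation as page_table_shareable(), minus the flags check */
static unsigned long shareable_addr(struct toy_vma *svma, unsigned long idx)
{
	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
			      svma->vm_start;
	unsigned long sbase = saddr & PUD_MASK;
	unsigned long s_end = sbase + PUD_SIZE;

	if (sbase < svma->vm_start || svma->vm_end < s_end)
		return 0;	/* PUD range not fully inside svma */
	return saddr;
}

int main(void)
{
	/* hypothetical: svma maps file offset 0 at a 1 GiB-aligned base,
	 * large enough to cover a whole PUD range */
	struct toy_vma svma = { 0x40000000UL, 0x40000000UL + 2 * PUD_SIZE, 0 };
	unsigned long idx = 5;	/* faulting page offset in the file */

	printf("shared pmd walk would start at %#lx\n",
	       shareable_addr(&svma, idx));
	return 0;
}
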
3959
3960static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
3961{
3962 unsigned long base = addr & PUD_MASK;
3963 unsigned long end = base + PUD_SIZE;
3964
3965 /*
3966 * Check for proper vm_flags and page table alignment.
3967 */
3968 if (vma->vm_flags & VM_MAYSHARE &&
3969 vma->vm_start <= base && end <= vma->vm_end)
3970 return true;
3971 return false;
3972}
3973
3974/*
3975 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
3976 * and returns the corresponding pte. While this is not necessary for the
3977 * !shared pmd case because we can allocate the pmd later as well, it makes the
3978 * code much cleaner. pmd allocation is essential for the shared case because
3979 * pud has to be populated inside the same i_mmap_rwsem section - otherwise
3980 * racing tasks could either miss the sharing (see huge_pte_offset) or select a
3981 * bad pmd for sharing.
3982 */
3983pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
3984{
3985 struct vm_area_struct *vma = find_vma(mm, addr);
3986 struct address_space *mapping = vma->vm_file->f_mapping;
3987 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
3988 vma->vm_pgoff;
3989 struct vm_area_struct *svma;
3990 unsigned long saddr;
3991 pte_t *spte = NULL;
3992 pte_t *pte;
3993 spinlock_t *ptl;
3994
3995 if (!vma_shareable(vma, addr))
3996 return (pte_t *)pmd_alloc(mm, pud, addr);
3997
3998 i_mmap_lock_write(mapping);
3999 vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
4000 if (svma == vma)
4001 continue;
4002
4003 saddr = page_table_shareable(svma, vma, addr, idx);
4004 if (saddr) {
4005 spte = huge_pte_offset(svma->vm_mm, saddr);
4006 if (spte) {
4007 mm_inc_nr_pmds(mm);
4008 get_page(virt_to_page(spte));
4009 break;
4010 }
4011 }
4012 }
4013
4014 if (!spte)
4015 goto out;
4016
4017 ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte);
4018 spin_lock(ptl);
4019 if (pud_none(*pud)) {
4020 pud_populate(mm, pud,
4021 (pmd_t *)((unsigned long)spte & PAGE_MASK));
4022 } else {
4023 put_page(virt_to_page(spte));
4024 mm_inc_nr_pmds(mm);
4025 }
4026 spin_unlock(ptl);
4027out:
4028 pte = (pte_t *)pmd_alloc(mm, pud, addr);
4029 i_mmap_unlock_write(mapping);
4030 return pte;
4031}
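
Between the unlocked interval-tree lookup and the locked pud check, another task may already have populated the pud; in that case the speculative page reference taken above is dropped. A generic install-or-drop model of that locked section (a sketch only: a plain pointer and a pthread mutex stand in for the pud and the page table lock):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ptl = PTHREAD_MUTEX_INITIALIZER;
static void *pud;			/* NULL plays the role of pud_none() */
static int spte_refcount = 1;		/* ref on the shared pmd page */

/* Mirrors the locked section of huge_pmd_share(): either install the
 * candidate shared page table, or drop the speculative reference. */
static void install_or_drop(void *spte)
{
	pthread_mutex_lock(&ptl);
	if (!pud)
		pud = spte;		/* pud_populate() */
	else
		spte_refcount--;	/* put_page(virt_to_page(spte)) */
	pthread_mutex_unlock(&ptl);
}

int main(void)
{
	int dummy;

	spte_refcount++;		/* get_page() before taking the lock */
	install_or_drop(&dummy);
	printf("pud=%p refcount=%d\n", pud, spte_refcount);
	return 0;
}
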
4032
4033/*
4034 * unmap huge page backed by shared pte.
4035 *
4036 * The hugetlb pte page is refcounted at the time of mapping. If the pte is
4037 * shared, as indicated by page_count > 1, unmap is achieved by clearing the
4038 * pud and decrementing the refcount. If count == 1, the pte page is not shared.
4039 *
4040 * called with page table lock held.
4041 *
4042 * returns: 1 successfully unmapped a shared pte page
4043 * 0 the underlying pte page is not shared, or it is the last user
4044 */
4045int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4046{
4047 pgd_t *pgd = pgd_offset(mm, *addr);
4048 pud_t *pud = pud_offset(pgd, *addr);
4049
4050 BUG_ON(page_count(virt_to_page(ptep)) == 0);
4051 if (page_count(virt_to_page(ptep)) == 1)
4052 return 0;
4053
4054 pud_clear(pud);
4055 put_page(virt_to_page(ptep));
4056 mm_dec_nr_pmds(mm);
4057 *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
4058 return 1;
4059}
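
On success the caller must rescan: *addr is rewound so that, after the caller's loop adds huge_page_size(), the scan resumes at the first address past the PUD range whose page table was just unshared. A stand-alone check of that arithmetic (x86-64 sizes assumed: 2 MiB huge pages and PTRS_PER_PTE = 512, i.e. a 1 GiB PUD region; the start address is made up):

#include <stdio.h>

#define HPAGE_SIZE	(1UL << 21)	/* assumed 2 MiB */
#define PTRS_PER_PTE	512UL
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long addr = 0x40000000UL + 7 * HPAGE_SIZE; /* mid-PUD */

	/* huge_pmd_unshare() rewind */
	addr = ALIGN(addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
	/* the caller's loop then advances by one huge page */
	addr += HPAGE_SIZE;

	printf("scan resumes at %#lx (next 1 GiB boundary)\n", addr);
	return 0;
}
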
4060#define want_pmd_share() (1)
4061#else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
4062pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4063{
4064 return NULL;
4065}
4066
4067int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4068{
4069 return 0;
4070}
4071#define want_pmd_share() (0)
4072#endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
4073
4074#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
4075pte_t *huge_pte_alloc(struct mm_struct *mm,
4076 unsigned long addr, unsigned long sz)
4077{
4078 pgd_t *pgd;
4079 pud_t *pud;
4080 pte_t *pte = NULL;
4081
4082 pgd = pgd_offset(mm, addr);
4083 pud = pud_alloc(mm, pgd, addr);
4084 if (pud) {
4085 if (sz == PUD_SIZE) {
4086 pte = (pte_t *)pud;
4087 } else {
4088 BUG_ON(sz != PMD_SIZE);
4089 if (want_pmd_share() && pud_none(*pud))
4090 pte = huge_pmd_share(mm, addr, pud);
4091 else
4092 pte = (pte_t *)pmd_alloc(mm, pud, addr);
4093 }
4094 }
4095 BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
4096
4097 return pte;
4098}
4099
4100pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
4101{
4102 pgd_t *pgd;
4103 pud_t *pud;
4104 pmd_t *pmd = NULL;
4105
4106 pgd = pgd_offset(mm, addr);
4107 if (pgd_present(*pgd)) {
4108 pud = pud_offset(pgd, addr);
4109 if (pud_present(*pud)) {
4110 if (pud_huge(*pud))
4111 return (pte_t *)pud;
4112 pmd = pmd_offset(pud, addr);
4113 }
4114 }
4115 return (pte_t *) pmd;
4116}
4117
4118#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
4119
4120/*
4121 * These functions are overridable if your architecture needs its own
4122 * behavior.
4123 */
4124struct page * __weak
4125follow_huge_addr(struct mm_struct *mm, unsigned long address,
4126 int write)
4127{
4128 return ERR_PTR(-EINVAL);
4129}
4130
4131struct page * __weak
4132follow_huge_pmd(struct mm_struct *mm, unsigned long address,
4133 pmd_t *pmd, int flags)
4134{
4135 struct page *page = NULL;
4136 spinlock_t *ptl;
4137retry:
4138 ptl = pmd_lockptr(mm, pmd);
4139 spin_lock(ptl);
4140 /*
4141 * make sure that the address range covered by this pmd is not
4142 * unmapped by other threads.
4143 */
4144 if (!pmd_huge(*pmd))
4145 goto out;
4146 if (pmd_present(*pmd)) {
4147 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
4148 if (flags & FOLL_GET)
4149 get_page(page);
4150 } else {
4151 if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) {
4152 spin_unlock(ptl);
4153 __migration_entry_wait(mm, (pte_t *)pmd, ptl);
4154 goto retry;
4155 }
4156 /*
4157 * hwpoisoned entry is treated as no_page_table in
4158 * follow_page_mask().
4159 */
4160 }
4161out:
4162 spin_unlock(ptl);
4163 return page;
4164}
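
The retry label implements a common idiom: take the lock, recheck the entry, and if it is in a transient (migration) state, drop the lock, sleep until the state clears, and start over. A generic pthread model of the same pattern (not kernel code; a condition variable stands in for __migration_entry_wait()):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done = PTHREAD_COND_INITIALIZER;
static int migrating = 1;		/* transient state */

static void *finish_migration(void *arg)
{
	pthread_mutex_lock(&lock);
	migrating = 0;			/* migration entry replaced */
	pthread_cond_signal(&done);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, finish_migration, NULL);

	pthread_mutex_lock(&lock);
retry:
	if (migrating) {
		/* analogue of spin_unlock(); __migration_entry_wait();
		 * pthread_cond_wait drops and retakes the lock for us */
		pthread_cond_wait(&done, &lock);
		goto retry;
	}
	printf("entry is present, page can be returned\n");
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	return 0;
}
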
4165
4166struct page * __weak
4167follow_huge_pud(struct mm_struct *mm, unsigned long address,
4168 pud_t *pud, int flags)
4169{
4170 if (flags & FOLL_GET)
4171 return NULL;
4172
4173 return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
4174}
4175
4176#ifdef CONFIG_MEMORY_FAILURE
4177
4178/*
4179 * This function is called from memory failure code.
4180 * Assume the caller holds page lock of the head page.
4181 */
4182int dequeue_hwpoisoned_huge_page(struct page *hpage)
4183{
4184 struct hstate *h = page_hstate(hpage);
4185 int nid = page_to_nid(hpage);
4186 int ret = -EBUSY;
4187
4188 spin_lock(&hugetlb_lock);
4189 /*
4190 * Just checking !page_huge_active is not enough, because that could be
4191 * an isolated/hwpoisoned hugepage (which has a refcount > 0).
4192 */
4193 if (!page_huge_active(hpage) && !page_count(hpage)) {
4194 /*
4195 * A hwpoisoned hugepage isn't linked to the activelist or freelist,
4196 * but dangling hpage->lru can trigger list-debug warnings
4197 * (this happens when we call unpoison_memory() on it),
4198 * so let it point to itself with list_del_init().
4199 */
4200 list_del_init(&hpage->lru);
4201 set_page_refcounted(hpage);
4202 h->free_huge_pages--;
4203 h->free_huge_pages_node[nid]--;
4204 ret = 0;
4205 }
4206 spin_unlock(&hugetlb_lock);
4207 return ret;
4208}
4209#endif
4210
4211bool isolate_huge_page(struct page *page, struct list_head *list)
4212{
4213 bool ret = true;
4214
4215 VM_BUG_ON_PAGE(!PageHead(page), page);
4216 spin_lock(&hugetlb_lock);
4217 if (!page_huge_active(page) || !get_page_unless_zero(page)) {
4218 ret = false;
4219 goto unlock;
4220 }
4221 clear_page_huge_active(page);
4222 list_move_tail(&page->lru, list);
4223unlock:
4224 spin_unlock(&hugetlb_lock);
4225 return ret;
4226}
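
isolate_huge_page() is the entry point migration uses to pull an in-use hugepage off the active list; from userspace, move_pages(2) on a hugetlb mapping is one way to reach it. A hedged sketch (needs libnuma headers, at least two NUMA nodes, a pool on the target node, and an architecture with hugepage migration support; the target node number is illustrative):

/* Sketch: migrating a MAP_HUGETLB page with move_pages(2) goes through
 * isolate_huge_page()/putback_active_hugepage(). Link with -lnuma. */
#include <numaif.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 2 * 1024 * 1024;	/* assumed 2 MiB huge size */
	void *pages[1];
	int nodes[1] = { 1 };		/* illustrative target node */
	int status[1];
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(p, 1, len);		/* fault the page in */

	pages[0] = p;
	if (move_pages(0 /* self */, 1, pages, nodes, status, MPOL_MF_MOVE))
		perror("move_pages");
	else
		printf("page now on node %d\n", status[0]);

	munmap(p, len);
	return 0;
}
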
4227
4228void putback_active_hugepage(struct page *page)
4229{
4230 VM_BUG_ON_PAGE(!PageHead(page), page);
4231 spin_lock(&hugetlb_lock);
4232 set_page_huge_active(page);
4233 list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
4234 spin_unlock(&hugetlb_lock);
4235 put_page(page);
4236}