[linux-2.6-block.git] / mm / hugetlb.c
1 /*
2  * Generic hugetlb support.
3  * (C) Nadia Yvette Chambers, April 2004
4  */
5 #include <linux/list.h>
6 #include <linux/init.h>
7 #include <linux/module.h>
8 #include <linux/mm.h>
9 #include <linux/seq_file.h>
10 #include <linux/sysctl.h>
11 #include <linux/highmem.h>
12 #include <linux/mmu_notifier.h>
13 #include <linux/nodemask.h>
14 #include <linux/pagemap.h>
15 #include <linux/mempolicy.h>
16 #include <linux/compiler.h>
17 #include <linux/cpuset.h>
18 #include <linux/mutex.h>
19 #include <linux/bootmem.h>
20 #include <linux/sysfs.h>
21 #include <linux/slab.h>
22 #include <linux/rmap.h>
23 #include <linux/swap.h>
24 #include <linux/swapops.h>
25 #include <linux/page-isolation.h>
26 #include <linux/jhash.h>
27
28 #include <asm/page.h>
29 #include <asm/pgtable.h>
30 #include <asm/tlb.h>
31
32 #include <linux/io.h>
33 #include <linux/hugetlb.h>
34 #include <linux/hugetlb_cgroup.h>
35 #include <linux/node.h>
36 #include "internal.h"
37
38 int hugepages_treat_as_movable;
39
40 int hugetlb_max_hstate __read_mostly;
41 unsigned int default_hstate_idx;
42 struct hstate hstates[HUGE_MAX_HSTATE];
43
44 __initdata LIST_HEAD(huge_boot_pages);
45
46 /* for command line parsing */
47 static struct hstate * __initdata parsed_hstate;
48 static unsigned long __initdata default_hstate_max_huge_pages;
49 static unsigned long __initdata default_hstate_size;
50
51 /*
52  * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
53  * free_huge_pages, and surplus_huge_pages.
54  */
55 DEFINE_SPINLOCK(hugetlb_lock);
56
57 /*
58  * Serializes faults on the same logical page.  This is used to
59  * prevent spurious OOMs when the hugepage pool is fully utilized.
60  */
61 static int num_fault_mutexes;
62 static struct mutex *htlb_fault_mutex_table ____cacheline_aligned_in_smp;
63
64 /* Forward declaration */
65 static int hugetlb_acct_memory(struct hstate *h, long delta);
66
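/*
 * Called with spool->lock held: drops the lock and, if the subpool has no
 * remaining references and no pages in use, releases any minimum-size
 * reservation and frees the subpool itself.
 */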
67 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
68 {
69         bool free = (spool->count == 0) && (spool->used_hpages == 0);
70
71         spin_unlock(&spool->lock);
72
73         /* If no pages are used, and no other handles to the subpool
74          * remain, give up any reservations mased on minimum size and
75          * free the subpool */
76         if (free) {
77                 if (spool->min_hpages != -1)
78                         hugetlb_acct_memory(spool->hstate,
79                                                 -spool->min_hpages);
80                 kfree(spool);
81         }
82 }
83
84 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
85                                                 long min_hpages)
86 {
87         struct hugepage_subpool *spool;
88
89         spool = kzalloc(sizeof(*spool), GFP_KERNEL);
90         if (!spool)
91                 return NULL;
92
93         spin_lock_init(&spool->lock);
94         spool->count = 1;
95         spool->max_hpages = max_hpages;
96         spool->hstate = h;
97         spool->min_hpages = min_hpages;
98
99         if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
100                 kfree(spool);
101                 return NULL;
102         }
103         spool->rsv_hpages = min_hpages;
104
105         return spool;
106 }
107
108 void hugepage_put_subpool(struct hugepage_subpool *spool)
109 {
110         spin_lock(&spool->lock);
111         BUG_ON(!spool->count);
112         spool->count--;
113         unlock_or_release_subpool(spool);
114 }
115
116 /*
117  * Subpool accounting for allocating and reserving pages.
118  * Return -ENOMEM if there are not enough resources to satisfy
119  * the request.  Otherwise, return the number of pages by which the
120  * global pools must be adjusted (upward).  The returned value may
121  * only be different than the passed value (delta) in the case where
122  * a subpool minimum size must be maintained.
123  */
124 static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
125                                       long delta)
126 {
127         long ret = delta;
128
129         if (!spool)
130                 return ret;
131
132         spin_lock(&spool->lock);
133
134         if (spool->max_hpages != -1) {          /* maximum size accounting */
135                 if ((spool->used_hpages + delta) <= spool->max_hpages)
136                         spool->used_hpages += delta;
137                 else {
138                         ret = -ENOMEM;
139                         goto unlock_ret;
140                 }
141         }
142
143         if (spool->min_hpages != -1) {          /* minimum size accounting */
144                 if (delta > spool->rsv_hpages) {
145                         /*
146                          * Asking for more reserves than those already taken on
147                          * behalf of subpool.  Return difference.
148                          */
149                         ret = delta - spool->rsv_hpages;
150                         spool->rsv_hpages = 0;
151                 } else {
152                         ret = 0;        /* reserves already accounted for */
153                         spool->rsv_hpages -= delta;
154                 }
155         }
156
157 unlock_ret:
158         spin_unlock(&spool->lock);
159         return ret;
160 }
161
162 /*
163  * Subpool accounting for freeing and unreserving pages.
164  * Return the number of global page reservations that must be dropped.
165  * The return value may only be different than the passed value (delta)
166  * in the case where a subpool minimum size must be maintained.
167  */
168 static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
169                                        long delta)
170 {
171         long ret = delta;
172
173         if (!spool)
174                 return delta;
175
176         spin_lock(&spool->lock);
177
178         if (spool->max_hpages != -1)            /* maximum size accounting */
179                 spool->used_hpages -= delta;
180
181         if (spool->min_hpages != -1) {          /* minimum size accounting */
182                 if (spool->rsv_hpages + delta <= spool->min_hpages)
183                         ret = 0;
184                 else
185                         ret = spool->rsv_hpages + delta - spool->min_hpages;
186
187                 spool->rsv_hpages += delta;
188                 if (spool->rsv_hpages > spool->min_hpages)
189                         spool->rsv_hpages = spool->min_hpages;
190         }
191
192         /*
193          * If hugetlbfs_put_super couldn't free spool due to an outstanding
194          * quota reference, free it now.
195          */
196         unlock_or_release_subpool(spool);
197
198         return ret;
199 }
200
201 static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
202 {
203         return HUGETLBFS_SB(inode->i_sb)->spool;
204 }
205
206 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
207 {
208         return subpool_inode(file_inode(vma->vm_file));
209 }
210
211 /*
212  * Region tracking -- allows tracking of reservations and instantiated pages
213  *                    across the pages in a mapping.
214  *
215  * The region data structures are embedded into a resv_map and
216  * protected by a resv_map's lock
217  */
218 struct file_region {
219         struct list_head link;
220         long from;
221         long to;
222 };
223
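/*
 * Add the huge page range [f, t) to the reserve map, merging any regions it
 * overlaps.  A prior region_chg() call for the same range guarantees that a
 * suitable entry already exists, so no allocation is needed here and the
 * function always succeeds (returns 0).
 */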
224 static long region_add(struct resv_map *resv, long f, long t)
225 {
226         struct list_head *head = &resv->regions;
227         struct file_region *rg, *nrg, *trg;
228
229         spin_lock(&resv->lock);
230         /* Locate the region we are either in or before. */
231         list_for_each_entry(rg, head, link)
232                 if (f <= rg->to)
233                         break;
234
235         /* Round our left edge to the current segment if it encloses us. */
236         if (f > rg->from)
237                 f = rg->from;
238
239         /* Check for and consume any regions we now overlap with. */
240         nrg = rg;
241         list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
242                 if (&rg->link == head)
243                         break;
244                 if (rg->from > t)
245                         break;
246
247                 /* If this area reaches higher, then extend our area to
248                  * include it completely.  If this is not the first area
249                  * which we intend to reuse, free it. */
250                 if (rg->to > t)
251                         t = rg->to;
252                 if (rg != nrg) {
253                         list_del(&rg->link);
254                         kfree(rg);
255                 }
256         }
257         nrg->from = f;
258         nrg->to = t;
259         spin_unlock(&resv->lock);
260         return 0;
261 }
262
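/*
 * Determine how many huge pages in the range [f, t) are not yet covered by
 * the reserve map, i.e. how many new reservations a subsequent region_add()
 * for the same range would consume.  May drop the lock to allocate a new
 * region entry so that the later region_add() cannot fail.  Returns the
 * number of pages to charge, or -ENOMEM if the entry could not be allocated.
 */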
263 static long region_chg(struct resv_map *resv, long f, long t)
264 {
265         struct list_head *head = &resv->regions;
266         struct file_region *rg, *nrg = NULL;
267         long chg = 0;
268
269 retry:
270         spin_lock(&resv->lock);
271         /* Locate the region we are before or in. */
272         list_for_each_entry(rg, head, link)
273                 if (f <= rg->to)
274                         break;
275
276         /* If we are below the current region then a new region is required.
277          * Subtle, allocate a new region at the position but make it zero
278          * size such that we can guarantee to record the reservation. */
279         if (&rg->link == head || t < rg->from) {
280                 if (!nrg) {
281                         spin_unlock(&resv->lock);
282                         nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
283                         if (!nrg)
284                                 return -ENOMEM;
285
286                         nrg->from = f;
287                         nrg->to   = f;
288                         INIT_LIST_HEAD(&nrg->link);
289                         goto retry;
290                 }
291
292                 list_add(&nrg->link, rg->link.prev);
293                 chg = t - f;
294                 goto out_nrg;
295         }
296
297         /* Round our left edge to the current segment if it encloses us. */
298         if (f > rg->from)
299                 f = rg->from;
300         chg = t - f;
301
302         /* Check for and consume any regions we now overlap with. */
303         list_for_each_entry(rg, rg->link.prev, link) {
304                 if (&rg->link == head)
305                         break;
306                 if (rg->from > t)
307                         goto out;
308
309                 /* We overlap with this area, if it extends further than
310                  * us then we must extend ourselves.  Account for its
311                  * existing reservation. */
312                 if (rg->to > t) {
313                         chg += rg->to - t;
314                         t = rg->to;
315                 }
316                 chg -= rg->to - rg->from;
317         }
318
319 out:
320         spin_unlock(&resv->lock);
321         /*  We already know we raced and no longer need the new region */
322         kfree(nrg);
323         return chg;
324 out_nrg:
325         spin_unlock(&resv->lock);
326         return chg;
327 }
328
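/*
 * Truncate the reserve map at offset 'end', trimming or removing every
 * region beyond that point.  Returns the number of huge page reservations
 * released.
 */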
329 static long region_truncate(struct resv_map *resv, long end)
330 {
331         struct list_head *head = &resv->regions;
332         struct file_region *rg, *trg;
333         long chg = 0;
334
335         spin_lock(&resv->lock);
336         /* Locate the region we are either in or before. */
337         list_for_each_entry(rg, head, link)
338                 if (end <= rg->to)
339                         break;
340         if (&rg->link == head)
341                 goto out;
342
343         /* If we are in the middle of a region then adjust it. */
344         if (end > rg->from) {
345                 chg = rg->to - end;
346                 rg->to = end;
347                 rg = list_entry(rg->link.next, typeof(*rg), link);
348         }
349
350         /* Drop any remaining regions. */
351         list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
352                 if (&rg->link == head)
353                         break;
354                 chg += rg->to - rg->from;
355                 list_del(&rg->link);
356                 kfree(rg);
357         }
358
359 out:
360         spin_unlock(&resv->lock);
361         return chg;
362 }
363
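/*
 * Count the number of huge pages in the range [f, t) that are covered by
 * existing regions in the reserve map.
 */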
364 static long region_count(struct resv_map *resv, long f, long t)
365 {
366         struct list_head *head = &resv->regions;
367         struct file_region *rg;
368         long chg = 0;
369
370         spin_lock(&resv->lock);
371         /* Locate each segment we overlap with, and count that overlap. */
372         list_for_each_entry(rg, head, link) {
373                 long seg_from;
374                 long seg_to;
375
376                 if (rg->to <= f)
377                         continue;
378                 if (rg->from >= t)
379                         break;
380
381                 seg_from = max(rg->from, f);
382                 seg_to = min(rg->to, t);
383
384                 chg += seg_to - seg_from;
385         }
386         spin_unlock(&resv->lock);
387
388         return chg;
389 }
390
391 /*
392  * Convert the address within this vma to the page offset within
393  * the mapping, in pagecache page units; huge pages here.
394  */
395 static pgoff_t vma_hugecache_offset(struct hstate *h,
396                         struct vm_area_struct *vma, unsigned long address)
397 {
398         return ((address - vma->vm_start) >> huge_page_shift(h)) +
399                         (vma->vm_pgoff >> huge_page_order(h));
400 }
401
402 pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
403                                      unsigned long address)
404 {
405         return vma_hugecache_offset(hstate_vma(vma), vma, address);
406 }
407
408 /*
409  * Return the size of the pages allocated when backing a VMA. In the majority
410  * of cases this will be the same size as used by the page table entries.
411  */
412 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
413 {
414         struct hstate *hstate;
415
416         if (!is_vm_hugetlb_page(vma))
417                 return PAGE_SIZE;
418
419         hstate = hstate_vma(vma);
420
421         return 1UL << huge_page_shift(hstate);
422 }
423 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
424
425 /*
426  * Return the page size being used by the MMU to back a VMA. In the majority
427  * of cases, the page size used by the kernel matches the MMU size. On
428  * architectures where it differs, an architecture-specific version of this
429  * function is required.
430  */
431 #ifndef vma_mmu_pagesize
432 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
433 {
434         return vma_kernel_pagesize(vma);
435 }
436 #endif
437
438 /*
439  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
440  * bits of the reservation map pointer, which are always clear due to
441  * alignment.
442  */
443 #define HPAGE_RESV_OWNER    (1UL << 0)
444 #define HPAGE_RESV_UNMAPPED (1UL << 1)
445 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
446
447 /*
448  * These helpers are used to track how many pages are reserved for
449  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
450  * is guaranteed to have their future faults succeed.
451  *
452  * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
453  * the reserve counters are updated with the hugetlb_lock held. It is safe
454  * to reset the VMA at fork() time as it is not in use yet and there is no
455  * chance of the global counters getting corrupted as a result of the values.
456  *
457  * The private mapping reservation is represented in a subtly different
458  * manner to a shared mapping.  A shared mapping has a region map associated
459  * with the underlying file; this region map represents the backing file
460  * pages which have ever had a reservation assigned, and it persists even
461  * after the page is instantiated.  A private mapping has a region map
462  * associated with the original mmap which is attached to all VMAs which
463  * reference it; this region map represents those offsets which have consumed
464  * reservation, i.e. where pages have been instantiated.
465  */
466 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
467 {
468         return (unsigned long)vma->vm_private_data;
469 }
470
471 static void set_vma_private_data(struct vm_area_struct *vma,
472                                                         unsigned long value)
473 {
474         vma->vm_private_data = (void *)value;
475 }
476
477 struct resv_map *resv_map_alloc(void)
478 {
479         struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
480         if (!resv_map)
481                 return NULL;
482
483         kref_init(&resv_map->refs);
484         spin_lock_init(&resv_map->lock);
485         INIT_LIST_HEAD(&resv_map->regions);
486
487         return resv_map;
488 }
489
490 void resv_map_release(struct kref *ref)
491 {
492         struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
493
494         /* Clear out any active regions before we release the map. */
495         region_truncate(resv_map, 0);
496         kfree(resv_map);
497 }
498
499 static inline struct resv_map *inode_resv_map(struct inode *inode)
500 {
501         return inode->i_mapping->private_data;
502 }
503
504 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
505 {
506         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
507         if (vma->vm_flags & VM_MAYSHARE) {
508                 struct address_space *mapping = vma->vm_file->f_mapping;
509                 struct inode *inode = mapping->host;
510
511                 return inode_resv_map(inode);
512
513         } else {
514                 return (struct resv_map *)(get_vma_private_data(vma) &
515                                                         ~HPAGE_RESV_MASK);
516         }
517 }
518
519 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
520 {
521         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
522         VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
523
524         set_vma_private_data(vma, (get_vma_private_data(vma) &
525                                 HPAGE_RESV_MASK) | (unsigned long)map);
526 }
527
528 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
529 {
530         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
531         VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
532
533         set_vma_private_data(vma, get_vma_private_data(vma) | flags);
534 }
535
536 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
537 {
538         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
539
540         return (get_vma_private_data(vma) & flag) != 0;
541 }
542
543 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
544 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
545 {
546         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
547         if (!(vma->vm_flags & VM_MAYSHARE))
548                 vma->vm_private_data = (void *)0;
549 }
550
551 /* Returns true if the VMA has associated reserve pages */
552 static int vma_has_reserves(struct vm_area_struct *vma, long chg)
553 {
554         if (vma->vm_flags & VM_NORESERVE) {
555                 /*
556                  * This address is already reserved by another process (chg == 0),
557                  * so, we should decrement reserved count. Without decrementing,
558                  * reserve count remains after releasing inode, because this
559                  * allocated page will go into page cache and is regarded as
560                  * coming from reserved pool in releasing step.  Currently, we
561                  * don't have any other solution to deal with this situation
562                  * properly, so add work-around here.
563                  */
564                 if (vma->vm_flags & VM_MAYSHARE && chg == 0)
565                         return 1;
566                 else
567                         return 0;
568         }
569
570         /* Shared mappings always use reserves */
571         if (vma->vm_flags & VM_MAYSHARE)
572                 return 1;
573
574         /*
575          * Only the process that called mmap() has reserves for
576          * private mappings.
577          */
578         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
579                 return 1;
580
581         return 0;
582 }
583
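/* Place a free huge page on its node's free list and update the free counts. */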
584 static void enqueue_huge_page(struct hstate *h, struct page *page)
585 {
586         int nid = page_to_nid(page);
587         list_move(&page->lru, &h->hugepage_freelists[nid]);
588         h->free_huge_pages++;
589         h->free_huge_pages_node[nid]++;
590 }
591
592 static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
593 {
594         struct page *page;
595
596         list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
597                 if (!is_migrate_isolate_page(page))
598                         break;
599         /*
600          * If no non-isolated free hugepage is found on the list,
601          * the allocation fails.
602          */
603         if (&h->hugepage_freelists[nid] == &page->lru)
604                 return NULL;
605         list_move(&page->lru, &h->hugepage_activelist);
606         set_page_refcounted(page);
607         h->free_huge_pages--;
608         h->free_huge_pages_node[nid]--;
609         return page;
610 }
611
612 /* Movability of hugepages depends on migration support. */
613 static inline gfp_t htlb_alloc_mask(struct hstate *h)
614 {
615         if (hugepages_treat_as_movable || hugepage_migration_supported(h))
616                 return GFP_HIGHUSER_MOVABLE;
617         else
618                 return GFP_HIGHUSER;
619 }
620
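/*
 * Dequeue a free huge page for a fault at 'address' in 'vma', walking the
 * zonelist dictated by the vma's memory policy and cpuset constraints and
 * honouring the vma's reservation state ('chg' and 'avoid_reserve').
 */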
621 static struct page *dequeue_huge_page_vma(struct hstate *h,
622                                 struct vm_area_struct *vma,
623                                 unsigned long address, int avoid_reserve,
624                                 long chg)
625 {
626         struct page *page = NULL;
627         struct mempolicy *mpol;
628         nodemask_t *nodemask;
629         struct zonelist *zonelist;
630         struct zone *zone;
631         struct zoneref *z;
632         unsigned int cpuset_mems_cookie;
633
634         /*
635          * A child process with MAP_PRIVATE mappings created by its parent
636          * has no page reserves. This check ensures that reservations are
637          * not "stolen". The child may still get SIGKILLed.
638          */
639         if (!vma_has_reserves(vma, chg) &&
640                         h->free_huge_pages - h->resv_huge_pages == 0)
641                 goto err;
642
643         /* If reserves cannot be used, ensure enough pages are in the pool */
644         if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
645                 goto err;
646
647 retry_cpuset:
648         cpuset_mems_cookie = read_mems_allowed_begin();
649         zonelist = huge_zonelist(vma, address,
650                                         htlb_alloc_mask(h), &mpol, &nodemask);
651
652         for_each_zone_zonelist_nodemask(zone, z, zonelist,
653                                                 MAX_NR_ZONES - 1, nodemask) {
654                 if (cpuset_zone_allowed(zone, htlb_alloc_mask(h))) {
655                         page = dequeue_huge_page_node(h, zone_to_nid(zone));
656                         if (page) {
657                                 if (avoid_reserve)
658                                         break;
659                                 if (!vma_has_reserves(vma, chg))
660                                         break;
661
662                                 SetPagePrivate(page);
663                                 h->resv_huge_pages--;
664                                 break;
665                         }
666                 }
667         }
668
669         mpol_cond_put(mpol);
670         if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
671                 goto retry_cpuset;
672         return page;
673
674 err:
675         return NULL;
676 }
677
678 /*
679  * common helper functions for hstate_next_node_to_{alloc|free}.
680  * We may have allocated or freed a huge page based on a different
681  * nodes_allowed previously, so h->next_node_to_{alloc|free} might
682  * be outside of *nodes_allowed.  Ensure that we use an allowed
683  * node for alloc or free.
684  */
685 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
686 {
687         nid = next_node(nid, *nodes_allowed);
688         if (nid == MAX_NUMNODES)
689                 nid = first_node(*nodes_allowed);
690         VM_BUG_ON(nid >= MAX_NUMNODES);
691
692         return nid;
693 }
694
695 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
696 {
697         if (!node_isset(nid, *nodes_allowed))
698                 nid = next_node_allowed(nid, nodes_allowed);
699         return nid;
700 }
701
702 /*
703  * returns the previously saved node ["this node"] from which to
704  * allocate a persistent huge page for the pool and advance the
705  * next node from which to allocate, handling wrap at end of node
706  * mask.
707  */
708 static int hstate_next_node_to_alloc(struct hstate *h,
709                                         nodemask_t *nodes_allowed)
710 {
711         int nid;
712
713         VM_BUG_ON(!nodes_allowed);
714
715         nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
716         h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
717
718         return nid;
719 }
720
721 /*
722  * helper for free_pool_huge_page() - return the previously saved
723  * node ["this node"] from which to free a huge page.  Advance the
724  * next node id whether or not we find a free huge page to free so
725  * that the next attempt to free addresses the next node.
726  */
727 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
728 {
729         int nid;
730
731         VM_BUG_ON(!nodes_allowed);
732
733         nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
734         h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
735
736         return nid;
737 }
738
739 #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)           \
740         for (nr_nodes = nodes_weight(*mask);                            \
741                 nr_nodes > 0 &&                                         \
742                 ((node = hstate_next_node_to_alloc(hs, mask)) || 1);    \
743                 nr_nodes--)
744
745 #define for_each_node_mask_to_free(hs, nr_nodes, node, mask)            \
746         for (nr_nodes = nodes_weight(*mask);                            \
747                 nr_nodes > 0 &&                                         \
748                 ((node = hstate_next_node_to_free(hs, mask)) || 1);     \
749                 nr_nodes--)
750
751 #if defined(CONFIG_CMA) && defined(CONFIG_X86_64)
752 static void destroy_compound_gigantic_page(struct page *page,
753                                         unsigned long order)
754 {
755         int i;
756         int nr_pages = 1 << order;
757         struct page *p = page + 1;
758
759         for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
760                 __ClearPageTail(p);
761                 set_page_refcounted(p);
762                 p->first_page = NULL;
763         }
764
765         set_compound_order(page, 0);
766         __ClearPageHead(page);
767 }
768
769 static void free_gigantic_page(struct page *page, unsigned order)
770 {
771         free_contig_range(page_to_pfn(page), 1 << order);
772 }
773
774 static int __alloc_gigantic_page(unsigned long start_pfn,
775                                 unsigned long nr_pages)
776 {
777         unsigned long end_pfn = start_pfn + nr_pages;
778         return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
779 }
780
781 static bool pfn_range_valid_gigantic(unsigned long start_pfn,
782                                 unsigned long nr_pages)
783 {
784         unsigned long i, end_pfn = start_pfn + nr_pages;
785         struct page *page;
786
787         for (i = start_pfn; i < end_pfn; i++) {
788                 if (!pfn_valid(i))
789                         return false;
790
791                 page = pfn_to_page(i);
792
793                 if (PageReserved(page))
794                         return false;
795
796                 if (page_count(page) > 0)
797                         return false;
798
799                 if (PageHuge(page))
800                         return false;
801         }
802
803         return true;
804 }
805
806 static bool zone_spans_last_pfn(const struct zone *zone,
807                         unsigned long start_pfn, unsigned long nr_pages)
808 {
809         unsigned long last_pfn = start_pfn + nr_pages - 1;
810         return zone_spans_pfn(zone, last_pfn);
811 }
812
813 static struct page *alloc_gigantic_page(int nid, unsigned order)
814 {
815         unsigned long nr_pages = 1 << order;
816         unsigned long ret, pfn, flags;
817         struct zone *z;
818
819         z = NODE_DATA(nid)->node_zones;
820         for (; z - NODE_DATA(nid)->node_zones < MAX_NR_ZONES; z++) {
821                 spin_lock_irqsave(&z->lock, flags);
822
823                 pfn = ALIGN(z->zone_start_pfn, nr_pages);
824                 while (zone_spans_last_pfn(z, pfn, nr_pages)) {
825                         if (pfn_range_valid_gigantic(pfn, nr_pages)) {
826                                 /*
827                                  * We release the zone lock here because
828                                  * alloc_contig_range() will also lock the zone
829                                  * at some point. If there's an allocation
830                                  * spinning on this lock, it may win the race
831                                  * and cause alloc_contig_range() to fail...
832                                  */
833                                 spin_unlock_irqrestore(&z->lock, flags);
834                                 ret = __alloc_gigantic_page(pfn, nr_pages);
835                                 if (!ret)
836                                         return pfn_to_page(pfn);
837                                 spin_lock_irqsave(&z->lock, flags);
838                         }
839                         pfn += nr_pages;
840                 }
841
842                 spin_unlock_irqrestore(&z->lock, flags);
843         }
844
845         return NULL;
846 }
847
848 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
849 static void prep_compound_gigantic_page(struct page *page, unsigned long order);
850
851 static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid)
852 {
853         struct page *page;
854
855         page = alloc_gigantic_page(nid, huge_page_order(h));
856         if (page) {
857                 prep_compound_gigantic_page(page, huge_page_order(h));
858                 prep_new_huge_page(h, page, nid);
859         }
860
861         return page;
862 }
863
864 static int alloc_fresh_gigantic_page(struct hstate *h,
865                                 nodemask_t *nodes_allowed)
866 {
867         struct page *page = NULL;
868         int nr_nodes, node;
869
870         for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
871                 page = alloc_fresh_gigantic_page_node(h, node);
872                 if (page)
873                         return 1;
874         }
875
876         return 0;
877 }
878
879 static inline bool gigantic_page_supported(void) { return true; }
880 #else
881 static inline bool gigantic_page_supported(void) { return false; }
882 static inline void free_gigantic_page(struct page *page, unsigned order) { }
883 static inline void destroy_compound_gigantic_page(struct page *page,
884                                                 unsigned long order) { }
885 static inline int alloc_fresh_gigantic_page(struct hstate *h,
886                                         nodemask_t *nodes_allowed) { return 0; }
887 #endif
888
889 static void update_and_free_page(struct hstate *h, struct page *page)
890 {
891         int i;
892
893         if (hstate_is_gigantic(h) && !gigantic_page_supported())
894                 return;
895
896         h->nr_huge_pages--;
897         h->nr_huge_pages_node[page_to_nid(page)]--;
898         for (i = 0; i < pages_per_huge_page(h); i++) {
899                 page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
900                                 1 << PG_referenced | 1 << PG_dirty |
901                                 1 << PG_active | 1 << PG_private |
902                                 1 << PG_writeback);
903         }
904         VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
905         set_compound_page_dtor(page, NULL);
906         set_page_refcounted(page);
907         if (hstate_is_gigantic(h)) {
908                 destroy_compound_gigantic_page(page, huge_page_order(h));
909                 free_gigantic_page(page, huge_page_order(h));
910         } else {
911                 arch_release_hugepage(page);
912                 __free_pages(page, huge_page_order(h));
913         }
914 }
915
916 struct hstate *size_to_hstate(unsigned long size)
917 {
918         struct hstate *h;
919
920         for_each_hstate(h) {
921                 if (huge_page_size(h) == size)
922                         return h;
923         }
924         return NULL;
925 }
926
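/*
 * Compound page destructor for huge pages: uncharge the subpool and hugetlb
 * cgroup, then either free the page back to the buddy allocator (when the
 * node has surplus pages to shed) or return it to the hstate free list.
 */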
927 void free_huge_page(struct page *page)
928 {
929         /*
930          * Can't pass hstate in here because it is called from the
931          * compound page destructor.
932          */
933         struct hstate *h = page_hstate(page);
934         int nid = page_to_nid(page);
935         struct hugepage_subpool *spool =
936                 (struct hugepage_subpool *)page_private(page);
937         bool restore_reserve;
938
939         set_page_private(page, 0);
940         page->mapping = NULL;
941         BUG_ON(page_count(page));
942         BUG_ON(page_mapcount(page));
943         restore_reserve = PagePrivate(page);
944         ClearPagePrivate(page);
945
946         /*
947          * A return code of zero implies that the subpool will be under its
948          * minimum size if the reservation is not restored after the page is freed.
949          * Therefore, force the restore_reserve operation.
950          */
951         if (hugepage_subpool_put_pages(spool, 1) == 0)
952                 restore_reserve = true;
953
954         spin_lock(&hugetlb_lock);
955         hugetlb_cgroup_uncharge_page(hstate_index(h),
956                                      pages_per_huge_page(h), page);
957         if (restore_reserve)
958                 h->resv_huge_pages++;
959
960         if (h->surplus_huge_pages_node[nid]) {
961                 /* remove the page from active list */
962                 list_del(&page->lru);
963                 update_and_free_page(h, page);
964                 h->surplus_huge_pages--;
965                 h->surplus_huge_pages_node[nid]--;
966         } else {
967                 arch_clear_hugepage_flags(page);
968                 enqueue_huge_page(h, page);
969         }
970         spin_unlock(&hugetlb_lock);
971 }
972
973 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
974 {
975         INIT_LIST_HEAD(&page->lru);
976         set_compound_page_dtor(page, free_huge_page);
977         spin_lock(&hugetlb_lock);
978         set_hugetlb_cgroup(page, NULL);
979         h->nr_huge_pages++;
980         h->nr_huge_pages_node[nid]++;
981         spin_unlock(&hugetlb_lock);
982         put_page(page); /* free it into the hugepage allocator */
983 }
984
985 static void prep_compound_gigantic_page(struct page *page, unsigned long order)
986 {
987         int i;
988         int nr_pages = 1 << order;
989         struct page *p = page + 1;
990
991         /* we rely on prep_new_huge_page to set the destructor */
992         set_compound_order(page, order);
993         __SetPageHead(page);
994         __ClearPageReserved(page);
995         for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
996                 /*
997                  * For gigantic hugepages allocated through bootmem at
998                  * boot, it's safer to be consistent with the not-gigantic
999                  * hugepages and clear the PG_reserved bit from all tail pages
1000                  * too.  Otherwise drivers using get_user_pages() to access tail
1001                  * pages may get the reference counting wrong if they see
1002                  * PG_reserved set on a tail page (despite the head page not
1003                  * having PG_reserved set).  Enforcing this consistency between
1004                  * head and tail pages allows drivers to optimize away a check
1005                  * on the head page when they need to know if put_page() is needed
1006                  * after get_user_pages().
1007                  */
1008                 __ClearPageReserved(p);
1009                 set_page_count(p, 0);
1010                 p->first_page = page;
1011                 /* Make sure p->first_page is always valid for PageTail() */
1012                 smp_wmb();
1013                 __SetPageTail(p);
1014         }
1015 }
1016
1017 /*
1018  * PageHuge() only returns true for hugetlbfs pages, but not for normal or
1019  * transparent huge pages.  See the PageTransHuge() documentation for more
1020  * details.
1021  */
1022 int PageHuge(struct page *page)
1023 {
1024         if (!PageCompound(page))
1025                 return 0;
1026
1027         page = compound_head(page);
1028         return get_compound_page_dtor(page) == free_huge_page;
1029 }
1030 EXPORT_SYMBOL_GPL(PageHuge);
1031
1032 /*
1033  * PageHeadHuge() only returns true for hugetlbfs head page, but not for
1034  * normal or transparent huge pages.
1035  */
1036 int PageHeadHuge(struct page *page_head)
1037 {
1038         if (!PageHead(page_head))
1039                 return 0;
1040
1041         return get_compound_page_dtor(page_head) == free_huge_page;
1042 }
1043
1044 pgoff_t __basepage_index(struct page *page)
1045 {
1046         struct page *page_head = compound_head(page);
1047         pgoff_t index = page_index(page_head);
1048         unsigned long compound_idx;
1049
1050         if (!PageHuge(page_head))
1051                 return page_index(page);
1052
1053         if (compound_order(page_head) >= MAX_ORDER)
1054                 compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
1055         else
1056                 compound_idx = page - page_head;
1057
1058         return (index << compound_order(page_head)) + compound_idx;
1059 }
1060
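/* Allocate and prepare a fresh huge page from the buddy allocator on 'nid'. */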
1061 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
1062 {
1063         struct page *page;
1064
1065         page = alloc_pages_exact_node(nid,
1066                 htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
1067                                                 __GFP_REPEAT|__GFP_NOWARN,
1068                 huge_page_order(h));
1069         if (page) {
1070                 if (arch_prepare_hugepage(page)) {
1071                         __free_pages(page, huge_page_order(h));
1072                         return NULL;
1073                 }
1074                 prep_new_huge_page(h, page, nid);
1075         }
1076
1077         return page;
1078 }
1079
1080 static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
1081 {
1082         struct page *page;
1083         int nr_nodes, node;
1084         int ret = 0;
1085
1086         for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1087                 page = alloc_fresh_huge_page_node(h, node);
1088                 if (page) {
1089                         ret = 1;
1090                         break;
1091                 }
1092         }
1093
1094         if (ret)
1095                 count_vm_event(HTLB_BUDDY_PGALLOC);
1096         else
1097                 count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1098
1099         return ret;
1100 }
1101
1102 /*
1103  * Free a huge page from the pool, starting from the next node to free.
1104  * Attempt to keep persistent huge pages more or less
1105  * balanced over allowed nodes.
1106  * Called with hugetlb_lock locked.
1107  */
1108 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1109                                                          bool acct_surplus)
1110 {
1111         int nr_nodes, node;
1112         int ret = 0;
1113
1114         for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1115                 /*
1116                  * If we're returning unused surplus pages, only examine
1117                  * nodes with surplus pages.
1118                  */
1119                 if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
1120                     !list_empty(&h->hugepage_freelists[node])) {
1121                         struct page *page =
1122                                 list_entry(h->hugepage_freelists[node].next,
1123                                           struct page, lru);
1124                         list_del(&page->lru);
1125                         h->free_huge_pages--;
1126                         h->free_huge_pages_node[node]--;
1127                         if (acct_surplus) {
1128                                 h->surplus_huge_pages--;
1129                                 h->surplus_huge_pages_node[node]--;
1130                         }
1131                         update_and_free_page(h, page);
1132                         ret = 1;
1133                         break;
1134                 }
1135         }
1136
1137         return ret;
1138 }
1139
1140 /*
1141  * Dissolve a given free hugepage into free buddy pages. This function does
1142  * nothing for in-use (including surplus) hugepages.
1143  */
1144 static void dissolve_free_huge_page(struct page *page)
1145 {
1146         spin_lock(&hugetlb_lock);
1147         if (PageHuge(page) && !page_count(page)) {
1148                 struct hstate *h = page_hstate(page);
1149                 int nid = page_to_nid(page);
1150                 list_del(&page->lru);
1151                 h->free_huge_pages--;
1152                 h->free_huge_pages_node[nid]--;
1153                 update_and_free_page(h, page);
1154         }
1155         spin_unlock(&hugetlb_lock);
1156 }
1157
1158 /*
1159  * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
1160  * make specified memory blocks removable from the system.
1161  * Note that start_pfn should be aligned with the (minimum) hugepage size.
1162  */
1163 void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
1164 {
1165         unsigned int order = 8 * sizeof(void *);
1166         unsigned long pfn;
1167         struct hstate *h;
1168
1169         if (!hugepages_supported())
1170                 return;
1171
1172         /* Set scan step to minimum hugepage size */
1173         for_each_hstate(h)
1174                 if (order > huge_page_order(h))
1175                         order = huge_page_order(h);
1176         VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << order));
1177         for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order)
1178                 dissolve_free_huge_page(pfn_to_page(pfn));
1179 }
1180
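/*
 * Allocate a surplus huge page directly from the buddy allocator, accounting
 * it against nr_overcommit_huge_pages.  Returns NULL for gigantic hstates or
 * when the overcommit limit has already been reached.
 */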
1181 static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
1182 {
1183         struct page *page;
1184         unsigned int r_nid;
1185
1186         if (hstate_is_gigantic(h))
1187                 return NULL;
1188
1189         /*
1190          * Assume we will successfully allocate the surplus page to
1191          * prevent racing processes from causing the surplus to exceed
1192          * overcommit
1193          *
1194          * This however introduces a different race, where a process B
1195          * tries to grow the static hugepage pool while alloc_pages() is
1196          * called by process A. B will only examine the per-node
1197          * counters in determining if surplus huge pages can be
1198          * converted to normal huge pages in adjust_pool_surplus(). A
1199          * won't be able to increment the per-node counter, until the
1200          * lock is dropped by B, but B doesn't drop hugetlb_lock until
1201          * no more huge pages can be converted from surplus to normal
1202          * state (and doesn't try to convert again). Thus, we have a
1203          * case where a surplus huge page exists, the pool is grown, and
1204          * the surplus huge page still exists after, even though it
1205          * should just have been converted to a normal huge page. This
1206          * does not leak memory, though, as the hugepage will be freed
1207          * once it is out of use. It also does not allow the counters to
1208          * go out of whack in adjust_pool_surplus() as we don't modify
1209          * the node values until we've gotten the hugepage and only the
1210          * per-node value is checked there.
1211          */
1212         spin_lock(&hugetlb_lock);
1213         if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
1214                 spin_unlock(&hugetlb_lock);
1215                 return NULL;
1216         } else {
1217                 h->nr_huge_pages++;
1218                 h->surplus_huge_pages++;
1219         }
1220         spin_unlock(&hugetlb_lock);
1221
1222         if (nid == NUMA_NO_NODE)
1223                 page = alloc_pages(htlb_alloc_mask(h)|__GFP_COMP|
1224                                    __GFP_REPEAT|__GFP_NOWARN,
1225                                    huge_page_order(h));
1226         else
1227                 page = alloc_pages_exact_node(nid,
1228                         htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
1229                         __GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));
1230
1231         if (page && arch_prepare_hugepage(page)) {
1232                 __free_pages(page, huge_page_order(h));
1233                 page = NULL;
1234         }
1235
1236         spin_lock(&hugetlb_lock);
1237         if (page) {
1238                 INIT_LIST_HEAD(&page->lru);
1239                 r_nid = page_to_nid(page);
1240                 set_compound_page_dtor(page, free_huge_page);
1241                 set_hugetlb_cgroup(page, NULL);
1242                 /*
1243                  * We incremented the global counters already
1244                  */
1245                 h->nr_huge_pages_node[r_nid]++;
1246                 h->surplus_huge_pages_node[r_nid]++;
1247                 __count_vm_event(HTLB_BUDDY_PGALLOC);
1248         } else {
1249                 h->nr_huge_pages--;
1250                 h->surplus_huge_pages--;
1251                 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1252         }
1253         spin_unlock(&hugetlb_lock);
1254
1255         return page;
1256 }
1257
1258 /*
1259  * This allocation function is useful in the context where vma is irrelevant.
1260  * E.g. soft-offlining uses this function because it only cares about the
1261  * physical address of the error page.
1262  */
1263 struct page *alloc_huge_page_node(struct hstate *h, int nid)
1264 {
1265         struct page *page = NULL;
1266
1267         spin_lock(&hugetlb_lock);
1268         if (h->free_huge_pages - h->resv_huge_pages > 0)
1269                 page = dequeue_huge_page_node(h, nid);
1270         spin_unlock(&hugetlb_lock);
1271
1272         if (!page)
1273                 page = alloc_buddy_huge_page(h, nid);
1274
1275         return page;
1276 }
1277
1278 /*
1279  * Increase the hugetlb pool such that it can accommodate a reservation
1280  * of size 'delta'.
1281  */
1282 static int gather_surplus_pages(struct hstate *h, int delta)
1283 {
1284         struct list_head surplus_list;
1285         struct page *page, *tmp;
1286         int ret, i;
1287         int needed, allocated;
1288         bool alloc_ok = true;
1289
1290         needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
1291         if (needed <= 0) {
1292                 h->resv_huge_pages += delta;
1293                 return 0;
1294         }
1295
1296         allocated = 0;
1297         INIT_LIST_HEAD(&surplus_list);
1298
1299         ret = -ENOMEM;
1300 retry:
1301         spin_unlock(&hugetlb_lock);
1302         for (i = 0; i < needed; i++) {
1303                 page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
1304                 if (!page) {
1305                         alloc_ok = false;
1306                         break;
1307                 }
1308                 list_add(&page->lru, &surplus_list);
1309         }
1310         allocated += i;
1311
1312         /*
1313          * After retaking hugetlb_lock, we need to recalculate 'needed'
1314          * because either resv_huge_pages or free_huge_pages may have changed.
1315          */
1316         spin_lock(&hugetlb_lock);
1317         needed = (h->resv_huge_pages + delta) -
1318                         (h->free_huge_pages + allocated);
1319         if (needed > 0) {
1320                 if (alloc_ok)
1321                         goto retry;
1322                 /*
1323                  * We were not able to allocate enough pages to
1324                  * satisfy the entire reservation so we free what
1325                  * we've allocated so far.
1326                  */
1327                 goto free;
1328         }
1329         /*
1330          * The surplus_list now contains _at_least_ the number of extra pages
1331          * needed to accommodate the reservation.  Add the appropriate number
1332          * of pages to the hugetlb pool and free the extras back to the buddy
1333          * allocator.  Commit the entire reservation here to prevent another
1334          * process from stealing the pages as they are added to the pool but
1335          * before they are reserved.
1336          */
1337         needed += allocated;
1338         h->resv_huge_pages += delta;
1339         ret = 0;
1340
1341         /* Free the needed pages to the hugetlb pool */
1342         list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1343                 if ((--needed) < 0)
1344                         break;
1345                 /*
1346                  * This page is now managed by the hugetlb allocator and has
1347                  * no users -- drop the buddy allocator's reference.
1348                  */
1349                 put_page_testzero(page);
1350                 VM_BUG_ON_PAGE(page_count(page), page);
1351                 enqueue_huge_page(h, page);
1352         }
1353 free:
1354         spin_unlock(&hugetlb_lock);
1355
1356         /* Free unnecessary surplus pages to the buddy allocator */
1357         list_for_each_entry_safe(page, tmp, &surplus_list, lru)
1358                 put_page(page);
1359         spin_lock(&hugetlb_lock);
1360
1361         return ret;
1362 }
1363
1364 /*
1365  * When releasing a hugetlb pool reservation, any surplus pages that were
1366  * allocated to satisfy the reservation must be explicitly freed if they were
1367  * never used.
1368  * Called with hugetlb_lock held.
1369  */
1370 static void return_unused_surplus_pages(struct hstate *h,
1371                                         unsigned long unused_resv_pages)
1372 {
1373         unsigned long nr_pages;
1374
1375         /* Uncommit the reservation */
1376         h->resv_huge_pages -= unused_resv_pages;
1377
1378         /* Cannot return gigantic pages currently */
1379         if (hstate_is_gigantic(h))
1380                 return;
1381
1382         nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1383
1384         /*
1385          * We want to release as many surplus pages as possible, spread
1386          * evenly across all nodes with memory. Iterate across these nodes
1387          * until we can no longer free unreserved surplus pages. This occurs
1388          * when the nodes with surplus pages have no free pages.
1389  * free_pool_huge_page() will balance the freed pages across the
1390          * on-line nodes with memory and will handle the hstate accounting.
1391          */
1392         while (nr_pages--) {
1393                 if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
1394                         break;
1395                 cond_resched_lock(&hugetlb_lock);
1396         }
1397 }
1398
1399 /*
1400  * Determine if the huge page at addr within the vma has an associated
1401  * reservation.  Where it does not we will need to logically increase
1402  * reservation and actually increase subpool usage before an allocation
1403  * can occur.  Where any new reservation would be required the
1404  * reservation change is prepared, but not committed.  Once the page
1405  * has been allocated from the subpool and instantiated the change should
1406  * be committed via vma_commit_reservation.  No action is required on
1407  * failure.
1408  */
1409 static long vma_needs_reservation(struct hstate *h,
1410                         struct vm_area_struct *vma, unsigned long addr)
1411 {
1412         struct resv_map *resv;
1413         pgoff_t idx;
1414         long chg;
1415
1416         resv = vma_resv_map(vma);
1417         if (!resv)
1418                 return 1;
1419
1420         idx = vma_hugecache_offset(h, vma, addr);
1421         chg = region_chg(resv, idx, idx + 1);
1422
1423         if (vma->vm_flags & VM_MAYSHARE)
1424                 return chg;
1425         else
1426                 return chg < 0 ? chg : 0;
1427 }
1428 static void vma_commit_reservation(struct hstate *h,
1429                         struct vm_area_struct *vma, unsigned long addr)
1430 {
1431         struct resv_map *resv;
1432         pgoff_t idx;
1433
1434         resv = vma_resv_map(vma);
1435         if (!resv)
1436                 return;
1437
1438         idx = vma_hugecache_offset(h, vma, addr);
1439         region_add(resv, idx, idx + 1);
1440 }
1441
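/*
 * Allocate a huge page for a fault at 'addr' in 'vma': charge the subpool
 * and hugetlb cgroup as needed, try the pre-allocated pool first, and fall
 * back to a surplus allocation from the buddy allocator.
 */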
1442 static struct page *alloc_huge_page(struct vm_area_struct *vma,
1443                                     unsigned long addr, int avoid_reserve)
1444 {
1445         struct hugepage_subpool *spool = subpool_vma(vma);
1446         struct hstate *h = hstate_vma(vma);
1447         struct page *page;
1448         long chg;
1449         int ret, idx;
1450         struct hugetlb_cgroup *h_cg;
1451
1452         idx = hstate_index(h);
1453         /*
1454          * Processes that did not create the mapping will have no
1455          * reserves and will not have accounted against subpool
1456          * limit. Check that the subpool limit can be made before
1457          * satisfying the allocation.  MAP_NORESERVE mappings may also
1458          * need pages and subpool limit allocated if no reserve
1459          * mapping overlaps.
1460          */
1461         chg = vma_needs_reservation(h, vma, addr);
1462         if (chg < 0)
1463                 return ERR_PTR(-ENOMEM);
1464         if (chg || avoid_reserve)
1465                 if (hugepage_subpool_get_pages(spool, 1) < 0)
1466                         return ERR_PTR(-ENOSPC);
1467
1468         ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
1469         if (ret)
1470                 goto out_subpool_put;
1471
1472         spin_lock(&hugetlb_lock);
1473         page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, chg);
1474         if (!page) {
1475                 spin_unlock(&hugetlb_lock);
1476                 page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
1477                 if (!page)
1478                         goto out_uncharge_cgroup;
1479
1480                 spin_lock(&hugetlb_lock);
1481                 list_move(&page->lru, &h->hugepage_activelist);
1482                 /* Fall through */
1483         }
1484         hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
1485         spin_unlock(&hugetlb_lock);
1486
1487         set_page_private(page, (unsigned long)spool);
1488
1489         vma_commit_reservation(h, vma, addr);
1490         return page;
1491
1492 out_uncharge_cgroup:
1493         hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
1494 out_subpool_put:
1495         if (chg || avoid_reserve)
1496                 hugepage_subpool_put_pages(spool, 1);
1497         return ERR_PTR(-ENOSPC);
1498 }
1499
1500 /*
1501  * alloc_huge_page()'s wrapper which simply returns the page if allocation
1502  * succeeds, otherwise NULL. This function is called from new_vma_page(),
1503  * where no ERR_PTR() value is expected to be returned.
1504  */
1505 struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
1506                                 unsigned long addr, int avoid_reserve)
1507 {
1508         struct page *page = alloc_huge_page(vma, addr, avoid_reserve);
1509         if (IS_ERR(page))
1510                 page = NULL;
1511         return page;
1512 }
1513
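/*
 * Summary of the __weak default below: allocate one boot-time (gigantic)
 * huge page directly from memblock, round-robining across the allowed
 * nodes.  Architectures may override this.  Returns 1 on success, 0 if no
 * node could satisfy the allocation.
 */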
1514 int __weak alloc_bootmem_huge_page(struct hstate *h)
1515 {
1516         struct huge_bootmem_page *m;
1517         int nr_nodes, node;
1518
1519         for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
1520                 void *addr;
1521
1522                 addr = memblock_virt_alloc_try_nid_nopanic(
1523                                 huge_page_size(h), huge_page_size(h),
1524                                 0, BOOTMEM_ALLOC_ACCESSIBLE, node);
1525                 if (addr) {
1526                         /*
1527                          * Use the beginning of the huge page to store the
1528                          * huge_bootmem_page struct (until gather_bootmem
1529                          * puts them into the mem_map).
1530                          */
1531                         m = addr;
1532                         goto found;
1533                 }
1534         }
1535         return 0;
1536
1537 found:
1538         BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
1539         /* Put them into a private list first because mem_map is not up yet */
1540         list_add(&m->list, &huge_boot_pages);
1541         m->hstate = h;
1542         return 1;
1543 }
1544
1545 static void __init prep_compound_huge_page(struct page *page, int order)
1546 {
1547         if (unlikely(order > (MAX_ORDER - 1)))
1548                 prep_compound_gigantic_page(page, order);
1549         else
1550                 prep_compound_page(page, order);
1551 }
1552
1553 /* Put bootmem huge pages into the standard lists after mem_map is up */
1554 static void __init gather_bootmem_prealloc(void)
1555 {
1556         struct huge_bootmem_page *m;
1557
1558         list_for_each_entry(m, &huge_boot_pages, list) {
1559                 struct hstate *h = m->hstate;
1560                 struct page *page;
1561
1562 #ifdef CONFIG_HIGHMEM
1563                 page = pfn_to_page(m->phys >> PAGE_SHIFT);
1564                 memblock_free_late(__pa(m),
1565                                    sizeof(struct huge_bootmem_page));
1566 #else
1567                 page = virt_to_page(m);
1568 #endif
1569                 WARN_ON(page_count(page) != 1);
1570                 prep_compound_huge_page(page, h->order);
1571                 WARN_ON(PageReserved(page));
1572                 prep_new_huge_page(h, page, page_to_nid(page));
1573                 /*
1574                  * If we had gigantic hugepages allocated at boot time, we need
1575                  * to restore the 'stolen' pages to totalram_pages in order to
1576                  * fix confusing memory reports from free(1) and other
1577                  * side-effects, like CommitLimit going negative.
1578                  */
1579                 if (hstate_is_gigantic(h))
1580                         adjust_managed_page_count(page, 1 << h->order);
1581         }
1582 }
1583
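/*
 * Summary of the function below: pre-allocate the boot-time pool for @h.
 * Gigantic pages come from the bootmem allocator, everything else from the
 * buddy allocator.  If fewer pages than requested can be allocated,
 * max_huge_pages is lowered to the number actually obtained.
 */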
1584 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
1585 {
1586         unsigned long i;
1587
1588         for (i = 0; i < h->max_huge_pages; ++i) {
1589                 if (hstate_is_gigantic(h)) {
1590                         if (!alloc_bootmem_huge_page(h))
1591                                 break;
1592                 } else if (!alloc_fresh_huge_page(h,
1593                                          &node_states[N_MEMORY]))
1594                         break;
1595         }
1596         h->max_huge_pages = i;
1597 }
1598
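/*
 * Populate the pool of every non-gigantic hstate; gigantic pools were
 * already filled while the bootmem allocator was available.
 */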
1599 static void __init hugetlb_init_hstates(void)
1600 {
1601         struct hstate *h;
1602
1603         for_each_hstate(h) {
1604                 /* oversize hugepages were init'ed in early boot */
1605                 if (!hstate_is_gigantic(h))
1606                         hugetlb_hstate_alloc_pages(h);
1607         }
1608 }
1609
1610 static char * __init memfmt(char *buf, unsigned long n)
1611 {
1612         if (n >= (1UL << 30))
1613                 sprintf(buf, "%lu GB", n >> 30);
1614         else if (n >= (1UL << 20))
1615                 sprintf(buf, "%lu MB", n >> 20);
1616         else
1617                 sprintf(buf, "%lu KB", n >> 10);
1618         return buf;
1619 }
1620
1621 static void __init report_hugepages(void)
1622 {
1623         struct hstate *h;
1624
1625         for_each_hstate(h) {
1626                 char buf[32];
1627                 pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
1628                         memfmt(buf, huge_page_size(h)),
1629                         h->free_huge_pages);
1630         }
1631 }
1632
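/*
 * Summary of the helper below: when shrinking the pool on a CONFIG_HIGHMEM
 * system, release free huge pages that live in lowmem first, so that scarce
 * low memory is returned to the buddy allocator in preference to highmem
 * pages.
 */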
1633 #ifdef CONFIG_HIGHMEM
1634 static void try_to_free_low(struct hstate *h, unsigned long count,
1635                                                 nodemask_t *nodes_allowed)
1636 {
1637         int i;
1638
1639         if (hstate_is_gigantic(h))
1640                 return;
1641
1642         for_each_node_mask(i, *nodes_allowed) {
1643                 struct page *page, *next;
1644                 struct list_head *freel = &h->hugepage_freelists[i];
1645                 list_for_each_entry_safe(page, next, freel, lru) {
1646                         if (count >= h->nr_huge_pages)
1647                                 return;
1648                         if (PageHighMem(page))
1649                                 continue;
1650                         list_del(&page->lru);
1651                         update_and_free_page(h, page);
1652                         h->free_huge_pages--;
1653                         h->free_huge_pages_node[page_to_nid(page)]--;
1654                 }
1655         }
1656 }
1657 #else
1658 static inline void try_to_free_low(struct hstate *h, unsigned long count,
1659                                                 nodemask_t *nodes_allowed)
1660 {
1661 }
1662 #endif
1663
1664 /*
1665  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
1666  * balanced by operating on them in a round-robin fashion.
1667  * Returns 1 if an adjustment was made.
1668  */
1669 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
1670                                 int delta)
1671 {
1672         int nr_nodes, node;
1673
1674         VM_BUG_ON(delta != -1 && delta != 1);
1675
1676         if (delta < 0) {
1677                 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1678                         if (h->surplus_huge_pages_node[node])
1679                                 goto found;
1680                 }
1681         } else {
1682                 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1683                         if (h->surplus_huge_pages_node[node] <
1684                                         h->nr_huge_pages_node[node])
1685                                 goto found;
1686                 }
1687         }
1688         return 0;
1689
1690 found:
1691         h->surplus_huge_pages += delta;
1692         h->surplus_huge_pages_node[node] += delta;
1693         return 1;
1694 }
1695
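/*
 * Number of huge pages the pool is obliged to maintain: all allocated
 * pages minus the temporary surplus ones.
 */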
1696 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
1697 static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
1698                                                 nodemask_t *nodes_allowed)
1699 {
1700         unsigned long min_count, ret;
1701
1702         if (hstate_is_gigantic(h) && !gigantic_page_supported())
1703                 return h->max_huge_pages;
1704
1705         /*
1706          * Increase the pool size
1707          * First take pages out of surplus state.  Then make up the
1708          * remaining difference by allocating fresh huge pages.
1709          *
1710          * We might race with alloc_buddy_huge_page() here and be unable
1711          * to convert a surplus huge page to a normal huge page. That is
1712          * not critical, though, it just means the overall size of the
1713          * pool might be one hugepage larger than it needs to be, but
1714          * within all the constraints specified by the sysctls.
1715          */
1716         spin_lock(&hugetlb_lock);
1717         while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
1718                 if (!adjust_pool_surplus(h, nodes_allowed, -1))
1719                         break;
1720         }
1721
1722         while (count > persistent_huge_pages(h)) {
1723                 /*
1724                  * If this allocation races such that we no longer need the
1725                  * page, free_huge_page will handle it by freeing the page
1726                  * and reducing the surplus.
1727                  */
1728                 spin_unlock(&hugetlb_lock);
1729                 if (hstate_is_gigantic(h))
1730                         ret = alloc_fresh_gigantic_page(h, nodes_allowed);
1731                 else
1732                         ret = alloc_fresh_huge_page(h, nodes_allowed);
1733                 spin_lock(&hugetlb_lock);
1734                 if (!ret)
1735                         goto out;
1736
1737                 /* Bail for signals. Probably ctrl-c from user */
1738                 if (signal_pending(current))
1739                         goto out;
1740         }
1741
1742         /*
1743          * Decrease the pool size
1744          * First return free pages to the buddy allocator (being careful
1745          * to keep enough around to satisfy reservations).  Then place
1746          * pages into surplus state as needed so the pool will shrink
1747          * to the desired size as pages become free.
1748          *
1749          * By placing pages into the surplus state independent of the
1750          * overcommit value, we are allowing the surplus pool size to
1751          * exceed overcommit. There are few sane options here. Since
1752          * alloc_buddy_huge_page() is checking the global counter,
1753          * though, we'll note that we're not allowed to exceed surplus
1754          * and won't grow the pool anywhere else. Not until one of the
1755          * sysctls are changed, or the surplus pages go out of use.
1756          */
1757         min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
1758         min_count = max(count, min_count);
1759         try_to_free_low(h, min_count, nodes_allowed);
1760         while (min_count < persistent_huge_pages(h)) {
1761                 if (!free_pool_huge_page(h, nodes_allowed, 0))
1762                         break;
1763                 cond_resched_lock(&hugetlb_lock);
1764         }
1765         while (count < persistent_huge_pages(h)) {
1766                 if (!adjust_pool_surplus(h, nodes_allowed, 1))
1767                         break;
1768         }
1769 out:
1770         ret = persistent_huge_pages(h);
1771         spin_unlock(&hugetlb_lock);
1772         return ret;
1773 }
1774
1775 #define HSTATE_ATTR_RO(_name) \
1776         static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
1777
1778 #define HSTATE_ATTR(_name) \
1779         static struct kobj_attribute _name##_attr = \
1780                 __ATTR(_name, 0644, _name##_show, _name##_store)
1781
1782 static struct kobject *hugepages_kobj;
1783 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
1784
1785 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
1786
1787 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
1788 {
1789         int i;
1790
1791         for (i = 0; i < HUGE_MAX_HSTATE; i++)
1792                 if (hstate_kobjs[i] == kobj) {
1793                         if (nidp)
1794                                 *nidp = NUMA_NO_NODE;
1795                         return &hstates[i];
1796                 }
1797
1798         return kobj_to_node_hstate(kobj, nidp);
1799 }
1800
1801 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
1802                                         struct kobj_attribute *attr, char *buf)
1803 {
1804         struct hstate *h;
1805         unsigned long nr_huge_pages;
1806         int nid;
1807
1808         h = kobj_to_hstate(kobj, &nid);
1809         if (nid == NUMA_NO_NODE)
1810                 nr_huge_pages = h->nr_huge_pages;
1811         else
1812                 nr_huge_pages = h->nr_huge_pages_node[nid];
1813
1814         return sprintf(buf, "%lu\n", nr_huge_pages);
1815 }
1816
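/*
 * Common helper for the sysfs and sysctl nr_hugepages writers: work out the
 * nodemask to operate on (all memory nodes, the mempolicy nodes, or a
 * single node) and resize the persistent pool accordingly.
 */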
1817 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
1818                                            struct hstate *h, int nid,
1819                                            unsigned long count, size_t len)
1820 {
1821         int err;
1822         NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
1823
1824         if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
1825                 err = -EINVAL;
1826                 goto out;
1827         }
1828
1829         if (nid == NUMA_NO_NODE) {
1830                 /*
1831                  * global hstate attribute
1832                  */
1833                 if (!(obey_mempolicy &&
1834                                 init_nodemask_of_mempolicy(nodes_allowed))) {
1835                         NODEMASK_FREE(nodes_allowed);
1836                         nodes_allowed = &node_states[N_MEMORY];
1837                 }
1838         } else if (nodes_allowed) {
1839                 /*
1840                  * per node hstate attribute: adjust count to global,
1841                  * but restrict alloc/free to the specified node.
1842                  */
1843                 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
1844                 init_nodemask_of_node(nodes_allowed, nid);
1845         } else
1846                 nodes_allowed = &node_states[N_MEMORY];
1847
1848         h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
1849
1850         if (nodes_allowed != &node_states[N_MEMORY])
1851                 NODEMASK_FREE(nodes_allowed);
1852
1853         return len;
1854 out:
1855         NODEMASK_FREE(nodes_allowed);
1856         return err;
1857 }
1858
1859 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
1860                                          struct kobject *kobj, const char *buf,
1861                                          size_t len)
1862 {
1863         struct hstate *h;
1864         unsigned long count;
1865         int nid;
1866         int err;
1867
1868         err = kstrtoul(buf, 10, &count);
1869         if (err)
1870                 return err;
1871
1872         h = kobj_to_hstate(kobj, &nid);
1873         return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
1874 }
1875
1876 static ssize_t nr_hugepages_show(struct kobject *kobj,
1877                                        struct kobj_attribute *attr, char *buf)
1878 {
1879         return nr_hugepages_show_common(kobj, attr, buf);
1880 }
1881
1882 static ssize_t nr_hugepages_store(struct kobject *kobj,
1883                struct kobj_attribute *attr, const char *buf, size_t len)
1884 {
1885         return nr_hugepages_store_common(false, kobj, buf, len);
1886 }
1887 HSTATE_ATTR(nr_hugepages);
1888
1889 #ifdef CONFIG_NUMA
1890
1891 /*
1892  * hstate attribute for optionally mempolicy-based constraint on persistent
1893  * huge page alloc/free.
1894  */
1895 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
1896                                        struct kobj_attribute *attr, char *buf)
1897 {
1898         return nr_hugepages_show_common(kobj, attr, buf);
1899 }
1900
1901 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
1902                struct kobj_attribute *attr, const char *buf, size_t len)
1903 {
1904         return nr_hugepages_store_common(true, kobj, buf, len);
1905 }
1906 HSTATE_ATTR(nr_hugepages_mempolicy);
1907 #endif
1908
1909
1910 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
1911                                         struct kobj_attribute *attr, char *buf)
1912 {
1913         struct hstate *h = kobj_to_hstate(kobj, NULL);
1914         return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
1915 }
1916
1917 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
1918                 struct kobj_attribute *attr, const char *buf, size_t count)
1919 {
1920         int err;
1921         unsigned long input;
1922         struct hstate *h = kobj_to_hstate(kobj, NULL);
1923
1924         if (hstate_is_gigantic(h))
1925                 return -EINVAL;
1926
1927         err = kstrtoul(buf, 10, &input);
1928         if (err)
1929                 return err;
1930
1931         spin_lock(&hugetlb_lock);
1932         h->nr_overcommit_huge_pages = input;
1933         spin_unlock(&hugetlb_lock);
1934
1935         return count;
1936 }
1937 HSTATE_ATTR(nr_overcommit_hugepages);
1938
1939 static ssize_t free_hugepages_show(struct kobject *kobj,
1940                                         struct kobj_attribute *attr, char *buf)
1941 {
1942         struct hstate *h;
1943         unsigned long free_huge_pages;
1944         int nid;
1945
1946         h = kobj_to_hstate(kobj, &nid);
1947         if (nid == NUMA_NO_NODE)
1948                 free_huge_pages = h->free_huge_pages;
1949         else
1950                 free_huge_pages = h->free_huge_pages_node[nid];
1951
1952         return sprintf(buf, "%lu\n", free_huge_pages);
1953 }
1954 HSTATE_ATTR_RO(free_hugepages);
1955
1956 static ssize_t resv_hugepages_show(struct kobject *kobj,
1957                                         struct kobj_attribute *attr, char *buf)
1958 {
1959         struct hstate *h = kobj_to_hstate(kobj, NULL);
1960         return sprintf(buf, "%lu\n", h->resv_huge_pages);
1961 }
1962 HSTATE_ATTR_RO(resv_hugepages);
1963
1964 static ssize_t surplus_hugepages_show(struct kobject *kobj,
1965                                         struct kobj_attribute *attr, char *buf)
1966 {
1967         struct hstate *h;
1968         unsigned long surplus_huge_pages;
1969         int nid;
1970
1971         h = kobj_to_hstate(kobj, &nid);
1972         if (nid == NUMA_NO_NODE)
1973                 surplus_huge_pages = h->surplus_huge_pages;
1974         else
1975                 surplus_huge_pages = h->surplus_huge_pages_node[nid];
1976
1977         return sprintf(buf, "%lu\n", surplus_huge_pages);
1978 }
1979 HSTATE_ATTR_RO(surplus_hugepages);
1980
1981 static struct attribute *hstate_attrs[] = {
1982         &nr_hugepages_attr.attr,
1983         &nr_overcommit_hugepages_attr.attr,
1984         &free_hugepages_attr.attr,
1985         &resv_hugepages_attr.attr,
1986         &surplus_hugepages_attr.attr,
1987 #ifdef CONFIG_NUMA
1988         &nr_hugepages_mempolicy_attr.attr,
1989 #endif
1990         NULL,
1991 };
1992
1993 static struct attribute_group hstate_attr_group = {
1994         .attrs = hstate_attrs,
1995 };
1996
1997 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
1998                                     struct kobject **hstate_kobjs,
1999                                     struct attribute_group *hstate_attr_group)
2000 {
2001         int retval;
2002         int hi = hstate_index(h);
2003
2004         hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
2005         if (!hstate_kobjs[hi])
2006                 return -ENOMEM;
2007
2008         retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
2009         if (retval)
2010                 kobject_put(hstate_kobjs[hi]);
2011
2012         return retval;
2013 }
2014
2015 static void __init hugetlb_sysfs_init(void)
2016 {
2017         struct hstate *h;
2018         int err;
2019
2020         hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
2021         if (!hugepages_kobj)
2022                 return;
2023
2024         for_each_hstate(h) {
2025                 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
2026                                          hstate_kobjs, &hstate_attr_group);
2027                 if (err)
2028                         pr_err("Hugetlb: Unable to add hstate %s\n", h->name);
2029         }
2030 }
2031
2032 #ifdef CONFIG_NUMA
2033
2034 /*
2035  * node_hstate/s - associate per node hstate attributes, via their kobjects,
2036  * with node devices in node_devices[] using a parallel array.  The array
2037  * index of a node device or _hstate == node id.
2038  * This is here to avoid any static dependency of the node device driver, in
2039  * the base kernel, on the hugetlb module.
2040  */
2041 struct node_hstate {
2042         struct kobject          *hugepages_kobj;
2043         struct kobject          *hstate_kobjs[HUGE_MAX_HSTATE];
2044 };
2045 struct node_hstate node_hstates[MAX_NUMNODES];
2046
2047 /*
2048  * A subset of global hstate attributes for node devices
2049  */
2050 static struct attribute *per_node_hstate_attrs[] = {
2051         &nr_hugepages_attr.attr,
2052         &free_hugepages_attr.attr,
2053         &surplus_hugepages_attr.attr,
2054         NULL,
2055 };
2056
2057 static struct attribute_group per_node_hstate_attr_group = {
2058         .attrs = per_node_hstate_attrs,
2059 };
2060
2061 /*
2062  * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
2063  * Returns node id via non-NULL nidp.
2064  */
2065 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2066 {
2067         int nid;
2068
2069         for (nid = 0; nid < nr_node_ids; nid++) {
2070                 struct node_hstate *nhs = &node_hstates[nid];
2071                 int i;
2072                 for (i = 0; i < HUGE_MAX_HSTATE; i++)
2073                         if (nhs->hstate_kobjs[i] == kobj) {
2074                                 if (nidp)
2075                                         *nidp = nid;
2076                                 return &hstates[i];
2077                         }
2078         }
2079
2080         BUG();
2081         return NULL;
2082 }
2083
2084 /*
2085  * Unregister hstate attributes from a single node device.
2086  * No-op if no hstate attributes attached.
2087  */
2088 static void hugetlb_unregister_node(struct node *node)
2089 {
2090         struct hstate *h;
2091         struct node_hstate *nhs = &node_hstates[node->dev.id];
2092
2093         if (!nhs->hugepages_kobj)
2094                 return;         /* no hstate attributes */
2095
2096         for_each_hstate(h) {
2097                 int idx = hstate_index(h);
2098                 if (nhs->hstate_kobjs[idx]) {
2099                         kobject_put(nhs->hstate_kobjs[idx]);
2100                         nhs->hstate_kobjs[idx] = NULL;
2101                 }
2102         }
2103
2104         kobject_put(nhs->hugepages_kobj);
2105         nhs->hugepages_kobj = NULL;
2106 }
2107
2108 /*
2109  * hugetlb module exit:  unregister hstate attributes from node devices
2110  * that have them.
2111  */
2112 static void hugetlb_unregister_all_nodes(void)
2113 {
2114         int nid;
2115
2116         /*
2117          * disable node device registrations.
2118          */
2119         register_hugetlbfs_with_node(NULL, NULL);
2120
2121         /*
2122          * remove hstate attributes from any nodes that have them.
2123          */
2124         for (nid = 0; nid < nr_node_ids; nid++)
2125                 hugetlb_unregister_node(node_devices[nid]);
2126 }
2127
2128 /*
2129  * Register hstate attributes for a single node device.
2130  * No-op if attributes already registered.
2131  */
2132 static void hugetlb_register_node(struct node *node)
2133 {
2134         struct hstate *h;
2135         struct node_hstate *nhs = &node_hstates[node->dev.id];
2136         int err;
2137
2138         if (nhs->hugepages_kobj)
2139                 return;         /* already allocated */
2140
2141         nhs->hugepages_kobj = kobject_create_and_add("hugepages",
2142                                                         &node->dev.kobj);
2143         if (!nhs->hugepages_kobj)
2144                 return;
2145
2146         for_each_hstate(h) {
2147                 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
2148                                                 nhs->hstate_kobjs,
2149                                                 &per_node_hstate_attr_group);
2150                 if (err) {
2151                         pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
2152                                 h->name, node->dev.id);
2153                         hugetlb_unregister_node(node);
2154                         break;
2155                 }
2156         }
2157 }
2158
2159 /*
2160  * hugetlb init time:  register hstate attributes for all registered node
2161  * devices of nodes that have memory.  All on-line nodes should have
2162  * registered their associated device by this time.
2163  */
2164 static void __init hugetlb_register_all_nodes(void)
2165 {
2166         int nid;
2167
2168         for_each_node_state(nid, N_MEMORY) {
2169                 struct node *node = node_devices[nid];
2170                 if (node->dev.id == nid)
2171                         hugetlb_register_node(node);
2172         }
2173
2174         /*
2175          * Let the node device driver know we're here so it can
2176          * [un]register hstate attributes on node hotplug.
2177          */
2178         register_hugetlbfs_with_node(hugetlb_register_node,
2179                                      hugetlb_unregister_node);
2180 }
2181 #else   /* !CONFIG_NUMA */
2182
2183 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2184 {
2185         BUG();
2186         if (nidp)
2187                 *nidp = -1;
2188         return NULL;
2189 }
2190
2191 static void hugetlb_unregister_all_nodes(void) { }
2192
2193 static void hugetlb_register_all_nodes(void) { }
2194
2195 #endif
2196
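/*
 * Module unload: remove the per-node and global sysfs hierarchies and free
 * the fault serialization mutex table.
 */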
2197 static void __exit hugetlb_exit(void)
2198 {
2199         struct hstate *h;
2200
2201         hugetlb_unregister_all_nodes();
2202
2203         for_each_hstate(h) {
2204                 kobject_put(hstate_kobjs[hstate_index(h)]);
2205         }
2206
2207         kobject_put(hugepages_kobj);
2208         kfree(htlb_fault_mutex_table);
2209 }
2210 module_exit(hugetlb_exit);
2211
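/*
 * Module/boot init: make sure a default hstate exists, populate the
 * boot-time pools, create the sysfs hierarchy and cgroup files, and size
 * the fault mutex table (8 * num_possible_cpus(), rounded up to a power of
 * two, on SMP).
 */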
2212 static int __init hugetlb_init(void)
2213 {
2214         int i;
2215
2216         if (!hugepages_supported())
2217                 return 0;
2218
2219         if (!size_to_hstate(default_hstate_size)) {
2220                 default_hstate_size = HPAGE_SIZE;
2221                 if (!size_to_hstate(default_hstate_size))
2222                         hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
2223         }
2224         default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
2225         if (default_hstate_max_huge_pages)
2226                 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
2227
2228         hugetlb_init_hstates();
2229         gather_bootmem_prealloc();
2230         report_hugepages();
2231
2232         hugetlb_sysfs_init();
2233         hugetlb_register_all_nodes();
2234         hugetlb_cgroup_file_init();
2235
2236 #ifdef CONFIG_SMP
2237         num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
2238 #else
2239         num_fault_mutexes = 1;
2240 #endif
2241         htlb_fault_mutex_table =
2242                 kmalloc(sizeof(struct mutex) * num_fault_mutexes, GFP_KERNEL);
2243         BUG_ON(!htlb_fault_mutex_table);
2244
2245         for (i = 0; i < num_fault_mutexes; i++)
2246                 mutex_init(&htlb_fault_mutex_table[i]);
2247         return 0;
2248 }
2249 module_init(hugetlb_init);
2250
2251 /* Should be called on processing a hugepagesz=... option */
2252 void __init hugetlb_add_hstate(unsigned order)
2253 {
2254         struct hstate *h;
2255         unsigned long i;
2256
2257         if (size_to_hstate(PAGE_SIZE << order)) {
2258                 pr_warning("hugepagesz= specified twice, ignoring\n");
2259                 return;
2260         }
2261         BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
2262         BUG_ON(order == 0);
2263         h = &hstates[hugetlb_max_hstate++];
2264         h->order = order;
2265         h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
2266         h->nr_huge_pages = 0;
2267         h->free_huge_pages = 0;
2268         for (i = 0; i < MAX_NUMNODES; ++i)
2269                 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
2270         INIT_LIST_HEAD(&h->hugepage_activelist);
2271         h->next_nid_to_alloc = first_node(node_states[N_MEMORY]);
2272         h->next_nid_to_free = first_node(node_states[N_MEMORY]);
2273         snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
2274                                         huge_page_size(h)/1024);
2275
2276         parsed_hstate = h;
2277 }
2278
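/*
 * Handle a "hugepages=N" boot parameter: record the requested pool size for
 * the most recently parsed hstate (or the default hstate if none has been
 * parsed yet).  Gigantic hstates are allocated right away, while the
 * bootmem allocator is still usable.
 */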
2279 static int __init hugetlb_nrpages_setup(char *s)
2280 {
2281         unsigned long *mhp;
2282         static unsigned long *last_mhp;
2283
2284         /*
2285          * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
2286          * so this hugepages= parameter goes to the "default hstate".
2287          */
2288         if (!hugetlb_max_hstate)
2289                 mhp = &default_hstate_max_huge_pages;
2290         else
2291                 mhp = &parsed_hstate->max_huge_pages;
2292
2293         if (mhp == last_mhp) {
2294                 pr_warning("hugepages= specified twice without "
2295                            "interleaving hugepagesz=, ignoring\n");
2296                 return 1;
2297         }
2298
2299         if (sscanf(s, "%lu", mhp) <= 0)
2300                 *mhp = 0;
2301
2302         /*
2303          * Global state is always initialized later in hugetlb_init.
2304          * But we need to allocate >= MAX_ORDER hstates here early to still
2305          * use the bootmem allocator.
2306          */
2307         if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
2308                 hugetlb_hstate_alloc_pages(parsed_hstate);
2309
2310         last_mhp = mhp;
2311
2312         return 1;
2313 }
2314 __setup("hugepages=", hugetlb_nrpages_setup);
2315
2316 static int __init hugetlb_default_setup(char *s)
2317 {
2318         default_hstate_size = memparse(s, &s);
2319         return 1;
2320 }
2321 __setup("default_hugepagesz=", hugetlb_default_setup);
2322
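/* Sum a per-node counter array over the nodes in the current task's cpuset. */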
2323 static unsigned int cpuset_mems_nr(unsigned int *array)
2324 {
2325         int node;
2326         unsigned int nr = 0;
2327
2328         for_each_node_mask(node, cpuset_current_mems_allowed)
2329                 nr += array[node];
2330
2331         return nr;
2332 }
2333
2334 #ifdef CONFIG_SYSCTL
2335 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
2336                          struct ctl_table *table, int write,
2337                          void __user *buffer, size_t *length, loff_t *ppos)
2338 {
2339         struct hstate *h = &default_hstate;
2340         unsigned long tmp = h->max_huge_pages;
2341         int ret;
2342
2343         if (!hugepages_supported())
2344                 return -ENOTSUPP;
2345
2346         table->data = &tmp;
2347         table->maxlen = sizeof(unsigned long);
2348         ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2349         if (ret)
2350                 goto out;
2351
2352         if (write)
2353                 ret = __nr_hugepages_store_common(obey_mempolicy, h,
2354                                                   NUMA_NO_NODE, tmp, *length);
2355 out:
2356         return ret;
2357 }
2358
2359 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
2360                           void __user *buffer, size_t *length, loff_t *ppos)
2361 {
2362
2363         return hugetlb_sysctl_handler_common(false, table, write,
2364                                                         buffer, length, ppos);
2365 }
2366
2367 #ifdef CONFIG_NUMA
2368 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
2369                           void __user *buffer, size_t *length, loff_t *ppos)
2370 {
2371         return hugetlb_sysctl_handler_common(true, table, write,
2372                                                         buffer, length, ppos);
2373 }
2374 #endif /* CONFIG_NUMA */
2375
2376 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
2377                         void __user *buffer,
2378                         size_t *length, loff_t *ppos)
2379 {
2380         struct hstate *h = &default_hstate;
2381         unsigned long tmp;
2382         int ret;
2383
2384         if (!hugepages_supported())
2385                 return -ENOTSUPP;
2386
2387         tmp = h->nr_overcommit_huge_pages;
2388
2389         if (write && hstate_is_gigantic(h))
2390                 return -EINVAL;
2391
2392         table->data = &tmp;
2393         table->maxlen = sizeof(unsigned long);
2394         ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2395         if (ret)
2396                 goto out;
2397
2398         if (write) {
2399                 spin_lock(&hugetlb_lock);
2400                 h->nr_overcommit_huge_pages = tmp;
2401                 spin_unlock(&hugetlb_lock);
2402         }
2403 out:
2404         return ret;
2405 }
2406
2407 #endif /* CONFIG_SYSCTL */
2408
2409 void hugetlb_report_meminfo(struct seq_file *m)
2410 {
2411         struct hstate *h = &default_hstate;
2412         if (!hugepages_supported())
2413                 return;
2414         seq_printf(m,
2415                         "HugePages_Total:   %5lu\n"
2416                         "HugePages_Free:    %5lu\n"
2417                         "HugePages_Rsvd:    %5lu\n"
2418                         "HugePages_Surp:    %5lu\n"
2419                         "Hugepagesize:   %8lu kB\n",
2420                         h->nr_huge_pages,
2421                         h->free_huge_pages,
2422                         h->resv_huge_pages,
2423                         h->surplus_huge_pages,
2424                         1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2425 }
2426
2427 int hugetlb_report_node_meminfo(int nid, char *buf)
2428 {
2429         struct hstate *h = &default_hstate;
2430         if (!hugepages_supported())
2431                 return 0;
2432         return sprintf(buf,
2433                 "Node %d HugePages_Total: %5u\n"
2434                 "Node %d HugePages_Free:  %5u\n"
2435                 "Node %d HugePages_Surp:  %5u\n",
2436                 nid, h->nr_huge_pages_node[nid],
2437                 nid, h->free_huge_pages_node[nid],
2438                 nid, h->surplus_huge_pages_node[nid]);
2439 }
2440
2441 void hugetlb_show_meminfo(void)
2442 {
2443         struct hstate *h;
2444         int nid;
2445
2446         if (!hugepages_supported())
2447                 return;
2448
2449         for_each_node_state(nid, N_MEMORY)
2450                 for_each_hstate(h)
2451                         pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
2452                                 nid,
2453                                 h->nr_huge_pages_node[nid],
2454                                 h->free_huge_pages_node[nid],
2455                                 h->surplus_huge_pages_node[nid],
2456                                 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2457 }
2458
2459 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
2460 unsigned long hugetlb_total_pages(void)
2461 {
2462         struct hstate *h;
2463         unsigned long nr_total_pages = 0;
2464
2465         for_each_hstate(h)
2466                 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
2467         return nr_total_pages;
2468 }
2469
2470 static int hugetlb_acct_memory(struct hstate *h, long delta)
2471 {
2472         int ret = -ENOMEM;
2473
2474         spin_lock(&hugetlb_lock);
2475         /*
2476          * When cpuset is configured, it breaks the strict hugetlb page
2477          * reservation as the accounting is done on a global variable. Such
2478          * reservation is completely rubbish in the presence of cpuset because
2479          * the reservation is not checked against page availability for the
2480          * current cpuset. Applications can still potentially be OOM'ed by
2481          * the kernel for lack of free htlb pages in the cpuset the task is
2482          * in. Attempting to enforce strict accounting with cpuset is almost
2483          * impossible (or too ugly) because cpusets are so fluid that tasks
2484          * or memory nodes can be dynamically moved between cpusets.
2485          *
2486          * The change of semantics for shared hugetlb mapping with cpuset is
2487          * undesirable. However, in order to preserve some of the semantics,
2488          * we fall back to check against current free page availability as
2489          * a best attempt and hopefully to minimize the impact of changing
2490          * semantics that cpuset has.
2491          */
2492         if (delta > 0) {
2493                 if (gather_surplus_pages(h, delta) < 0)
2494                         goto out;
2495
2496                 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
2497                         return_unused_surplus_pages(h, delta);
2498                         goto out;
2499                 }
2500         }
2501
2502         ret = 0;
2503         if (delta < 0)
2504                 return_unused_surplus_pages(h, (unsigned long) -delta);
2505
2506 out:
2507         spin_unlock(&hugetlb_lock);
2508         return ret;
2509 }
2510
2511 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
2512 {
2513         struct resv_map *resv = vma_resv_map(vma);
2514
2515         /*
2516          * This new VMA should share its sibling's reservation map if present.
2517          * The VMA will only ever have a valid reservation map pointer where
2518          * it is being copied for another still existing VMA.  As that VMA
2519          * has a reference to the reservation map it cannot disappear until
2520          * after this open call completes.  It is therefore safe to take a
2521          * new reference here without additional locking.
2522          */
2523         if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
2524                 kref_get(&resv->refs);
2525 }
2526
2527 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2528 {
2529         struct hstate *h = hstate_vma(vma);
2530         struct resv_map *resv = vma_resv_map(vma);
2531         struct hugepage_subpool *spool = subpool_vma(vma);
2532         unsigned long reserve, start, end;
2533         long gbl_reserve;
2534
2535         if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
2536                 return;
2537
2538         start = vma_hugecache_offset(h, vma, vma->vm_start);
2539         end = vma_hugecache_offset(h, vma, vma->vm_end);
2540
2541         reserve = (end - start) - region_count(resv, start, end);
2542
2543         kref_put(&resv->refs, resv_map_release);
2544
2545         if (reserve) {
2546                 /*
2547                  * Decrement reserve counts.  The global reserve count may be
2548                  * adjusted if the subpool has a minimum size.
2549                  */
2550                 gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
2551                 hugetlb_acct_memory(h, -gbl_reserve);
2552         }
2553 }
2554
2555 /*
2556  * We cannot handle pagefaults against hugetlb pages at all.  They cause
2557  * handle_mm_fault() to try to instantiate regular-sized pages in the
2558  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
2559  * this far.
2560  */
2561 static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2562 {
2563         BUG();
2564         return 0;
2565 }
2566
2567 const struct vm_operations_struct hugetlb_vm_ops = {
2568         .fault = hugetlb_vm_op_fault,
2569         .open = hugetlb_vm_op_open,
2570         .close = hugetlb_vm_op_close,
2571 };
2572
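/*
 * Construct a huge PTE for @page using the VMA's page protection: marked
 * young and huge, and additionally dirty and writable for writable
 * mappings.
 */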
2573 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
2574                                 int writable)
2575 {
2576         pte_t entry;
2577
2578         if (writable) {
2579                 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
2580                                          vma->vm_page_prot)));
2581         } else {
2582                 entry = huge_pte_wrprotect(mk_huge_pte(page,
2583                                            vma->vm_page_prot));
2584         }
2585         entry = pte_mkyoung(entry);
2586         entry = pte_mkhuge(entry);
2587         entry = arch_make_huge_pte(entry, vma, page, writable);
2588
2589         return entry;
2590 }
2591
2592 static void set_huge_ptep_writable(struct vm_area_struct *vma,
2593                                    unsigned long address, pte_t *ptep)
2594 {
2595         pte_t entry;
2596
2597         entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
2598         if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
2599                 update_mmu_cache(vma, address, ptep);
2600 }
2601
2602 static int is_hugetlb_entry_migration(pte_t pte)
2603 {
2604         swp_entry_t swp;
2605
2606         if (huge_pte_none(pte) || pte_present(pte))
2607                 return 0;
2608         swp = pte_to_swp_entry(pte);
2609         if (non_swap_entry(swp) && is_migration_entry(swp))
2610                 return 1;
2611         else
2612                 return 0;
2613 }
2614
2615 static int is_hugetlb_entry_hwpoisoned(pte_t pte)
2616 {
2617         swp_entry_t swp;
2618
2619         if (huge_pte_none(pte) || pte_present(pte))
2620                 return 0;
2621         swp = pte_to_swp_entry(pte);
2622         if (non_swap_entry(swp) && is_hwpoison_entry(swp))
2623                 return 1;
2624         else
2625                 return 0;
2626 }
2627
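/*
 * Summary of the function below: copy @vma's huge page table entries from
 * @src to @dst at fork time.  For private (COW-able) mappings both copies
 * end up write-protected; migration and hwpoison swap entries are
 * propagated as-is.
 */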
2628 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
2629                             struct vm_area_struct *vma)
2630 {
2631         pte_t *src_pte, *dst_pte, entry;
2632         struct page *ptepage;
2633         unsigned long addr;
2634         int cow;
2635         struct hstate *h = hstate_vma(vma);
2636         unsigned long sz = huge_page_size(h);
2637         unsigned long mmun_start;       /* For mmu_notifiers */
2638         unsigned long mmun_end;         /* For mmu_notifiers */
2639         int ret = 0;
2640
2641         cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
2642
2643         mmun_start = vma->vm_start;
2644         mmun_end = vma->vm_end;
2645         if (cow)
2646                 mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);
2647
2648         for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
2649                 spinlock_t *src_ptl, *dst_ptl;
2650                 src_pte = huge_pte_offset(src, addr);
2651                 if (!src_pte)
2652                         continue;
2653                 dst_pte = huge_pte_alloc(dst, addr, sz);
2654                 if (!dst_pte) {
2655                         ret = -ENOMEM;
2656                         break;
2657                 }
2658
2659                 /* If the pagetables are shared don't copy or take references */
2660                 if (dst_pte == src_pte)
2661                         continue;
2662
2663                 dst_ptl = huge_pte_lock(h, dst, dst_pte);
2664                 src_ptl = huge_pte_lockptr(h, src, src_pte);
2665                 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
2666                 entry = huge_ptep_get(src_pte);
2667                 if (huge_pte_none(entry)) { /* skip none entry */
2668                         ;
2669                 } else if (unlikely(is_hugetlb_entry_migration(entry) ||
2670                                     is_hugetlb_entry_hwpoisoned(entry))) {
2671                         swp_entry_t swp_entry = pte_to_swp_entry(entry);
2672
2673                         if (is_write_migration_entry(swp_entry) && cow) {
2674                                 /*
2675                                  * COW mappings require pages in both
2676                                  * parent and child to be set to read.
2677                                  */
2678                                 make_migration_entry_read(&swp_entry);
2679                                 entry = swp_entry_to_pte(swp_entry);
2680                                 set_huge_pte_at(src, addr, src_pte, entry);
2681                         }
2682                         set_huge_pte_at(dst, addr, dst_pte, entry);
2683                 } else {
2684                         if (cow) {
2685                                 huge_ptep_set_wrprotect(src, addr, src_pte);
2686                                 mmu_notifier_invalidate_range(src, mmun_start,
2687                                                                    mmun_end);
2688                         }
2689                         entry = huge_ptep_get(src_pte);
2690                         ptepage = pte_page(entry);
2691                         get_page(ptepage);
2692                         page_dup_rmap(ptepage);
2693                         set_huge_pte_at(dst, addr, dst_pte, entry);
2694                 }
2695                 spin_unlock(src_ptl);
2696                 spin_unlock(dst_ptl);
2697         }
2698
2699         if (cow)
2700                 mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end);
2701
2702         return ret;
2703 }
2704
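/*
 * Summary of the function below: unmap the huge pages covering
 * [@start, @end) in @vma.  If @ref_page is supplied, only that page is
 * unmapped and the VMA is flagged with HPAGE_RESV_UNMAPPED so that later
 * faults fail instead of silently losing data.
 */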
2705 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
2706                             unsigned long start, unsigned long end,
2707                             struct page *ref_page)
2708 {
2709         int force_flush = 0;
2710         struct mm_struct *mm = vma->vm_mm;
2711         unsigned long address;
2712         pte_t *ptep;
2713         pte_t pte;
2714         spinlock_t *ptl;
2715         struct page *page;
2716         struct hstate *h = hstate_vma(vma);
2717         unsigned long sz = huge_page_size(h);
2718         const unsigned long mmun_start = start; /* For mmu_notifiers */
2719         const unsigned long mmun_end   = end;   /* For mmu_notifiers */
2720
2721         WARN_ON(!is_vm_hugetlb_page(vma));
2722         BUG_ON(start & ~huge_page_mask(h));
2723         BUG_ON(end & ~huge_page_mask(h));
2724
2725         tlb_start_vma(tlb, vma);
2726         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
2727         address = start;
2728 again:
2729         for (; address < end; address += sz) {
2730                 ptep = huge_pte_offset(mm, address);
2731                 if (!ptep)
2732                         continue;
2733
2734                 ptl = huge_pte_lock(h, mm, ptep);
2735                 if (huge_pmd_unshare(mm, &address, ptep))
2736                         goto unlock;
2737
2738                 pte = huge_ptep_get(ptep);
2739                 if (huge_pte_none(pte))
2740                         goto unlock;
2741
2742                 /*
2743                  * Migrating hugepage or HWPoisoned hugepage is already
2744                  * unmapped and its refcount is dropped, so just clear pte here.
2745                  */
2746                 if (unlikely(!pte_present(pte))) {
2747                         huge_pte_clear(mm, address, ptep);
2748                         goto unlock;
2749                 }
2750
2751                 page = pte_page(pte);
2752                 /*
2753                  * If a reference page is supplied, it is because a specific
2754                  * page is being unmapped, not a range. Ensure the page we
2755                  * are about to unmap is the actual page of interest.
2756                  */
2757                 if (ref_page) {
2758                         if (page != ref_page)
2759                                 goto unlock;
2760
2761                         /*
2762                          * Mark the VMA as having unmapped its page so that
2763                          * future faults in this VMA will fail rather than
2764                          * looking like data was lost
2765                          */
2766                         set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
2767                 }
2768
2769                 pte = huge_ptep_get_and_clear(mm, address, ptep);
2770                 tlb_remove_tlb_entry(tlb, ptep, address);
2771                 if (huge_pte_dirty(pte))
2772                         set_page_dirty(page);
2773
2774                 page_remove_rmap(page);
2775                 force_flush = !__tlb_remove_page(tlb, page);
2776                 if (force_flush) {
2777                         address += sz;
2778                         spin_unlock(ptl);
2779                         break;
2780                 }
2781                 /* Bail out after unmapping reference page if supplied */
2782                 if (ref_page) {
2783                         spin_unlock(ptl);
2784                         break;
2785                 }
2786 unlock:
2787                 spin_unlock(ptl);
2788         }
2789         /*
2790          * mmu_gather ran out of room to batch pages; we break out of
2791          * the PTE lock to avoid doing the potentially expensive TLB invalidate
2792          * and page-free while holding it.
2793          */
2794         if (force_flush) {
2795                 force_flush = 0;
2796                 tlb_flush_mmu(tlb);
2797                 if (address < end && !ref_page)
2798                         goto again;
2799         }
2800         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2801         tlb_end_vma(tlb, vma);
2802 }
2803
2804 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
2805                           struct vm_area_struct *vma, unsigned long start,
2806                           unsigned long end, struct page *ref_page)
2807 {
2808         __unmap_hugepage_range(tlb, vma, start, end, ref_page);
2809
2810         /*
2811          * Clear this flag so that x86's huge_pmd_share page_table_shareable
2812          * test will fail on a vma being torn down, and not grab a page table
2813          * on its way out.  We're lucky that the flag has such an appropriate
2814          * name, and can in fact be safely cleared here. We could clear it
2815          * before the __unmap_hugepage_range above, but all that's necessary
2816          * is to clear it before releasing the i_mmap_rwsem. This works
2817          * because in the context this is called, the VMA is about to be
2818          * destroyed and the i_mmap_rwsem is held.
2819          */
2820         vma->vm_flags &= ~VM_MAYSHARE;
2821 }
2822
2823 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
2824                           unsigned long end, struct page *ref_page)
2825 {
2826         struct mm_struct *mm;
2827         struct mmu_gather tlb;
2828
2829         mm = vma->vm_mm;
2830
2831         tlb_gather_mmu(&tlb, mm, start, end);
2832         __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
2833         tlb_finish_mmu(&tlb, start, end);
2834 }
2835
2836 /*
2837  * This is called when the original mapper is failing to COW a MAP_PRIVATE
2838  * mapping it owns the reserve page for. The intention is to unmap the page
2839  * from other VMAs and let the children be SIGKILLed if they are faulting the
2840  * same region.
2841  */
2842 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
2843                               struct page *page, unsigned long address)
2844 {
2845         struct hstate *h = hstate_vma(vma);
2846         struct vm_area_struct *iter_vma;
2847         struct address_space *mapping;
2848         pgoff_t pgoff;
2849
2850         /*
2851          * vm_pgoff is in PAGE_SIZE units, hence the different calculation
2852          * from page cache lookup which is in HPAGE_SIZE units.
2853          */
2854         address = address & huge_page_mask(h);
2855         pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
2856                         vma->vm_pgoff;
2857         mapping = file_inode(vma->vm_file)->i_mapping;
2858
2859         /*
2860          * Take the mapping lock for the duration of the table walk. As
2861          * this mapping should be shared between all the VMAs,
2862          * __unmap_hugepage_range() is called as the lock is already held
2863          * __unmap_hugepage_range() is called with the lock already held.
2864         i_mmap_lock_write(mapping);
2865         vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
2866                 /* Do not unmap the current VMA */
2867                 if (iter_vma == vma)
2868                         continue;
2869
2870                 /*
2871                  * Unmap the page from other VMAs without their own reserves.
2872                  * They get marked to be SIGKILLed if they fault in these
2873                  * areas. This is because a future no-page fault on this VMA
2874                  * could insert a zeroed page instead of the data existing
2875                  * from the time of fork. This would look like data corruption
2876                  */
2877                 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
2878                         unmap_hugepage_range(iter_vma, address,
2879                                              address + huge_page_size(h), page);
2880         }
2881         i_mmap_unlock_write(mapping);
2882 }
2883
2884 /*
2885  * Hugetlb_cow() should be called with page lock of the original hugepage held.
2886  * Called with hugetlb_instantiation_mutex held and pte_page locked so we
2887  * cannot race with other handlers or page migration.
2888  * Keep the pte_same checks anyway to make transition from the mutex easier.
2889  */
2890 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
2891                         unsigned long address, pte_t *ptep, pte_t pte,
2892                         struct page *pagecache_page, spinlock_t *ptl)
2893 {
2894         struct hstate *h = hstate_vma(vma);
2895         struct page *old_page, *new_page;
2896         int ret = 0, outside_reserve = 0;
2897         unsigned long mmun_start;       /* For mmu_notifiers */
2898         unsigned long mmun_end;         /* For mmu_notifiers */
2899
2900         old_page = pte_page(pte);
2901
2902 retry_avoidcopy:
2903         /* If no-one else is actually using this page, avoid the copy
2904          * and just make the page writable */
2905         if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
2906                 page_move_anon_rmap(old_page, vma, address);
2907                 set_huge_ptep_writable(vma, address, ptep);
2908                 return 0;
2909         }
2910
2911         /*
2912          * If the process that created a MAP_PRIVATE mapping is about to
2913          * perform a COW due to a shared page count, attempt to satisfy
2914          * the allocation without using the existing reserves. The pagecache
2915          * page is used to determine if the reserve at this address was
2916          * consumed or not. If reserves were used, a partially faulted mapping
2917          * at the time of fork() could consume its reserves on COW instead
2918          * of the full address range.
2919          */
2920         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
2921                         old_page != pagecache_page)
2922                 outside_reserve = 1;
2923
2924         page_cache_get(old_page);
2925
2926         /*
2927          * Drop page table lock as buddy allocator may be called. It will
2928          * be acquired again before returning to the caller, as expected.
2929          */
2930         spin_unlock(ptl);
2931         new_page = alloc_huge_page(vma, address, outside_reserve);
2932
2933         if (IS_ERR(new_page)) {
2934                 /*
2935                  * If a process owning a MAP_PRIVATE mapping fails to COW,
2936                  * it is due to references held by a child and an insufficient
2937                  * huge page pool. To guarantee the original mapper's
2938                  * reliability, unmap the page from child processes. The child
2939                  * may get SIGKILLed if it later faults.
2940                  */
2941                 if (outside_reserve) {
2942                         page_cache_release(old_page);
2943                         BUG_ON(huge_pte_none(pte));
2944                         unmap_ref_private(mm, vma, old_page, address);
2945                         BUG_ON(huge_pte_none(pte));
2946                         spin_lock(ptl);
2947                         ptep = huge_pte_offset(mm, address & huge_page_mask(h));
2948                         if (likely(ptep &&
2949                                    pte_same(huge_ptep_get(ptep), pte)))
2950                                 goto retry_avoidcopy;
2951                         /*
2952                          * A race occurred while re-acquiring the page
2953                          * table lock, and our job is done.
2954                          */
2955                         return 0;
2956                 }
2957
2958                 ret = (PTR_ERR(new_page) == -ENOMEM) ?
2959                         VM_FAULT_OOM : VM_FAULT_SIGBUS;
2960                 goto out_release_old;
2961         }
2962
2963         /*
2964          * When the original hugepage is a shared one, it does not have
2965          * an anon_vma prepared.
2966          */
2967         if (unlikely(anon_vma_prepare(vma))) {
2968                 ret = VM_FAULT_OOM;
2969                 goto out_release_all;
2970         }
2971
2972         copy_user_huge_page(new_page, old_page, address, vma,
2973                             pages_per_huge_page(h));
2974         __SetPageUptodate(new_page);
2975
2976         mmun_start = address & huge_page_mask(h);
2977         mmun_end = mmun_start + huge_page_size(h);
2978         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
2979
2980         /*
2981          * Retake the page table lock to check for racing updates
2982          * before the page tables are altered
2983          */
2984         spin_lock(ptl);
2985         ptep = huge_pte_offset(mm, address & huge_page_mask(h));
2986         if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
2987                 ClearPagePrivate(new_page);
2988
2989                 /* Break COW */
2990                 huge_ptep_clear_flush(vma, address, ptep);
2991                 mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
2992                 set_huge_pte_at(mm, address, ptep,
2993                                 make_huge_pte(vma, new_page, 1));
2994                 page_remove_rmap(old_page);
2995                 hugepage_add_new_anon_rmap(new_page, vma, address);
2996                 /* Make the old page be freed below */
2997                 new_page = old_page;
2998         }
2999         spin_unlock(ptl);
3000         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3001 out_release_all:
3002         page_cache_release(new_page);
3003 out_release_old:
3004         page_cache_release(old_page);
3005
3006         spin_lock(ptl); /* Caller expects lock to be held */
3007         return ret;
3008 }
3009
3010 /* Return the pagecache page at a given address within a VMA */
3011 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
3012                         struct vm_area_struct *vma, unsigned long address)
3013 {
3014         struct address_space *mapping;
3015         pgoff_t idx;
3016
3017         mapping = vma->vm_file->f_mapping;
3018         idx = vma_hugecache_offset(h, vma, address);
3019
3020         return find_lock_page(mapping, idx);
3021 }
3022
3023 /*
3024  * Return whether there is a pagecache page to back the given address within the VMA.
3025  * Caller follow_hugetlb_page() holds the page table lock so we cannot lock_page.
3026  */
3027 static bool hugetlbfs_pagecache_present(struct hstate *h,
3028                         struct vm_area_struct *vma, unsigned long address)
3029 {
3030         struct address_space *mapping;
3031         pgoff_t idx;
3032         struct page *page;
3033
3034         mapping = vma->vm_file->f_mapping;
3035         idx = vma_hugecache_offset(h, vma, address);
3036
3037         page = find_get_page(mapping, idx);
3038         if (page)
3039                 put_page(page);
3040         return page != NULL;
3041 }
3042
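/*
 * Handle a fault on a hugetlb pte that is currently none: look up (or
 * allocate and insert) the backing page in the page cache for shared
 * mappings, or allocate a new anonymous hugepage for private mappings,
 * then install the pte under the page table lock.
 */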
3043 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
3044                            struct address_space *mapping, pgoff_t idx,
3045                            unsigned long address, pte_t *ptep, unsigned int flags)
3046 {
3047         struct hstate *h = hstate_vma(vma);
3048         int ret = VM_FAULT_SIGBUS;
3049         int anon_rmap = 0;
3050         unsigned long size;
3051         struct page *page;
3052         pte_t new_pte;
3053         spinlock_t *ptl;
3054
3055         /*
3056          * Currently, we are forced to kill the process in the event the
3057          * original mapper has unmapped pages from the child due to a failed
3058          * COW. Warn that such a situation has occurred as it may not be obvious
3059          */
3060         if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
3061                 pr_warning("PID %d killed due to inadequate hugepage pool\n",
3062                            current->pid);
3063                 return ret;
3064         }
3065
3066         /*
3067          * Use page lock to guard against racing truncation
3068          * before we get page_table_lock.
3069          */
3070 retry:
3071         page = find_lock_page(mapping, idx);
3072         if (!page) {
3073                 size = i_size_read(mapping->host) >> huge_page_shift(h);
3074                 if (idx >= size)
3075                         goto out;
3076                 page = alloc_huge_page(vma, address, 0);
3077                 if (IS_ERR(page)) {
3078                         ret = PTR_ERR(page);
3079                         if (ret == -ENOMEM)
3080                                 ret = VM_FAULT_OOM;
3081                         else
3082                                 ret = VM_FAULT_SIGBUS;
3083                         goto out;
3084                 }
3085                 clear_huge_page(page, address, pages_per_huge_page(h));
3086                 __SetPageUptodate(page);
3087
3088                 if (vma->vm_flags & VM_MAYSHARE) {
3089                         int err;
3090                         struct inode *inode = mapping->host;
3091
3092                         err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
3093                         if (err) {
3094                                 put_page(page);
3095                                 if (err == -EEXIST)
3096                                         goto retry;
3097                                 goto out;
3098                         }
3099                         ClearPagePrivate(page);
3100
3101                         spin_lock(&inode->i_lock);
3102                         inode->i_blocks += blocks_per_huge_page(h);
3103                         spin_unlock(&inode->i_lock);
3104                 } else {
3105                         lock_page(page);
3106                         if (unlikely(anon_vma_prepare(vma))) {
3107                                 ret = VM_FAULT_OOM;
3108                                 goto backout_unlocked;
3109                         }
3110                         anon_rmap = 1;
3111                 }
3112         } else {
3113                 /*
3114                  * If a memory error occurs between mmap() and fault, some processes
3115                  * don't have a hwpoisoned swap entry for the errored virtual address.
3116                  * So we need to block the hugepage fault with a PG_hwpoison bit check.
3117                  */
3118                 if (unlikely(PageHWPoison(page))) {
3119                         ret = VM_FAULT_HWPOISON |
3120                                 VM_FAULT_SET_HINDEX(hstate_index(h));
3121                         goto backout_unlocked;
3122                 }
3123         }
3124
3125         /*
3126          * If we are going to COW a private mapping later, we examine the
3127          * pending reservations for this page now. This will ensure that
3128          * any allocations necessary to record that reservation occur outside
3129          * the spinlock.
3130          */
3131         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
3132                 if (vma_needs_reservation(h, vma, address) < 0) {
3133                         ret = VM_FAULT_OOM;
3134                         goto backout_unlocked;
3135                 }
3136
3137         ptl = huge_pte_lockptr(h, mm, ptep);
3138         spin_lock(ptl);
3139         size = i_size_read(mapping->host) >> huge_page_shift(h);
3140         if (idx >= size)
3141                 goto backout;
3142
3143         ret = 0;
3144         if (!huge_pte_none(huge_ptep_get(ptep)))
3145                 goto backout;
3146
3147         if (anon_rmap) {
3148                 ClearPagePrivate(page);
3149                 hugepage_add_new_anon_rmap(page, vma, address);
3150         } else
3151                 page_dup_rmap(page);
3152         new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
3153                                 && (vma->vm_flags & VM_SHARED)));
3154         set_huge_pte_at(mm, address, ptep, new_pte);
3155
3156         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3157                 /* Optimization, do the COW without a second fault */
3158                 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
3159         }
3160
3161         spin_unlock(ptl);
3162         unlock_page(page);
3163 out:
3164         return ret;
3165
3166 backout:
3167         spin_unlock(ptl);
3168 backout_unlocked:
3169         unlock_page(page);
3170         put_page(page);
3171         goto out;
3172 }
3173
3174 #ifdef CONFIG_SMP
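/*
 * Hash the faulting context to pick one of the fault mutexes: shared
 * mappings key on (mapping, index), private mappings on
 * (mm, address >> huge_page_shift).
 */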
3175 static u32 fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3176                             struct vm_area_struct *vma,
3177                             struct address_space *mapping,
3178                             pgoff_t idx, unsigned long address)
3179 {
3180         unsigned long key[2];
3181         u32 hash;
3182
3183         if (vma->vm_flags & VM_SHARED) {
3184                 key[0] = (unsigned long) mapping;
3185                 key[1] = idx;
3186         } else {
3187                 key[0] = (unsigned long) mm;
3188                 key[1] = address >> huge_page_shift(h);
3189         }
3190
3191         hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
3192
3193         return hash & (num_fault_mutexes - 1);
3194 }
3195 #else
3196 /*
3197  * For uniprocessor systems we always use a single mutex, so just
3198  * return 0 and avoid the hashing overhead.
3199  */
3200 static u32 fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3201                             struct vm_area_struct *vma,
3202                             struct address_space *mapping,
3203                             pgoff_t idx, unsigned long address)
3204 {
3205         return 0;
3206 }
3207 #endif
3208
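/*
 * Top-level hugetlb fault handler: wait on migration/hwpoison entries,
 * serialize against concurrent faults on the same page via the fault mutex
 * table, and dispatch to hugetlb_no_page() or hugetlb_cow() as needed.
 */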
3209 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3210                         unsigned long address, unsigned int flags)
3211 {
3212         pte_t *ptep, entry;
3213         spinlock_t *ptl;
3214         int ret;
3215         u32 hash;
3216         pgoff_t idx;
3217         struct page *page = NULL;
3218         struct page *pagecache_page = NULL;
3219         struct hstate *h = hstate_vma(vma);
3220         struct address_space *mapping;
3221         int need_wait_lock = 0;
3222
3223         address &= huge_page_mask(h);
3224
3225         ptep = huge_pte_offset(mm, address);
3226         if (ptep) {
3227                 entry = huge_ptep_get(ptep);
3228                 if (unlikely(is_hugetlb_entry_migration(entry))) {
3229                         migration_entry_wait_huge(vma, mm, ptep);
3230                         return 0;
3231                 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
3232                         return VM_FAULT_HWPOISON_LARGE |
3233                                 VM_FAULT_SET_HINDEX(hstate_index(h));
3234         }
3235
3236         ptep = huge_pte_alloc(mm, address, huge_page_size(h));
3237         if (!ptep)
3238                 return VM_FAULT_OOM;
3239
3240         mapping = vma->vm_file->f_mapping;
3241         idx = vma_hugecache_offset(h, vma, address);
3242
3243         /*
3244          * Serialize hugepage allocation and instantiation, so that we don't
3245          * get spurious allocation failures if two CPUs race to instantiate
3246          * the same page in the page cache.
3247          */
3248         hash = fault_mutex_hash(h, mm, vma, mapping, idx, address);
3249         mutex_lock(&htlb_fault_mutex_table[hash]);
3250
3251         entry = huge_ptep_get(ptep);
3252         if (huge_pte_none(entry)) {
3253                 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
3254                 goto out_mutex;
3255         }
3256
3257         ret = 0;
3258
3259         /*
3260          * entry could be a migration/hwpoison entry at this point, so this
3261          * check prevents the kernel from going below assuming that we have
3262          * an active hugepage in the pagecache. This goto expects the second
3263          * page fault, where the is_hugetlb_entry_(migration|hwpoisoned)
3264          * checks will properly handle it.
3265          */
3266         if (!pte_present(entry))
3267                 goto out_mutex;
3268
3269         /*
3270          * If we are going to COW the mapping later, we examine the pending
3271          * reservations for this page now. This will ensure that any
3272          * allocations necessary to record that reservation occur outside the
3273          * spinlock. For private mappings, we also look up the pagecache
3274          * page now as it is used to determine if a reservation has been
3275          * consumed.
3276          */
3277         if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
3278                 if (vma_needs_reservation(h, vma, address) < 0) {
3279                         ret = VM_FAULT_OOM;
3280                         goto out_mutex;
3281                 }
3282
3283                 if (!(vma->vm_flags & VM_MAYSHARE))
3284                         pagecache_page = hugetlbfs_pagecache_page(h,
3285                                                                 vma, address);
3286         }
3287
3288         ptl = huge_pte_lock(h, mm, ptep);
3289
3290         /* Check for a racing update before calling hugetlb_cow */
3291         if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
3292                 goto out_ptl;
3293
3294         /*
3295          * hugetlb_cow() requires page locks of pte_page(entry) and
3296          * pagecache_page, so here we need to take the former one
3297          * when page != pagecache_page or !pagecache_page.
3298          */
3299         page = pte_page(entry);
3300         if (page != pagecache_page)
3301                 if (!trylock_page(page)) {
3302                         need_wait_lock = 1;
3303                         goto out_ptl;
3304                 }
3305
3306         get_page(page);
3307
3308         if (flags & FAULT_FLAG_WRITE) {
3309                 if (!huge_pte_write(entry)) {
3310                         ret = hugetlb_cow(mm, vma, address, ptep, entry,
3311                                         pagecache_page, ptl);
3312                         goto out_put_page;
3313                 }
3314                 entry = huge_pte_mkdirty(entry);
3315         }
3316         entry = pte_mkyoung(entry);
3317         if (huge_ptep_set_access_flags(vma, address, ptep, entry,
3318                                                 flags & FAULT_FLAG_WRITE))
3319                 update_mmu_cache(vma, address, ptep);
3320 out_put_page:
3321         if (page != pagecache_page)
3322                 unlock_page(page);
3323         put_page(page);
3324 out_ptl:
3325         spin_unlock(ptl);
3326
3327         if (pagecache_page) {
3328                 unlock_page(pagecache_page);
3329                 put_page(pagecache_page);
3330         }
3331 out_mutex:
3332         mutex_unlock(&htlb_fault_mutex_table[hash]);
3333         /*
3334          * Generally it's safe to hold a refcount while waiting for the page
3335          * lock. But here we just wait to defer the next page fault and avoid a
3336          * busy loop, and the page is not touched after it is unlocked and
3337          * before we return from the current page fault. So we are safe from
3338          * accessing a freed page, even if we wait here without taking a refcount.
3339          */
3340         if (need_wait_lock)
3341                 wait_on_page_locked(page);
3342         return ret;
3343 }
3344
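/*
 * Walk the hugetlb pages covering *nr_pages pages starting at *position,
 * faulting them in as necessary, and fill the pages[]/vmas[] arrays.
 * Returns the updated page count i, or -EFAULT if it is still zero.
 */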
3345 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
3346                          struct page **pages, struct vm_area_struct **vmas,
3347                          unsigned long *position, unsigned long *nr_pages,
3348                          long i, unsigned int flags)
3349 {
3350         unsigned long pfn_offset;
3351         unsigned long vaddr = *position;
3352         unsigned long remainder = *nr_pages;
3353         struct hstate *h = hstate_vma(vma);
3354
3355         while (vaddr < vma->vm_end && remainder) {
3356                 pte_t *pte;
3357                 spinlock_t *ptl = NULL;
3358                 int absent;
3359                 struct page *page;
3360
3361                 /*
3362                  * If we have a pending SIGKILL, don't keep faulting pages and
3363                  * potentially allocating memory.
3364                  */
3365                 if (unlikely(fatal_signal_pending(current))) {
3366                         remainder = 0;
3367                         break;
3368                 }
3369
3370                 /*
3371                  * Some archs (sparc64, sh*) have multiple pte_ts per
3372                  * hugepage.  We have to make sure we get the first one,
3373                  * for the page indexing below to work.
3374                  *
3375                  * Note that the page table lock is not held when pte is NULL.
3376                  */
3377                 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
3378                 if (pte)
3379                         ptl = huge_pte_lock(h, mm, pte);
3380                 absent = !pte || huge_pte_none(huge_ptep_get(pte));
3381
3382                 /*
3383                  * When coredumping, it suits get_dump_page if we just return
3384                  * an error where there's an empty slot with no huge pagecache
3385                  * to back it.  This way, we avoid allocating a hugepage, and
3386                  * the sparse dumpfile avoids allocating disk blocks, but its
3387                  * huge holes still show up with zeroes where they need to be.
3388                  */
3389                 if (absent && (flags & FOLL_DUMP) &&
3390                     !hugetlbfs_pagecache_present(h, vma, vaddr)) {
3391                         if (pte)
3392                                 spin_unlock(ptl);
3393                         remainder = 0;
3394                         break;
3395                 }
3396
3397                 /*
3398                  * We need to call hugetlb_fault for both hugepages under migration
3399                  * (in which case hugetlb_fault waits for the migration) and
3400                  * hwpoisoned hugepages (in which case we need to prevent the
3401                  * caller from accessing them). In order to do this, we use
3402                  * is_swap_pte here instead of is_hugetlb_entry_migration and
3403                  * is_hugetlb_entry_hwpoisoned. This is because it simply covers
3404                  * both cases, and because we can't follow correct pages
3405                  * directly from any kind of swap entry.
3406                  */
3407                 if (absent || is_swap_pte(huge_ptep_get(pte)) ||
3408                     ((flags & FOLL_WRITE) &&
3409                       !huge_pte_write(huge_ptep_get(pte)))) {
3410                         int ret;
3411
3412                         if (pte)
3413                                 spin_unlock(ptl);
3414                         ret = hugetlb_fault(mm, vma, vaddr,
3415                                 (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
3416                         if (!(ret & VM_FAULT_ERROR))
3417                                 continue;
3418
3419                         remainder = 0;
3420                         break;
3421                 }
3422
3423                 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
3424                 page = pte_page(huge_ptep_get(pte));
3425 same_page:
3426                 if (pages) {
3427                         pages[i] = mem_map_offset(page, pfn_offset);
3428                         get_page_foll(pages[i]);
3429                 }
3430
3431                 if (vmas)
3432                         vmas[i] = vma;
3433
3434                 vaddr += PAGE_SIZE;
3435                 ++pfn_offset;
3436                 --remainder;
3437                 ++i;
3438                 if (vaddr < vma->vm_end && remainder &&
3439                                 pfn_offset < pages_per_huge_page(h)) {
3440                         /*
3441                          * We use pfn_offset to avoid touching the pageframes
3442                          * of this compound page.
3443                          */
3444                         goto same_page;
3445                 }
3446                 spin_unlock(ptl);
3447         }
3448         *nr_pages = remainder;
3449         *position = vaddr;
3450
3451         return i ? i : -EFAULT;
3452 }
3453
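/*
 * Apply newprot to every present hugetlb pte in [address, end), unsharing
 * shared pmds and downgrading writable migration entries along the way.
 * Returns the number of base pages whose protection was changed.
 */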
3454 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
3455                 unsigned long address, unsigned long end, pgprot_t newprot)
3456 {
3457         struct mm_struct *mm = vma->vm_mm;
3458         unsigned long start = address;
3459         pte_t *ptep;
3460         pte_t pte;
3461         struct hstate *h = hstate_vma(vma);
3462         unsigned long pages = 0;
3463
3464         BUG_ON(address >= end);
3465         flush_cache_range(vma, address, end);
3466
3467         mmu_notifier_invalidate_range_start(mm, start, end);
3468         i_mmap_lock_write(vma->vm_file->f_mapping);
3469         for (; address < end; address += huge_page_size(h)) {
3470                 spinlock_t *ptl;
3471                 ptep = huge_pte_offset(mm, address);
3472                 if (!ptep)
3473                         continue;
3474                 ptl = huge_pte_lock(h, mm, ptep);
3475                 if (huge_pmd_unshare(mm, &address, ptep)) {
3476                         pages++;
3477                         spin_unlock(ptl);
3478                         continue;
3479                 }
3480                 pte = huge_ptep_get(ptep);
3481                 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
3482                         spin_unlock(ptl);
3483                         continue;
3484                 }
3485                 if (unlikely(is_hugetlb_entry_migration(pte))) {
3486                         swp_entry_t entry = pte_to_swp_entry(pte);
3487
3488                         if (is_write_migration_entry(entry)) {
3489                                 pte_t newpte;
3490
3491                                 make_migration_entry_read(&entry);
3492                                 newpte = swp_entry_to_pte(entry);
3493                                 set_huge_pte_at(mm, address, ptep, newpte);
3494                                 pages++;
3495                         }
3496                         spin_unlock(ptl);
3497                         continue;
3498                 }
3499                 if (!huge_pte_none(pte)) {
3500                         pte = huge_ptep_get_and_clear(mm, address, ptep);
3501                         pte = pte_mkhuge(huge_pte_modify(pte, newprot));
3502                         pte = arch_make_huge_pte(pte, vma, NULL, 0);
3503                         set_huge_pte_at(mm, address, ptep, pte);
3504                         pages++;
3505                 }
3506                 spin_unlock(ptl);
3507         }
3508         /*
3509          * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
3510          * may have cleared our pud entry and done put_page on the page table:
3511          * once we release i_mmap_rwsem, another task can do the final put_page
3512          * and that page table be reused and filled with junk.
3513          */
3514         flush_tlb_range(vma, start, end);
3515         mmu_notifier_invalidate_range(mm, start, end);
3516         i_mmap_unlock_write(vma->vm_file->f_mapping);
3517         mmu_notifier_invalidate_range_end(mm, start, end);
3518
3519         return pages << h->order;
3520 }
3521
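/*
 * Reserve hugepages for the file range [from, to): charge the subpool and
 * the global pool, and record the region in the inode's reservation map for
 * shared mappings or in a new per-VMA map for private mappings.
 */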
3522 int hugetlb_reserve_pages(struct inode *inode,
3523                                         long from, long to,
3524                                         struct vm_area_struct *vma,
3525                                         vm_flags_t vm_flags)
3526 {
3527         long ret, chg;
3528         struct hstate *h = hstate_inode(inode);
3529         struct hugepage_subpool *spool = subpool_inode(inode);
3530         struct resv_map *resv_map;
3531         long gbl_reserve;
3532
3533         /*
3534          * Only apply hugepage reservation if asked. At fault time, an
3535          * attempt will be made for VM_NORESERVE mappings to allocate a page
3536          * without using reserves.
3537          */
3538         if (vm_flags & VM_NORESERVE)
3539                 return 0;
3540
3541         /*
3542          * Shared mappings base their reservation on the number of pages that
3543          * are already allocated on behalf of the file. Private mappings need
3544          * to reserve the full area even if read-only as mprotect() may be
3545          * called to make the mapping read-write. Assume !vma is a shm mapping.
3546          */
3547         if (!vma || vma->vm_flags & VM_MAYSHARE) {
3548                 resv_map = inode_resv_map(inode);
3549
3550                 chg = region_chg(resv_map, from, to);
3551
3552         } else {
3553                 resv_map = resv_map_alloc();
3554                 if (!resv_map)
3555                         return -ENOMEM;
3556
3557                 chg = to - from;
3558
3559                 set_vma_resv_map(vma, resv_map);
3560                 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
3561         }
3562
3563         if (chg < 0) {
3564                 ret = chg;
3565                 goto out_err;
3566         }
3567
3568         /*
3569          * There must be enough pages in the subpool for the mapping. If
3570          * the subpool has a minimum size, there may be some global
3571          * reservations already in place (gbl_reserve).
3572          */
3573         gbl_reserve = hugepage_subpool_get_pages(spool, chg);
3574         if (gbl_reserve < 0) {
3575                 ret = -ENOSPC;
3576                 goto out_err;
3577         }
3578
3579         /*
3580          * Check that enough hugepages are available for the reservation.
3581          * Hand the pages back to the subpool if there are not.
3582          */
3583         ret = hugetlb_acct_memory(h, gbl_reserve);
3584         if (ret < 0) {
3585                 /* put back original number of pages, chg */
3586                 (void)hugepage_subpool_put_pages(spool, chg);
3587                 goto out_err;
3588         }
3589
3590         /*
3591          * Account for the reservations made. Shared mappings record regions
3592          * that have reservations as they are shared by multiple VMAs.
3593          * When the last VMA disappears, the region map says how much
3594          * the reservation was and the page cache tells how much of
3595          * the reservation was consumed. Private mappings are per-VMA and
3596          * only the consumed reservations are tracked. When the VMA
3597          * disappears, the original reservation is the VMA size and the
3598          * consumed reservations are stored in the map. Hence, nothing
3599          * else has to be done for private mappings here
3600          */
3601         if (!vma || vma->vm_flags & VM_MAYSHARE)
3602                 region_add(resv_map, from, to);
3603         return 0;
3604 out_err:
3605         if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3606                 kref_put(&resv_map->refs, resv_map_release);
3607         return ret;
3608 }
3609
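/*
 * Release reservations beyond @offset after truncation: trim the inode's
 * reservation map, adjust the inode block count, and return the unused
 * reservations to the subpool and the global pool.
 */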
3610 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
3611 {
3612         struct hstate *h = hstate_inode(inode);
3613         struct resv_map *resv_map = inode_resv_map(inode);
3614         long chg = 0;
3615         struct hugepage_subpool *spool = subpool_inode(inode);
3616         long gbl_reserve;
3617
3618         if (resv_map)
3619                 chg = region_truncate(resv_map, offset);
3620         spin_lock(&inode->i_lock);
3621         inode->i_blocks -= (blocks_per_huge_page(h) * freed);
3622         spin_unlock(&inode->i_lock);
3623
3624         /*
3625          * If the subpool has a minimum size, the number of global
3626          * reservations to be released may be adjusted.
3627          */
3628         gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
3629         hugetlb_acct_memory(h, -gbl_reserve);
3630 }
3631
3632 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
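/*
 * Return the address in @svma that maps the same file offset as @addr in
 * @vma when the two VMAs can share a pmd page table page (matching flags
 * and PUD-aligned coverage), or 0 if sharing is not possible.
 */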
3633 static unsigned long page_table_shareable(struct vm_area_struct *svma,
3634                                 struct vm_area_struct *vma,
3635                                 unsigned long addr, pgoff_t idx)
3636 {
3637         unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
3638                                 svma->vm_start;
3639         unsigned long sbase = saddr & PUD_MASK;
3640         unsigned long s_end = sbase + PUD_SIZE;
3641
3642         /* Allow segments to share if only one is marked locked */
3643         unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED;
3644         unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED;
3645
3646         /*
3647          * Match the virtual addresses, permissions and the alignment of the
3648          * page table page.
3649          */
3650         if (pmd_index(addr) != pmd_index(saddr) ||
3651             vm_flags != svm_flags ||
3652             sbase < svma->vm_start || svma->vm_end < s_end)
3653                 return 0;
3654
3655         return saddr;
3656 }
3657
3658 static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
3659 {
3660         unsigned long base = addr & PUD_MASK;
3661         unsigned long end = base + PUD_SIZE;
3662
3663         /*
3664          * check on proper vm_flags and page table alignment
3665          */
3666         if (vma->vm_flags & VM_MAYSHARE &&
3667             vma->vm_start <= base && end <= vma->vm_end)
3668                 return 1;
3669         return 0;
3670 }
3671
3672 /*
3673  * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
3674  * and returns the corresponding pte. While this is not necessary for the
3675  * !shared pmd case because we can allocate the pmd later as well, it makes the
3676  * code much cleaner. pmd allocation is essential for the shared case because
3677  * pud has to be populated inside the same i_mmap_rwsem section - otherwise
3678  * racing tasks could either miss the sharing (see huge_pte_offset) or select a
3679  * bad pmd for sharing.
3680  */
3681 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
3682 {
3683         struct vm_area_struct *vma = find_vma(mm, addr);
3684         struct address_space *mapping = vma->vm_file->f_mapping;
3685         pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
3686                         vma->vm_pgoff;
3687         struct vm_area_struct *svma;
3688         unsigned long saddr;
3689         pte_t *spte = NULL;
3690         pte_t *pte;
3691         spinlock_t *ptl;
3692
3693         if (!vma_shareable(vma, addr))
3694                 return (pte_t *)pmd_alloc(mm, pud, addr);
3695
3696         i_mmap_lock_write(mapping);
3697         vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
3698                 if (svma == vma)
3699                         continue;
3700
3701                 saddr = page_table_shareable(svma, vma, addr, idx);
3702                 if (saddr) {
3703                         spte = huge_pte_offset(svma->vm_mm, saddr);
3704                         if (spte) {
3705                                 mm_inc_nr_pmds(mm);
3706                                 get_page(virt_to_page(spte));
3707                                 break;
3708                         }
3709                 }
3710         }
3711
3712         if (!spte)
3713                 goto out;
3714
3715         ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte);
3716         spin_lock(ptl);
3717         if (pud_none(*pud)) {
3718                 pud_populate(mm, pud,
3719                                 (pmd_t *)((unsigned long)spte & PAGE_MASK));
3720         } else {
3721                 put_page(virt_to_page(spte));
3722                 mm_inc_nr_pmds(mm);
3723         }
3724         spin_unlock(ptl);
3725 out:
3726         pte = (pte_t *)pmd_alloc(mm, pud, addr);
3727         i_mmap_unlock_write(mapping);
3728         return pte;
3729 }
3730
3731 /*
3732  * Unmap a huge page backed by a shared pte.
3733  *
3734  * The hugetlb pte page is refcounted at the time of mapping.  If the pte is
3735  * shared, indicated by page_count > 1, unmapping is achieved by clearing the
3736  * pud and decrementing the refcount. If count == 1, the pte page is not shared.
3737  *
3738  * Called with the page table lock held.
3739  *
3740  * Returns: 1 successfully unmapped a shared pte page
3741  *          0 the underlying pte page is not shared, or it is the last user
3742  */
3743 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
3744 {
3745         pgd_t *pgd = pgd_offset(mm, *addr);
3746         pud_t *pud = pud_offset(pgd, *addr);
3747
3748         BUG_ON(page_count(virt_to_page(ptep)) == 0);
3749         if (page_count(virt_to_page(ptep)) == 1)
3750                 return 0;
3751
3752         pud_clear(pud);
3753         put_page(virt_to_page(ptep));
3754         mm_dec_nr_pmds(mm);
3755         *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
3756         return 1;
3757 }
3758 #define want_pmd_share()        (1)
3759 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
3760 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
3761 {
3762         return NULL;
3763 }
3764 #define want_pmd_share()        (0)
3765 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
3766
3767 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
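/*
 * Generic page table allocation for a hugetlb fault: return the pud itself
 * for PUD_SIZE pages, otherwise allocate (or share, where possible) a pmd.
 */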
3768 pte_t *huge_pte_alloc(struct mm_struct *mm,
3769                         unsigned long addr, unsigned long sz)
3770 {
3771         pgd_t *pgd;
3772         pud_t *pud;
3773         pte_t *pte = NULL;
3774
3775         pgd = pgd_offset(mm, addr);
3776         pud = pud_alloc(mm, pgd, addr);
3777         if (pud) {
3778                 if (sz == PUD_SIZE) {
3779                         pte = (pte_t *)pud;
3780                 } else {
3781                         BUG_ON(sz != PMD_SIZE);
3782                         if (want_pmd_share() && pud_none(*pud))
3783                                 pte = huge_pmd_share(mm, addr, pud);
3784                         else
3785                                 pte = (pte_t *)pmd_alloc(mm, pud, addr);
3786                 }
3787         }
3788         BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
3789
3790         return pte;
3791 }
3792
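/*
 * Generic lookup of the pte mapping a hugetlb address: returns the pud when
 * it maps a huge page directly, otherwise the pmd slot, or NULL when no
 * page table is present.
 */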
3793 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
3794 {
3795         pgd_t *pgd;
3796         pud_t *pud;
3797         pmd_t *pmd = NULL;
3798
3799         pgd = pgd_offset(mm, addr);
3800         if (pgd_present(*pgd)) {
3801                 pud = pud_offset(pgd, addr);
3802                 if (pud_present(*pud)) {
3803                         if (pud_huge(*pud))
3804                                 return (pte_t *)pud;
3805                         pmd = pmd_offset(pud, addr);
3806                 }
3807         }
3808         return (pte_t *) pmd;
3809 }
3810
3811 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
3812
3813 /*
3814  * These functions can be overridden if your architecture needs its own
3815  * behavior.
3816  */
3817 struct page * __weak
3818 follow_huge_addr(struct mm_struct *mm, unsigned long address,
3819                               int write)
3820 {
3821         return ERR_PTR(-EINVAL);
3822 }
3823
3824 struct page * __weak
3825 follow_huge_pmd(struct mm_struct *mm, unsigned long address,
3826                 pmd_t *pmd, int flags)
3827 {
3828         struct page *page = NULL;
3829         spinlock_t *ptl;
3830 retry:
3831         ptl = pmd_lockptr(mm, pmd);
3832         spin_lock(ptl);
3833         /*
3834          * Make sure that the address range covered by this pmd is not
3835          * unmapped by other threads.
3836          */
3837         if (!pmd_huge(*pmd))
3838                 goto out;
3839         if (pmd_present(*pmd)) {
3840                 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
3841                 if (flags & FOLL_GET)
3842                         get_page(page);
3843         } else {
3844                 if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) {
3845                         spin_unlock(ptl);
3846                         __migration_entry_wait(mm, (pte_t *)pmd, ptl);
3847                         goto retry;
3848                 }
3849                 /*
3850                  * hwpoisoned entry is treated as no_page_table in
3851                  * follow_page_mask().
3852                  */
3853         }
3854 out:
3855         spin_unlock(ptl);
3856         return page;
3857 }
3858
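/*
 * Return the subpage at @address within the huge page mapped by @pud.
 * Taking a reference (FOLL_GET) is not supported here, so return NULL
 * in that case.
 */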
3859 struct page * __weak
3860 follow_huge_pud(struct mm_struct *mm, unsigned long address,
3861                 pud_t *pud, int flags)
3862 {
3863         if (flags & FOLL_GET)
3864                 return NULL;
3865
3866         return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
3867 }
3868
3869 #ifdef CONFIG_MEMORY_FAILURE
3870
3871 /* Should be called with hugetlb_lock held */
3872 static int is_hugepage_on_freelist(struct page *hpage)
3873 {
3874         struct page *page;
3875         struct page *tmp;
3876         struct hstate *h = page_hstate(hpage);
3877         int nid = page_to_nid(hpage);
3878
3879         list_for_each_entry_safe(page, tmp, &h->hugepage_freelists[nid], lru)
3880                 if (page == hpage)
3881                         return 1;
3882         return 0;
3883 }
3884
3885 /*
3886  * This function is called from memory failure code.
3887  * Assume the caller holds page lock of the head page.
3888  */
3889 int dequeue_hwpoisoned_huge_page(struct page *hpage)
3890 {
3891         struct hstate *h = page_hstate(hpage);
3892         int nid = page_to_nid(hpage);
3893         int ret = -EBUSY;
3894
3895         spin_lock(&hugetlb_lock);
3896         if (is_hugepage_on_freelist(hpage)) {
3897                 /*
3898                  * Hwpoisoned hugepage isn't linked to activelist or freelist,
3899                  * but dangling hpage->lru can trigger list-debug warnings
3900                  * (this happens when we call unpoison_memory() on it),
3901                  * so let it point to itself with list_del_init().
3902                  */
3903                 list_del_init(&hpage->lru);
3904                 set_page_refcounted(hpage);
3905                 h->free_huge_pages--;
3906                 h->free_huge_pages_node[nid]--;
3907                 ret = 0;
3908         }
3909         spin_unlock(&hugetlb_lock);
3910         return ret;
3911 }
3912 #endif
3913
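/*
 * Isolate a hugepage for migration: take a reference and move it from the
 * active list onto @list. Returns false if the page's refcount was already zero.
 */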
3914 bool isolate_huge_page(struct page *page, struct list_head *list)
3915 {
3916         VM_BUG_ON_PAGE(!PageHead(page), page);
3917         if (!get_page_unless_zero(page))
3918                 return false;
3919         spin_lock(&hugetlb_lock);
3920         list_move_tail(&page->lru, list);
3921         spin_unlock(&hugetlb_lock);
3922         return true;
3923 }
3924
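/*
 * Undo isolate_huge_page(): move the page back onto its hstate's active
 * list and drop the reference taken at isolation time.
 */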
3925 void putback_active_hugepage(struct page *page)
3926 {
3927         VM_BUG_ON_PAGE(!PageHead(page), page);
3928         spin_lock(&hugetlb_lock);
3929         list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
3930         spin_unlock(&hugetlb_lock);
3931         put_page(page);
3932 }
3933
3934 bool is_hugepage_active(struct page *page)
3935 {
3936         VM_BUG_ON_PAGE(!PageHuge(page), page);
3937         /*
3938          * This function can be called for a tail page because the caller,
3939          * scan_movable_pages, scans through a given pfn-range which typically
3940          * covers one memory block. On systems using gigantic hugepages (1GB
3941          * on x86_64), a hugepage is larger than a memory block, and we don't
3942          * support migrating such large hugepages for now, so return false
3943          * when called for tail pages.
3944          */
3945         if (PageTail(page))
3946                 return false;
3947         /*
3948          * The refcount of a hwpoisoned hugepage is 1, but it is not active,
3949          * so we should return false for it.
3950          */
3951         if (unlikely(PageHWPoison(page)))
3952                 return false;
3953         return page_count(page) > 0;
3954 }