arch/powerpc/mm/hugetlbpage.c
/*
 * PPC Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/mm.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/export.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <linux/moduleparam.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/kmemleak.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/setup.h>
#include <asm/hugetlb.h>
#include <asm/pte-walk.h>

bool hugetlb_disabled = false;

#define hugepd_none(hpd)        (hpd_val(hpd) == 0)

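/*
 * PTE_T_ORDER evaluates to log2(sizeof(pte_t)) - log2(sizeof(void *)),
 * i.e. it is non-zero only when a pte_t is wider than a pointer.  It is
 * used below to pick a pgtable cache large enough for a single huge PTE.
 */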
#define PTE_T_ORDER     (__builtin_ffs(sizeof(pte_t)) - __builtin_ffs(sizeof(void *)))

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
        /*
         * Only called for hugetlbfs pages, hence can ignore THP and the
         * irq disabled walk.
         */
        return __find_linux_pte(mm->pgd, addr, NULL, NULL);
}

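/*
 * Allocate the backing huge PTE (or PTE table) for a hugepd and point the
 * hugepd entry (or entries) at it.  Called with ptl unlocked; takes and
 * releases it around the hugepd update.
 */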
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
                           unsigned long address, unsigned int pdshift,
                           unsigned int pshift, spinlock_t *ptl)
{
        struct kmem_cache *cachep;
        pte_t *new;
        int i;
        int num_hugepd;

        if (pshift >= pdshift) {
                cachep = PGT_CACHE(PTE_T_ORDER);
                num_hugepd = 1 << (pshift - pdshift);
        } else if (IS_ENABLED(CONFIG_PPC_8xx)) {
                cachep = PGT_CACHE(PTE_INDEX_SIZE);
                num_hugepd = 1;
        } else {
                cachep = PGT_CACHE(pdshift - pshift);
                num_hugepd = 1;
        }

        new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));

        BUG_ON(pshift > HUGEPD_SHIFT_MASK);
        BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);

        if (!new)
                return -ENOMEM;

        /*
         * Make sure other cpus find the hugepd set only after a
         * properly initialized page table is visible to them.
         * For more details look for comment in __pte_alloc().
         */
        smp_wmb();

        spin_lock(ptl);
        /*
         * We have multiple higher-level entries that point to the same
         * actual pte location.  Fill in each as we go and backtrack on error.
         * We need all of these so the DTLB pgtable walk code can find the
         * right higher-level entry without knowing if it's a hugepage or not.
         */
        for (i = 0; i < num_hugepd; i++, hpdp++) {
                if (unlikely(!hugepd_none(*hpdp)))
                        break;
                hugepd_populate(hpdp, new, pshift);
        }
        /* If we bailed from the for loop early, an error occurred, clean up */
        if (i < num_hugepd) {
                for (i = i - 1; i >= 0; i--, hpdp--)
                        *hpdp = __hugepd(0);
                kmem_cache_free(cachep, new);
        } else {
                kmemleak_ignore(new);
        }
        spin_unlock(ptl);
        return 0;
}

/*
 * At this point we do the placement change only for BOOK3S 64. This would
 * possibly work on other subarchs.
 */
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
        pgd_t *pg;
        pud_t *pu;
        pmd_t *pm;
        hugepd_t *hpdp = NULL;
        unsigned pshift = __ffs(sz);
        unsigned pdshift = PGDIR_SHIFT;
        spinlock_t *ptl;

        addr &= ~(sz-1);
        pg = pgd_offset(mm, addr);

#ifdef CONFIG_PPC_BOOK3S_64
        if (pshift == PGDIR_SHIFT)
                /* 16GB huge page */
                return (pte_t *) pg;
        else if (pshift > PUD_SHIFT) {
                /*
                 * We need to use hugepd table
                 */
                ptl = &mm->page_table_lock;
                hpdp = (hugepd_t *)pg;
        } else {
                pdshift = PUD_SHIFT;
                pu = pud_alloc(mm, pg, addr);
                if (!pu)
                        return NULL;
                if (pshift == PUD_SHIFT)
                        return (pte_t *)pu;
                else if (pshift > PMD_SHIFT) {
                        ptl = pud_lockptr(mm, pu);
                        hpdp = (hugepd_t *)pu;
                } else {
                        pdshift = PMD_SHIFT;
                        pm = pmd_alloc(mm, pu, addr);
                        if (!pm)
                                return NULL;
                        if (pshift == PMD_SHIFT)
                                /* 16MB hugepage */
                                return (pte_t *)pm;
                        else {
                                ptl = pmd_lockptr(mm, pm);
                                hpdp = (hugepd_t *)pm;
                        }
                }
        }
#else
        if (pshift >= PGDIR_SHIFT) {
                ptl = &mm->page_table_lock;
                hpdp = (hugepd_t *)pg;
        } else {
                pdshift = PUD_SHIFT;
                pu = pud_alloc(mm, pg, addr);
                if (!pu)
                        return NULL;
                if (pshift >= PUD_SHIFT) {
                        ptl = pud_lockptr(mm, pu);
                        hpdp = (hugepd_t *)pu;
                } else {
                        pdshift = PMD_SHIFT;
                        pm = pmd_alloc(mm, pu, addr);
                        if (!pm)
                                return NULL;
                        ptl = pmd_lockptr(mm, pm);
                        hpdp = (hugepd_t *)pm;
                }
        }
#endif
        if (!hpdp)
                return NULL;

        BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

        if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr,
                                                  pdshift, pshift, ptl))
                return NULL;

        return hugepte_offset(*hpdp, addr, pdshift);
}

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Tracks gpages after the device tree is scanned and before the
 * huge_boot_pages list is ready on pseries.
 */
#define MAX_NUMBER_GPAGES       1024
__initdata static u64 gpage_freearray[MAX_NUMBER_GPAGES];
__initdata static unsigned nr_gpages;

/*
 * Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy allocator is setup.
 */
void __init pseries_add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
        if (!addr)
                return;
        while (number_of_pages > 0) {
                gpage_freearray[nr_gpages] = addr;
                nr_gpages++;
                number_of_pages--;
                addr += page_size;
        }
}

int __init pseries_alloc_bootmem_huge_page(struct hstate *hstate)
{
        struct huge_bootmem_page *m;
        if (nr_gpages == 0)
                return 0;
        m = phys_to_virt(gpage_freearray[--nr_gpages]);
        gpage_freearray[nr_gpages] = 0;
        list_add(&m->list, &huge_boot_pages);
        m->hstate = hstate;
        return 1;
}
#endif

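/*
 * Boot-time allocation of a gigantic page: on hash-MMU pseries LPARs use
 * the gpages reserved from the device tree, otherwise fall back to the
 * generic memblock-based allocator.
 */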
int __init alloc_bootmem_huge_page(struct hstate *h)
{

#ifdef CONFIG_PPC_BOOK3S_64
        if (firmware_has_feature(FW_FEATURE_LPAR) && !radix_enabled())
                return pseries_alloc_bootmem_huge_page(h);
#endif
        return __alloc_bootmem_huge_page(h);
}

#ifndef CONFIG_PPC_BOOK3S_64
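/*
 * On these platforms several PMD/PGD entries can point at the same huge
 * PTE, and that PTE may still be reached by lockless walkers such as
 * gup_fast.  Rather than freeing it immediately, batch freed huge PTEs
 * per CPU and release the batch after an RCU grace period; mms that are
 * single-threaded and local can free directly.
 */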
#define HUGEPD_FREELIST_SIZE \
        ((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))

struct hugepd_freelist {
        struct rcu_head rcu;
        unsigned int index;
        void *ptes[0];
};

static DEFINE_PER_CPU(struct hugepd_freelist *, hugepd_freelist_cur);

static void hugepd_free_rcu_callback(struct rcu_head *head)
{
        struct hugepd_freelist *batch =
                container_of(head, struct hugepd_freelist, rcu);
        unsigned int i;

        for (i = 0; i < batch->index; i++)
                kmem_cache_free(PGT_CACHE(PTE_T_ORDER), batch->ptes[i]);

        free_page((unsigned long)batch);
}

static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
{
        struct hugepd_freelist **batchp;

        batchp = &get_cpu_var(hugepd_freelist_cur);

        if (atomic_read(&tlb->mm->mm_users) < 2 ||
            mm_is_thread_local(tlb->mm)) {
                kmem_cache_free(PGT_CACHE(PTE_T_ORDER), hugepte);
                put_cpu_var(hugepd_freelist_cur);
                return;
        }

        if (*batchp == NULL) {
                *batchp = (struct hugepd_freelist *)__get_free_page(GFP_ATOMIC);
                (*batchp)->index = 0;
        }

        (*batchp)->ptes[(*batchp)->index++] = hugepte;
        if ((*batchp)->index == HUGEPD_FREELIST_SIZE) {
                call_rcu(&(*batchp)->rcu, hugepd_free_rcu_callback);
                *batchp = NULL;
        }
        put_cpu_var(hugepd_freelist_cur);
}
#else
static inline void hugepd_free(struct mmu_gather *tlb, void *hugepte) {}
#endif

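/*
 * Tear down one hugepd: clear the entry (or the group of identical entries
 * on FSL/8xx) and free the huge PTE table it pointed at.  The floor/ceiling
 * checks mirror free_pgd_range(): nothing is freed if the hole being
 * punched does not cover the directory entry's full span.
 */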
static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
                              unsigned long start, unsigned long end,
                              unsigned long floor, unsigned long ceiling)
{
        pte_t *hugepte = hugepd_page(*hpdp);
        int i;

        unsigned long pdmask = ~((1UL << pdshift) - 1);
        unsigned int num_hugepd = 1;
        unsigned int shift = hugepd_shift(*hpdp);

        /* Note: On fsl the hpdp may be the first of several */
        if (shift > pdshift)
                num_hugepd = 1 << (shift - pdshift);

        start &= pdmask;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= pdmask;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        for (i = 0; i < num_hugepd; i++, hpdp++)
                *hpdp = __hugepd(0);

        if (shift >= pdshift)
                hugepd_free(tlb, hugepte);
        else if (IS_ENABLED(CONFIG_PPC_8xx))
                pgtable_free_tlb(tlb, hugepte,
                                 get_hugepd_cache_index(PTE_INDEX_SIZE));
        else
                pgtable_free_tlb(tlb, hugepte,
                                 get_hugepd_cache_index(pdshift - shift));
}

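/*
 * Walk the PMDs under one PUD, freeing any hugepd found, then free the PMD
 * table itself if the whole PUD span lies inside the floor/ceiling hole.
 */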
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
                                   unsigned long addr, unsigned long end,
                                   unsigned long floor, unsigned long ceiling)
{
        pmd_t *pmd;
        unsigned long next;
        unsigned long start;

        start = addr;
        do {
                unsigned long more;

                pmd = pmd_offset(pud, addr);
                next = pmd_addr_end(addr, end);
                if (!is_hugepd(__hugepd(pmd_val(*pmd)))) {
                        /*
                         * If it is not a hugepd pointer, we should have
                         * already found it cleared.
                         */
                        WARN_ON(!pmd_none_or_clear_bad(pmd));
                        continue;
                }
                /*
                 * Increment next by the size of the huge mapping since
                 * there may be more than one entry at this level for a
                 * single hugepage, but all of them point to
                 * the same kmem cache that holds the hugepte.
                 */
                more = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
                if (more > next)
                        next = more;

                free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
                                  addr, next, floor, ceiling);
        } while (addr = next, addr != end);

        start &= PUD_MASK;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= PUD_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        pmd = pmd_offset(pud, start);
        pud_clear(pud);
        pmd_free_tlb(tlb, pmd, start);
        mm_dec_nr_pmds(tlb->mm);
}

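/*
 * As above, one level up: walk the PUDs under one PGD entry, freeing
 * hugepds and PMD tables, then the PUD table itself where permitted.
 */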
static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
                                   unsigned long addr, unsigned long end,
                                   unsigned long floor, unsigned long ceiling)
{
        pud_t *pud;
        unsigned long next;
        unsigned long start;

        start = addr;
        do {
                pud = pud_offset(pgd, addr);
                next = pud_addr_end(addr, end);
                if (!is_hugepd(__hugepd(pud_val(*pud)))) {
                        if (pud_none_or_clear_bad(pud))
                                continue;
                        hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
                                               ceiling);
                } else {
                        unsigned long more;
                        /*
                         * Increment next by the size of the huge mapping since
                         * there may be more than one entry at this level for a
                         * single hugepage, but all of them point to
                         * the same kmem cache that holds the hugepte.
                         */
                        more = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
                        if (more > next)
                                next = more;

                        free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
                                          addr, next, floor, ceiling);
                }
        } while (addr = next, addr != end);

        start &= PGDIR_MASK;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= PGDIR_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        pud = pud_offset(pgd, start);
        pgd_clear(pgd);
        pud_free_tlb(tlb, pud, start);
        mm_dec_nr_puds(tlb->mm);
}

/*
 * This function frees user-level page tables of a process.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
                            unsigned long addr, unsigned long end,
                            unsigned long floor, unsigned long ceiling)
{
        pgd_t *pgd;
        unsigned long next;

        /*
         * Because there are a number of different possible pagetable
         * layouts for hugepage ranges, we limit knowledge of how
         * things should be laid out to the allocation path
         * (huge_pte_alloc(), above).  Everything else works out the
         * structure as it goes from information in the hugepd
         * pointers.  That means that we can't here use the
         * optimization used in the normal page free_pgd_range(), of
         * checking whether we're actually covering a large enough
         * range to have to do anything at the top level of the walk
         * instead of at the bottom.
         *
         * To make sense of this, you should probably go read the big
         * block comment at the top of the normal free_pgd_range(),
         * too.
         */

        do {
                next = pgd_addr_end(addr, end);
                pgd = pgd_offset(tlb->mm, addr);
                if (!is_hugepd(__hugepd(pgd_val(*pgd)))) {
                        if (pgd_none_or_clear_bad(pgd))
                                continue;
                        hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
                } else {
                        unsigned long more;
                        /*
                         * Increment next by the size of the huge mapping since
                         * there may be more than one entry at the pgd level
                         * for a single hugepage, but all of them point to the
                         * same kmem cache that holds the hugepte.
                         */
                        more = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
                        if (more > next)
                                next = more;

                        free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT,
                                          addr, next, floor, ceiling);
                }
        } while (addr = next, addr != end);
}

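/*
 * follow_page() helper for hugepd-mapped ranges: return the page backing
 * @address (taking a reference if FOLL_GET), or wait and retry if the huge
 * PTE is a migration entry.
 */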
struct page *follow_huge_pd(struct vm_area_struct *vma,
                            unsigned long address, hugepd_t hpd,
                            int flags, int pdshift)
{
        pte_t *ptep;
        spinlock_t *ptl;
        struct page *page = NULL;
        unsigned long mask;
        int shift = hugepd_shift(hpd);
        struct mm_struct *mm = vma->vm_mm;

retry:
        /*
         * hugepage directory entries are protected by mm->page_table_lock
         * Use this instead of huge_pte_lockptr
         */
        ptl = &mm->page_table_lock;
        spin_lock(ptl);

        ptep = hugepte_offset(hpd, address, pdshift);
        if (pte_present(*ptep)) {
                mask = (1UL << shift) - 1;
                page = pte_page(*ptep);
                page += ((address & mask) >> PAGE_SHIFT);
                if (flags & FOLL_GET)
                        get_page(page);
        } else {
                if (is_hugetlb_entry_migration(*ptep)) {
                        spin_unlock(ptl);
                        __migration_entry_wait(mm, ptep, ptl);
                        goto retry;
                }
        }
        spin_unlock(ptl);
        return page;
}

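/*
 * Return the smaller of @end and the next @sz-aligned boundary after @addr.
 * The "- 1" comparison avoids overflow when the boundary wraps to 0 at the
 * top of the address space.
 */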
static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
                                      unsigned long sz)
{
        unsigned long __boundary = (addr + sz) & ~(sz-1);
        return (__boundary - 1 < end - 1) ? __boundary : end;
}

#ifdef CONFIG_PPC_MM_SLICES
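/*
 * Pick an unmapped area for a hugetlb mapping: radix uses its own search,
 * while hash relies on the slice allocator to keep the range within a
 * slice of the right page size.
 */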
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                                        unsigned long len, unsigned long pgoff,
                                        unsigned long flags)
{
        struct hstate *hstate = hstate_file(file);
        int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));

#ifdef CONFIG_PPC_RADIX_MMU
        if (radix_enabled())
                return radix__hugetlb_get_unmapped_area(file, addr, len,
                                                       pgoff, flags);
#endif
        return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
}
#endif

unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
        /* With radix we don't use slice, so derive it from vma */
        if (IS_ENABLED(CONFIG_PPC_MM_SLICES) && !radix_enabled()) {
                unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);

                return 1UL << mmu_psize_to_shift(psize);
        }
        return vma_kernel_pagesize(vma);
}

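/*
 * Validate a requested huge page size against the MMU's supported page
 * sizes and register an hstate for it if one does not exist yet.
 */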
static int __init add_huge_page_size(unsigned long long size)
{
        int shift = __ffs(size);
        int mmu_psize;

        /* Check that it is a page size supported by the hardware and
         * that it fits within pagetable and slice limits. */
        if (size <= PAGE_SIZE || !is_power_of_2(size))
                return -EINVAL;

        mmu_psize = check_and_get_huge_psize(shift);
        if (mmu_psize < 0)
                return -EINVAL;

        BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);

        /* Return if huge page size has already been setup */
        if (size_to_hstate(size))
                return 0;

        hugetlb_add_hstate(shift - PAGE_SHIFT);

        return 0;
}

static int __init hugepage_setup_sz(char *str)
{
        unsigned long long size;

        size = memparse(str, &str);

        if (add_huge_page_size(size) != 0) {
                hugetlb_bad_size();
                pr_err("Invalid huge page size specified(%llu)\n", size);
        }

        return 1;
}
__setup("hugepagesz=", hugepage_setup_sz);

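/*
 * Register every hardware-supported huge page size at boot, along with the
 * pgtable kmem caches needed to back hugepd entries at each level.
 */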
static int __init hugetlbpage_init(void)
{
        int psize;

        if (hugetlb_disabled) {
                pr_info("HugeTLB support is disabled!\n");
                return 0;
        }

        if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !radix_enabled() &&
            !mmu_has_feature(MMU_FTR_16M_PAGE))
                return -ENODEV;

        for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
                unsigned shift;
                unsigned pdshift;

                if (!mmu_psize_defs[psize].shift)
                        continue;

                shift = mmu_psize_to_shift(psize);

#ifdef CONFIG_PPC_BOOK3S_64
                if (shift > PGDIR_SHIFT)
                        continue;
                else if (shift > PUD_SHIFT)
                        pdshift = PGDIR_SHIFT;
                else if (shift > PMD_SHIFT)
                        pdshift = PUD_SHIFT;
                else
                        pdshift = PMD_SHIFT;
#else
                if (shift < PUD_SHIFT)
                        pdshift = PMD_SHIFT;
                else if (shift < PGDIR_SHIFT)
                        pdshift = PUD_SHIFT;
                else
                        pdshift = PGDIR_SHIFT;
#endif

                if (add_huge_page_size(1ULL << shift) < 0)
                        continue;
                /*
                 * If pdshift and shift are the same, we don't use the
                 * pgt cache for hugepd.
                 */
                if (pdshift > shift && IS_ENABLED(CONFIG_PPC_8xx))
                        pgtable_cache_add(PTE_INDEX_SIZE);
                else if (pdshift > shift)
                        pgtable_cache_add(pdshift - shift);
                else if (IS_ENABLED(CONFIG_PPC_FSL_BOOK3E) || IS_ENABLED(CONFIG_PPC_8xx))
                        pgtable_cache_add(PTE_T_ORDER);
        }

        if (IS_ENABLED(CONFIG_HUGETLB_PAGE_SIZE_VARIABLE))
                hugetlbpage_init_default();

        return 0;
}

arch_initcall(hugetlbpage_init);

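/*
 * Flush the D-cache and invalidate the I-cache for every subpage of a huge
 * page, mapping each subpage with kmap_atomic() when it lives in highmem.
 */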
void flush_dcache_icache_hugepage(struct page *page)
{
        int i;
        void *start;

        BUG_ON(!PageCompound(page));

        for (i = 0; i < (1UL << compound_order(page)); i++) {
                if (!PageHighMem(page)) {
                        __flush_dcache_icache(page_address(page+i));
                } else {
                        start = kmap_atomic(page+i);
                        __flush_dcache_icache(start);
                        kunmap_atomic(start);
                }
        }
}

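/*
 * Lockless get_user_pages_fast() helper for one huge PTE: take speculative
 * references on the subpages covered by [addr, end) and back out if the
 * PTE changed underneath us.
 */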
static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
                       unsigned long end, int write, struct page **pages, int *nr)
{
        unsigned long pte_end;
        struct page *head, *page;
        pte_t pte;
        int refs;

        pte_end = (addr + sz) & ~(sz-1);
        if (pte_end < end)
                end = pte_end;

        pte = READ_ONCE(*ptep);

        if (!pte_access_permitted(pte, write))
                return 0;

        /* hugepages are never "special" */
        VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

        refs = 0;
        head = pte_page(pte);

        page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
        do {
                VM_BUG_ON(compound_head(page) != head);
                pages[*nr] = page;
                (*nr)++;
                page++;
                refs++;
        } while (addr += PAGE_SIZE, addr != end);

        if (!page_cache_add_speculative(head, refs)) {
                *nr -= refs;
                return 0;
        }

        if (unlikely(pte_val(pte) != pte_val(*ptep))) {
                /* Could be optimized better */
                *nr -= refs;
                while (refs--)
                        put_page(head);
                return 0;
        }

        return 1;
}

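/*
 * Fast-GUP entry point for a hugepd: iterate over each huge PTE covering
 * [addr, end) and hand it to gup_hugepte().
 */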
int gup_huge_pd(hugepd_t hugepd, unsigned long addr, unsigned int pdshift,
                unsigned long end, int write, struct page **pages, int *nr)
{
        pte_t *ptep;
        unsigned long sz = 1UL << hugepd_shift(hugepd);
        unsigned long next;

        ptep = hugepte_offset(hugepd, addr, pdshift);
        do {
                next = hugepte_addr_end(addr, end, sz);
                if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
                        return 0;
        } while (ptep++, addr = next, addr != end);

        return 1;
}