powerpc/mm/hugetlb: remove follow_huge_addr for powerpc
arch/powerpc/mm/hugetlbpage.c [linux-2.6-block.git]
1 /*
2  * PPC Huge TLB Page Support for Kernel.
3  *
4  * Copyright (C) 2003 David Gibson, IBM Corporation.
5  * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
6  *
7  * Based on the IA-32 version:
8  * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
9  */
10
11 #include <linux/mm.h>
12 #include <linux/io.h>
13 #include <linux/slab.h>
14 #include <linux/hugetlb.h>
15 #include <linux/export.h>
16 #include <linux/of_fdt.h>
17 #include <linux/memblock.h>
18 #include <linux/bootmem.h>
19 #include <linux/moduleparam.h>
20 #include <linux/swap.h>
21 #include <linux/swapops.h>
22 #include <asm/pgtable.h>
23 #include <asm/pgalloc.h>
24 #include <asm/tlb.h>
25 #include <asm/setup.h>
26 #include <asm/hugetlb.h>
27
28 #ifdef CONFIG_HUGETLB_PAGE
29
30 #define PAGE_SHIFT_64K  16
31 #define PAGE_SHIFT_512K 19
32 #define PAGE_SHIFT_8M   23
33 #define PAGE_SHIFT_16M  24
34 #define PAGE_SHIFT_16G  34
35
36 unsigned int HPAGE_SHIFT;
37
38 /*
39  * Tracks gpages after the device tree is scanned and before the
40  * huge_boot_pages list is ready.  On non-Freescale implementations, this is
41  * just used to track 16G pages and so is a single array.  FSL-based
42  * implementations may have more than one gpage size, so we need multiple
43  * arrays.
44  */
45 #if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
46 #define MAX_NUMBER_GPAGES       128
47 struct psize_gpages {
48         u64 gpage_list[MAX_NUMBER_GPAGES];
49         unsigned int nr_gpages;
50 };
51 static struct psize_gpages gpage_freearray[MMU_PAGE_COUNT];
52 #else
53 #define MAX_NUMBER_GPAGES       1024
54 static u64 gpage_freearray[MAX_NUMBER_GPAGES];
55 static unsigned nr_gpages;
56 #endif
57
58 #define hugepd_none(hpd)        (hpd_val(hpd) == 0)
59
60 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
61 {
62         /* Only called for hugetlbfs pages, hence can ignore THP */
63         return __find_linux_pte_or_hugepte(mm->pgd, addr, NULL, NULL);
64 }
65
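/*
 * Allocate a hugepte table and hook it into the hugepd entry (or entries)
 * covering @address.  When pshift >= pdshift, a single huge page is mapped
 * by 1 << (pshift - pdshift) consecutive directory entries, all of which
 * are made to point at the same table from hugepte_cache; otherwise a
 * regular PGT_CACHE(pdshift - pshift) table is used.  Returns 0 on success
 * or -ENOMEM if the table cannot be allocated.
 */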
66 static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
67                            unsigned long address, unsigned pdshift, unsigned pshift)
68 {
69         struct kmem_cache *cachep;
70         pte_t *new;
71         int i;
72         int num_hugepd;
73
74         if (pshift >= pdshift) {
75                 cachep = hugepte_cache;
76                 num_hugepd = 1 << (pshift - pdshift);
77         } else {
78                 cachep = PGT_CACHE(pdshift - pshift);
79                 num_hugepd = 1;
80         }
81
82         new = kmem_cache_zalloc(cachep, GFP_KERNEL);
83
84         BUG_ON(pshift > HUGEPD_SHIFT_MASK);
85         BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);
86
87         if (!new)
88                 return -ENOMEM;
89
90         /*
91          * Make sure other cpus find the hugepd set only after a
92          * properly initialized page table is visible to them.
93          * For more details, see the comment in __pte_alloc().
94          */
95         smp_wmb();
96
97         spin_lock(&mm->page_table_lock);
98
99         /*
100          * We have multiple higher-level entries that point to the same
101          * actual pte location.  Fill in each as we go and backtrack on error.
102          * We need all of these so the DTLB pgtable walk code can find the
103          * right higher-level entry without knowing if it's a hugepage or not.
104          */
105         for (i = 0; i < num_hugepd; i++, hpdp++) {
106                 if (unlikely(!hugepd_none(*hpdp)))
107                         break;
108                 else {
109 #ifdef CONFIG_PPC_BOOK3S_64
110                         *hpdp = __hugepd(__pa(new) |
111                                          (shift_to_mmu_psize(pshift) << 2));
112 #elif defined(CONFIG_PPC_8xx)
113                         *hpdp = __hugepd(__pa(new) |
114                                          (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M :
115                                           _PMD_PAGE_512K) | _PMD_PRESENT);
116 #else
117                         /* We use the old format for PPC_FSL_BOOK3E */
118                         *hpdp = __hugepd(((unsigned long)new & ~PD_HUGE) | pshift);
119 #endif
120                 }
121         }
122         /* If we bailed from the for loop early, an error occurred; clean up. */
123         if (i < num_hugepd) {
124                 for (i = i - 1 ; i >= 0; i--, hpdp--)
125                         *hpdp = __hugepd(0);
126                 kmem_cache_free(cachep, new);
127         }
128         spin_unlock(&mm->page_table_lock);
129         return 0;
130 }
131
132 /*
133  * These macros define how to determine which level of the page table holds
134  * the hpdp.
135  */
136 #if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
137 #define HUGEPD_PGD_SHIFT PGDIR_SHIFT
138 #define HUGEPD_PUD_SHIFT PUD_SHIFT
139 #else
140 #define HUGEPD_PGD_SHIFT PUD_SHIFT
141 #define HUGEPD_PUD_SHIFT PMD_SHIFT
142 #endif
143
144 /*
145  * At this point we make this placement change only for Book3S 64. It could
146  * possibly work on other subarchs as well.
147  */
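/*
 * Summary of the walk below: on Book3S 64 a size whose shift matches
 * PGDIR/PUD/PMD_SHIFT gets a leaf pte at that level (e.g. 16G at the pgd
 * and 16M at the pmd, per the comments below), a size in between gets a
 * hugepd installed at the enclosing level, and the other platforms always
 * use a hugepd at the level selected by the HUGEPD_*_SHIFT macros above.
 */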
148 pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
149 {
150         pgd_t *pg;
151         pud_t *pu;
152         pmd_t *pm;
153         hugepd_t *hpdp = NULL;
154         unsigned pshift = __ffs(sz);
155         unsigned pdshift = PGDIR_SHIFT;
156
157         addr &= ~(sz-1);
158         pg = pgd_offset(mm, addr);
159
160 #ifdef CONFIG_PPC_BOOK3S_64
161         if (pshift == PGDIR_SHIFT)
162                 /* 16GB huge page */
163                 return (pte_t *) pg;
164         else if (pshift > PUD_SHIFT)
165                 /*
166                  * We need to use a hugepd table
167                  */
168                 hpdp = (hugepd_t *)pg;
169         else {
170                 pdshift = PUD_SHIFT;
171                 pu = pud_alloc(mm, pg, addr);
172                 if (pshift == PUD_SHIFT)
173                         return (pte_t *)pu;
174                 else if (pshift > PMD_SHIFT)
175                         hpdp = (hugepd_t *)pu;
176                 else {
177                         pdshift = PMD_SHIFT;
178                         pm = pmd_alloc(mm, pu, addr);
179                         if (pshift == PMD_SHIFT)
180                                 /* 16MB hugepage */
181                                 return (pte_t *)pm;
182                         else
183                                 hpdp = (hugepd_t *)pm;
184                 }
185         }
186 #else
187         if (pshift >= HUGEPD_PGD_SHIFT) {
188                 hpdp = (hugepd_t *)pg;
189         } else {
190                 pdshift = PUD_SHIFT;
191                 pu = pud_alloc(mm, pg, addr);
192                 if (pshift >= HUGEPD_PUD_SHIFT) {
193                         hpdp = (hugepd_t *)pu;
194                 } else {
195                         pdshift = PMD_SHIFT;
196                         pm = pmd_alloc(mm, pu, addr);
197                         hpdp = (hugepd_t *)pm;
198                 }
199         }
200 #endif
201         if (!hpdp)
202                 return NULL;
203
204         BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));
205
206         if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
207                 return NULL;
208
209         return hugepte_offset(*hpdp, addr, pdshift);
210 }
211
212 #if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
213 /* Build list of addresses of gigantic pages.  This function is used in early
214  * boot, before the buddy allocator is set up.
215  */
216 void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
217 {
218         unsigned int idx = shift_to_mmu_psize(__ffs(page_size));
219         int i;
220
221         if (addr == 0)
222                 return;
223
224         gpage_freearray[idx].nr_gpages = number_of_pages;
225
226         for (i = 0; i < number_of_pages; i++) {
227                 gpage_freearray[idx].gpage_list[i] = addr;
228                 addr += page_size;
229         }
230 }
231
232 /*
233  * Moves the gigantic page addresses from the temporary list to the
234  * huge_boot_pages list.
235  */
236 int alloc_bootmem_huge_page(struct hstate *hstate)
237 {
238         struct huge_bootmem_page *m;
239         int idx = shift_to_mmu_psize(huge_page_shift(hstate));
240         int nr_gpages = gpage_freearray[idx].nr_gpages;
241
242         if (nr_gpages == 0)
243                 return 0;
244
245 #ifdef CONFIG_HIGHMEM
246         /*
247          * If gpages can be in highmem, we can't use the trick of storing the
248          * data structure in the page itself; allocate space for it instead.
249          */
250         m = memblock_virt_alloc(sizeof(struct huge_bootmem_page), 0);
251         m->phys = gpage_freearray[idx].gpage_list[--nr_gpages];
252 #else
253         m = phys_to_virt(gpage_freearray[idx].gpage_list[--nr_gpages]);
254 #endif
255
256         list_add(&m->list, &huge_boot_pages);
257         gpage_freearray[idx].nr_gpages = nr_gpages;
258         gpage_freearray[idx].gpage_list[nr_gpages] = 0;
259         m->hstate = hstate;
260
261         return 1;
262 }
263 /*
264  * Scan the command line hugepagesz= options for gigantic pages; store those in
265  * a list that we use to allocate the memory once all options are parsed.
266  */
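/*
 * For example (sizes illustrative), a command line containing
 * "hugepagesz=1G hugepages=4" records 4 in gpage_npages[] for the 1G psize
 * here; reserve_hugetlb_gpages() below then carves the memory out of
 * memblock and hands it to add_gpage().
 */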
267
268 unsigned long gpage_npages[MMU_PAGE_COUNT];
269
270 static int __init do_gpage_early_setup(char *param, char *val,
271                                        const char *unused, void *arg)
272 {
273         static phys_addr_t size;
274         unsigned long npages;
275
276         /*
277          * The hugepagesz and hugepages cmdline options are interleaved.  We
278          * use the size variable to keep track of whether or not this was done
279          * properly and skip over instances where it is incorrect.  Other
280          * command-line parsing code will issue warnings, so we don't need to.
281          *
282          */
283         if ((strcmp(param, "default_hugepagesz") == 0) ||
284             (strcmp(param, "hugepagesz") == 0)) {
285                 size = memparse(val, NULL);
286         } else if (strcmp(param, "hugepages") == 0) {
287                 if (size != 0) {
288                         if (sscanf(val, "%lu", &npages) <= 0)
289                                 npages = 0;
290                         if (npages > MAX_NUMBER_GPAGES) {
291                                 pr_warn("MMU: %lu pages requested for page "
292 #ifdef CONFIG_PHYS_ADDR_T_64BIT
293                                         "size %llu KB, limiting to "
294 #else
295                                         "size %u KB, limiting to "
296 #endif
297                                         __stringify(MAX_NUMBER_GPAGES) "\n",
298                                         npages, size / 1024);
299                                 npages = MAX_NUMBER_GPAGES;
300                         }
301                         gpage_npages[shift_to_mmu_psize(__ffs(size))] = npages;
302                         size = 0;
303                 }
304         }
305         return 0;
306 }
307
308
309 /*
310  * This function allocates physical space for pages that are larger than the
311  * buddy allocator can handle.  We want to allocate these in highmem because
312  * the amount of lowmem is limited.  This means that this function MUST be
313  * called before lowmem_end_addr is set up in MMU_init(), in order for the
314  * memblock allocation to grab highmem.
315  */
316 void __init reserve_hugetlb_gpages(void)
317 {
318         static __initdata char cmdline[COMMAND_LINE_SIZE];
319         phys_addr_t size, base;
320         int i;
321
322         strlcpy(cmdline, boot_command_line, COMMAND_LINE_SIZE);
323         parse_args("hugetlb gpages", cmdline, NULL, 0, 0, 0,
324                         NULL, &do_gpage_early_setup);
325
326         /*
327          * Walk gpage list in reverse, allocating larger page sizes first.
328          * Skip over unsupported sizes, or sizes that have 0 gpages allocated.
329          * When we reach the point in the list where pages are no longer
330          * considered gpages, we're done.
331          */
332         for (i = MMU_PAGE_COUNT-1; i >= 0; i--) {
333                 if (mmu_psize_defs[i].shift == 0 || gpage_npages[i] == 0)
334                         continue;
335                 else if (mmu_psize_to_shift(i) < (MAX_ORDER + PAGE_SHIFT))
336                         break;
337
338                 size = (phys_addr_t)(1ULL << mmu_psize_to_shift(i));
339                 base = memblock_alloc_base(size * gpage_npages[i], size,
340                                            MEMBLOCK_ALLOC_ANYWHERE);
341                 add_gpage(base, size, gpage_npages[i]);
342         }
343 }
344
345 #else /* !PPC_FSL_BOOK3E */
346
347 /* Build list of addresses of gigantic pages.  This function is used in early
348  * boot, before the buddy allocator is set up.
349  */
350 void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
351 {
352         if (!addr)
353                 return;
354         while (number_of_pages > 0) {
355                 gpage_freearray[nr_gpages] = addr;
356                 nr_gpages++;
357                 number_of_pages--;
358                 addr += page_size;
359         }
360 }
361
362 /* Moves the gigantic page addresses from the temporary list to the
363  * huge_boot_pages list.
364  */
365 int alloc_bootmem_huge_page(struct hstate *hstate)
366 {
367         struct huge_bootmem_page *m;
368         if (nr_gpages == 0)
369                 return 0;
370         m = phys_to_virt(gpage_freearray[--nr_gpages]);
371         gpage_freearray[nr_gpages] = 0;
372         list_add(&m->list, &huge_boot_pages);
373         m->hstate = hstate;
374         return 1;
375 }
376 #endif
377
378 #if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
379 #define HUGEPD_FREELIST_SIZE \
380         ((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))
381
382 struct hugepd_freelist {
383         struct rcu_head rcu;
384         unsigned int index;
385         void *ptes[0];
386 };
387
388 static DEFINE_PER_CPU(struct hugepd_freelist *, hugepd_freelist_cur);
389
390 static void hugepd_free_rcu_callback(struct rcu_head *head)
391 {
392         struct hugepd_freelist *batch =
393                 container_of(head, struct hugepd_freelist, rcu);
394         unsigned int i;
395
396         for (i = 0; i < batch->index; i++)
397                 kmem_cache_free(hugepte_cache, batch->ptes[i]);
398
399         free_page((unsigned long)batch);
400 }
401
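/*
 * Free a hugepte table that has just been unhooked from the page tables.
 * If the mm is only in use on this CPU we can free it immediately;
 * otherwise it is queued on a per-CPU batch and released via
 * call_rcu_sched(), so that concurrent lockless walkers (e.g. the fast
 * GUP path) never see a freed table.
 */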
402 static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
403 {
404         struct hugepd_freelist **batchp;
405
406         batchp = &get_cpu_var(hugepd_freelist_cur);
407
408         if (atomic_read(&tlb->mm->mm_users) < 2 ||
409             cpumask_equal(mm_cpumask(tlb->mm),
410                           cpumask_of(smp_processor_id()))) {
411                 kmem_cache_free(hugepte_cache, hugepte);
412                 put_cpu_var(hugepd_freelist_cur);
413                 return;
414         }
415
416         if (*batchp == NULL) {
417                 *batchp = (struct hugepd_freelist *)__get_free_page(GFP_ATOMIC);
418                 (*batchp)->index = 0;
419         }
420
421         (*batchp)->ptes[(*batchp)->index++] = hugepte;
422         if ((*batchp)->index == HUGEPD_FREELIST_SIZE) {
423                 call_rcu_sched(&(*batchp)->rcu, hugepd_free_rcu_callback);
424                 *batchp = NULL;
425         }
426         put_cpu_var(hugepd_freelist_cur);
427 }
428 #else
429 static inline void hugepd_free(struct mmu_gather *tlb, void *hugepte) {}
430 #endif
431
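/*
 * Clear the hugepd entry (or the whole group of identical entries, where
 * one huge page spans several directory slots) and free the hugepte table
 * it pointed to, provided the floor/ceiling limits show that no other part
 * of the enclosing directory range is still in use.
 */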
432 static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
433                               unsigned long start, unsigned long end,
434                               unsigned long floor, unsigned long ceiling)
435 {
436         pte_t *hugepte = hugepd_page(*hpdp);
437         int i;
438
439         unsigned long pdmask = ~((1UL << pdshift) - 1);
440         unsigned int num_hugepd = 1;
441         unsigned int shift = hugepd_shift(*hpdp);
442
443         /* Note: On fsl the hpdp may be the first of several */
444         if (shift > pdshift)
445                 num_hugepd = 1 << (shift - pdshift);
446
447         start &= pdmask;
448         if (start < floor)
449                 return;
450         if (ceiling) {
451                 ceiling &= pdmask;
452                 if (!ceiling)
453                         return;
454         }
455         if (end - 1 > ceiling - 1)
456                 return;
457
458         for (i = 0; i < num_hugepd; i++, hpdp++)
459                 *hpdp = __hugepd(0);
460
461         if (shift >= pdshift)
462                 hugepd_free(tlb, hugepte);
463         else
464                 pgtable_free_tlb(tlb, hugepte, pdshift - shift);
465 }
466
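/*
 * Walk the PMDs under @pud, tearing down any hugepd tables found there,
 * then free the PMD page itself once the floor/ceiling checks show the
 * whole PUD entry is dead.  hugetlb_free_pud_range() below does the same
 * thing one level up.
 */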
467 static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
468                                    unsigned long addr, unsigned long end,
469                                    unsigned long floor, unsigned long ceiling)
470 {
471         pmd_t *pmd;
472         unsigned long next;
473         unsigned long start;
474
475         start = addr;
476         do {
477                 unsigned long more;
478
479                 pmd = pmd_offset(pud, addr);
480                 next = pmd_addr_end(addr, end);
481                 if (!is_hugepd(__hugepd(pmd_val(*pmd)))) {
482                         /*
483                          * If it is not a hugepd pointer, we should have
484                          * already found it cleared.
485                          */
486                         WARN_ON(!pmd_none_or_clear_bad(pmd));
487                         continue;
488                 }
489                 /*
490                  * Increment next by the size of the huge mapping since
491                  * there may be more than one entry at this level for a
492                  * single hugepage, but all of them point to
493                  * the same kmem cache that holds the hugepte.
494                  */
495                 more = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
496                 if (more > next)
497                         next = more;
498
499                 free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
500                                   addr, next, floor, ceiling);
501         } while (addr = next, addr != end);
502
503         start &= PUD_MASK;
504         if (start < floor)
505                 return;
506         if (ceiling) {
507                 ceiling &= PUD_MASK;
508                 if (!ceiling)
509                         return;
510         }
511         if (end - 1 > ceiling - 1)
512                 return;
513
514         pmd = pmd_offset(pud, start);
515         pud_clear(pud);
516         pmd_free_tlb(tlb, pmd, start);
517         mm_dec_nr_pmds(tlb->mm);
518 }
519
520 static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
521                                    unsigned long addr, unsigned long end,
522                                    unsigned long floor, unsigned long ceiling)
523 {
524         pud_t *pud;
525         unsigned long next;
526         unsigned long start;
527
528         start = addr;
529         do {
530                 pud = pud_offset(pgd, addr);
531                 next = pud_addr_end(addr, end);
532                 if (!is_hugepd(__hugepd(pud_val(*pud)))) {
533                         if (pud_none_or_clear_bad(pud))
534                                 continue;
535                         hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
536                                                ceiling);
537                 } else {
538                         unsigned long more;
539                         /*
540                          * Increment next by the size of the huge mapping since
541                          * there may be more than one entry at this level for a
542                          * single hugepage, but all of them point to
543                          * the same kmem cache that holds the hugepte.
544                          */
545                         more = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
546                         if (more > next)
547                                 next = more;
548
549                         free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
550                                           addr, next, floor, ceiling);
551                 }
552         } while (addr = next, addr != end);
553
554         start &= PGDIR_MASK;
555         if (start < floor)
556                 return;
557         if (ceiling) {
558                 ceiling &= PGDIR_MASK;
559                 if (!ceiling)
560                         return;
561         }
562         if (end - 1 > ceiling - 1)
563                 return;
564
565         pud = pud_offset(pgd, start);
566         pgd_clear(pgd);
567         pud_free_tlb(tlb, pud, start);
568 }
569
570 /*
571  * This function frees user-level page tables of a process.
572  */
573 void hugetlb_free_pgd_range(struct mmu_gather *tlb,
574                             unsigned long addr, unsigned long end,
575                             unsigned long floor, unsigned long ceiling)
576 {
577         pgd_t *pgd;
578         unsigned long next;
579
580         /*
581          * Because there are a number of different possible pagetable
582          * layouts for hugepage ranges, we limit knowledge of how
583          * things should be laid out to the allocation path
584          * (huge_pte_alloc(), above).  Everything else works out the
585          * structure as it goes from information in the hugepd
586          * pointers.  That means that we can't use here the
587          * optimization used in the normal free_pgd_range(), of
588          * checking whether we're actually covering a large enough
589          * range to have to do anything at the top level of the walk
590          * instead of at the bottom.
591          *
592          * To make sense of this, you should probably go read the big
593          * block comment at the top of the normal free_pgd_range(),
594          * too.
595          */
596
597         do {
598                 next = pgd_addr_end(addr, end);
599                 pgd = pgd_offset(tlb->mm, addr);
600                 if (!is_hugepd(__hugepd(pgd_val(*pgd)))) {
601                         if (pgd_none_or_clear_bad(pgd))
602                                 continue;
603                         hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
604                 } else {
605                         unsigned long more;
606                         /*
607                          * Increment next by the size of the huge mapping since
608                          * there may be more than one entry at the pgd level
609                          * for a single hugepage, but all of them point to the
610                          * same kmem cache that holds the hugepte.
611                          */
612                         more = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
613                         if (more > next)
614                                 next = more;
615
616                         free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT,
617                                           addr, next, floor, ceiling);
618                 }
619         } while (addr = next, addr != end);
620 }
621
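/*
 * follow_huge_pd() is the hugepd lookup used by the generic follow_page()
 * code (this file no longer implements follow_huge_addr()): find the
 * hugepte under mm->page_table_lock, return the sub-page backing @address
 * (taking a reference if FOLL_GET), and wait and retry if the entry is a
 * hugetlb migration entry.
 */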
622 struct page *follow_huge_pd(struct vm_area_struct *vma,
623                             unsigned long address, hugepd_t hpd,
624                             int flags, int pdshift)
625 {
626         pte_t *ptep;
627         spinlock_t *ptl;
628         struct page *page = NULL;
629         unsigned long mask;
630         int shift = hugepd_shift(hpd);
631         struct mm_struct *mm = vma->vm_mm;
632
633 retry:
634         ptl = &mm->page_table_lock;
635         spin_lock(ptl);
636
637         ptep = hugepte_offset(hpd, address, pdshift);
638         if (pte_present(*ptep)) {
639                 mask = (1UL << shift) - 1;
640                 page = pte_page(*ptep);
641                 page += ((address & mask) >> PAGE_SHIFT);
642                 if (flags & FOLL_GET)
643                         get_page(page);
644         } else {
645                 if (is_hugetlb_entry_migration(*ptep)) {
646                         spin_unlock(ptl);
647                         __migration_entry_wait(mm, ptep, ptl);
648                         goto retry;
649                 }
650         }
651         spin_unlock(ptl);
652         return page;
653 }
654
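/*
 * Lockless fast-GUP walk over a hugepd: hugepte_addr_end() clips each step
 * to one huge page, and gup_huge_pd() pins every page in [addr, end) via
 * gup_hugepte() below.
 */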
655 static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
656                                       unsigned long sz)
657 {
658         unsigned long __boundary = (addr + sz) & ~(sz-1);
659         return (__boundary - 1 < end - 1) ? __boundary : end;
660 }
661
662 int gup_huge_pd(hugepd_t hugepd, unsigned long addr, unsigned pdshift,
663                 unsigned long end, int write, struct page **pages, int *nr)
664 {
665         pte_t *ptep;
666         unsigned long sz = 1UL << hugepd_shift(hugepd);
667         unsigned long next;
668
669         ptep = hugepte_offset(hugepd, addr, pdshift);
670         do {
671                 next = hugepte_addr_end(addr, end, sz);
672                 if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
673                         return 0;
674         } while (ptep++, addr = next, addr != end);
675
676         return 1;
677 }
678
679 #ifdef CONFIG_PPC_MM_SLICES
680 unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
681                                         unsigned long len, unsigned long pgoff,
682                                         unsigned long flags)
683 {
684         struct hstate *hstate = hstate_file(file);
685         int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));
686
687         if (radix_enabled())
688                 return radix__hugetlb_get_unmapped_area(file, addr, len,
689                                                        pgoff, flags);
690         return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
691 }
692 #endif
693
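/*
 * Report the real MMU page size backing @vma: the slice psize when slices
 * are in use (hash), otherwise the hstate size for hugetlb VMAs or
 * PAGE_SIZE for everything else.
 */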
694 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
695 {
696 #ifdef CONFIG_PPC_MM_SLICES
697         unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);
698         /* With radix we don't use slices, so derive it from the vma */
699         if (!radix_enabled())
700                 return 1UL << mmu_psize_to_shift(psize);
701 #endif
702         if (!is_vm_hugetlb_page(vma))
703                 return PAGE_SIZE;
704
705         return huge_page_size(hstate_vma(vma));
706 }
707
708 static inline bool is_power_of_4(unsigned long x)
709 {
710         if (is_power_of_2(x))
711                 return (__ilog2(x) % 2) ? false : true;
712         return false;
713 }
714
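/*
 * Validate a requested huge page size against what the hardware and the
 * page table layout can support, and register an hstate for it.  This is
 * called both for a hugepagesz= command line option (e.g. "hugepagesz=16M",
 * illustrative) and for every size found in mmu_psize_defs at init time.
 */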
715 static int __init add_huge_page_size(unsigned long long size)
716 {
717         int shift = __ffs(size);
718         int mmu_psize;
719
720         /* Check that it is a page size supported by the hardware and
721          * that it fits within pagetable and slice limits. */
722         if (size <= PAGE_SIZE)
723                 return -EINVAL;
724 #if defined(CONFIG_PPC_FSL_BOOK3E)
725         if (!is_power_of_4(size))
726                 return -EINVAL;
727 #elif !defined(CONFIG_PPC_8xx)
728         if (!is_power_of_2(size) || (shift > SLICE_HIGH_SHIFT))
729                 return -EINVAL;
730 #endif
731
732         if ((mmu_psize = shift_to_mmu_psize(shift)) < 0)
733                 return -EINVAL;
734
735 #ifdef CONFIG_PPC_BOOK3S_64
736         /*
737          * Out of the different page sizes reported by firmware, only add
738          * hugetlb support for those that can be supported by the Linux
739          * page table layout.
740          * For now we have
741          * Radix: 2M
742          * Hash: 16M and 16G
743          */
744         if (radix_enabled()) {
745                 if (mmu_psize != MMU_PAGE_2M)
746                         return -EINVAL;
747         } else {
748                 if (mmu_psize != MMU_PAGE_16M && mmu_psize != MMU_PAGE_16G)
749                         return -EINVAL;
750         }
751 #endif
752
753         BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);
754
755         /* Return if this huge page size has already been set up */
756         if (size_to_hstate(size))
757                 return 0;
758
759         hugetlb_add_hstate(shift - PAGE_SHIFT);
760
761         return 0;
762 }
763
764 static int __init hugepage_setup_sz(char *str)
765 {
766         unsigned long long size;
767
768         size = memparse(str, &str);
769
770         if (add_huge_page_size(size) != 0) {
771                 hugetlb_bad_size();
772                 pr_err("Invalid huge page size specified (%llu)\n", size);
773         }
774
775         return 1;
776 }
777 __setup("hugepagesz=", hugepage_setup_sz);
778
779 struct kmem_cache *hugepte_cache;
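/*
 * Register every huge page size the MMU reports, create the page table
 * caches (or the hugepte kmem cache, where one huge page spans several
 * directory entries) needed to back them, and pick the default HPAGE_SHIFT.
 */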
780 static int __init hugetlbpage_init(void)
781 {
782         int psize;
783
784 #if !defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_PPC_8xx)
785         if (!radix_enabled() && !mmu_has_feature(MMU_FTR_16M_PAGE))
786                 return -ENODEV;
787 #endif
788         for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
789                 unsigned shift;
790                 unsigned pdshift;
791
792                 if (!mmu_psize_defs[psize].shift)
793                         continue;
794
795                 shift = mmu_psize_to_shift(psize);
796
797                 if (add_huge_page_size(1ULL << shift) < 0)
798                         continue;
799
800                 if (shift < HUGEPD_PUD_SHIFT)
801                         pdshift = PMD_SHIFT;
802                 else if (shift < HUGEPD_PGD_SHIFT)
803                         pdshift = PUD_SHIFT;
804                 else
805                         pdshift = PGDIR_SHIFT;
806                 /*
807                  * If pdshift and shift are the same, we don't use the
808                  * pgt cache for the hugepd.
809                  */
810                 if (pdshift > shift)
811                         pgtable_cache_add(pdshift - shift, NULL);
812 #if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
813                 else if (!hugepte_cache) {
814                         /*
815                          * Create a kmem cache for hugeptes.  The bottom bits of
816                          * the hugepd have size information encoded in them, so
817                          * align the hugepte tables to allow for this.
818                          */
819                         hugepte_cache = kmem_cache_create("hugepte-cache",
820                                                           sizeof(pte_t),
821                                                           HUGEPD_SHIFT_MASK + 1,
822                                                           0, NULL);
823                         if (hugepte_cache == NULL)
824                                 panic("%s: Unable to create kmem cache "
825                                       "for hugeptes\n", __func__);
826
827                 }
828 #endif
829         }
830
831 #if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
832         /* Default hpage size = 4M on FSL_BOOK3E and 512k on 8xx */
833         if (mmu_psize_defs[MMU_PAGE_4M].shift)
834                 HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_4M].shift;
835         else if (mmu_psize_defs[MMU_PAGE_512K].shift)
836                 HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_512K].shift;
837 #else
838         /* Set the default large page size. Currently, we pick 16M, 1M or 2M
839          * depending on what is available.
840          */
841         if (mmu_psize_defs[MMU_PAGE_16M].shift)
842                 HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
843         else if (mmu_psize_defs[MMU_PAGE_1M].shift)
844                 HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;
845         else if (mmu_psize_defs[MMU_PAGE_2M].shift)
846                 HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_2M].shift;
847 #endif
848         return 0;
849 }
850
851 arch_initcall(hugetlbpage_init);
852
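/*
 * Flush the data and instruction caches for every sub-page of a huge
 * compound page, using kmap_atomic() for sub-pages that live in highmem.
 */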
853 void flush_dcache_icache_hugepage(struct page *page)
854 {
855         int i;
856         void *start;
857
858         BUG_ON(!PageCompound(page));
859
860         for (i = 0; i < (1UL << compound_order(page)); i++) {
861                 if (!PageHighMem(page)) {
862                         __flush_dcache_icache(page_address(page+i));
863                 } else {
864                         start = kmap_atomic(page+i);
865                         __flush_dcache_icache(start);
866                         kunmap_atomic(start);
867                 }
868         }
869 }
870
871 #endif /* CONFIG_HUGETLB_PAGE */
872
873 /*
874  * We have 4 cases for pgds and pmds:
875  * (1) invalid (all zeroes)
876  * (2) pointer to next table, as normal; bottom 6 bits == 0
877  * (3) leaf pte for huge page, _PAGE_PTE set
878  * (4) hugepd pointer, _PAGE_PTE = 0 and bits [2..6] indicate size of table
879  *
880  * So long as we atomically load page table pointers we are safe against teardown;
881  * we can follow the address down to the page and take a ref on it.
882  * This function needs to be called with interrupts disabled. We use this variant
883  * when we have MSR[EE] = 0 but the paca->soft_enabled = 1.
884  */
885
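/*
 * Illustrative use (a sketch, not a prescribed API): with interrupts
 * already disabled, a caller might do
 *
 *	unsigned int shift;
 *	bool is_thp;
 *	pte_t *ptep = __find_linux_pte_or_hugepte(mm->pgd, ea, &is_thp, &shift);
 *
 * and treat a non-NULL ptep with shift == 0 and !is_thp as a normal pte,
 * otherwise as a huge mapping of size 1UL << shift.
 */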
886 pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
887                                    bool *is_thp, unsigned *shift)
888 {
889         pgd_t pgd, *pgdp;
890         pud_t pud, *pudp;
891         pmd_t pmd, *pmdp;
892         pte_t *ret_pte;
893         hugepd_t *hpdp = NULL;
894         unsigned pdshift = PGDIR_SHIFT;
895
896         if (shift)
897                 *shift = 0;
898
899         if (is_thp)
900                 *is_thp = false;
901
902         pgdp = pgdir + pgd_index(ea);
903         pgd  = READ_ONCE(*pgdp);
904         /*
905          * Always operate on the local stack value. This makes sure the
906          * value doesn't get updated by a parallel THP split/collapse,
907          * page fault or page unmap. The returned pte_t * is still not
908          * stable, so the caller must recheck it for the above conditions.
909          */
910         if (pgd_none(pgd))
911                 return NULL;
912         else if (pgd_huge(pgd)) {
913                 ret_pte = (pte_t *) pgdp;
914                 goto out;
915         } else if (is_hugepd(__hugepd(pgd_val(pgd))))
916                 hpdp = (hugepd_t *)&pgd;
917         else {
918                 /*
919                  * Even if we end up with an unmap, the pgtable will not
920                  * be freed, because we do an RCU free and we are running
921                  * here with interrupts disabled.
922                  */
923                 pdshift = PUD_SHIFT;
924                 pudp = pud_offset(&pgd, ea);
925                 pud  = READ_ONCE(*pudp);
926
927                 if (pud_none(pud))
928                         return NULL;
929                 else if (pud_huge(pud)) {
930                         ret_pte = (pte_t *) pudp;
931                         goto out;
932                 } else if (is_hugepd(__hugepd(pud_val(pud))))
933                         hpdp = (hugepd_t *)&pud;
934                 else {
935                         pdshift = PMD_SHIFT;
936                         pmdp = pmd_offset(&pud, ea);
937                         pmd  = READ_ONCE(*pmdp);
938                         /*
939                          * A hugepage collapse is captured by pmd_none, because
940                          * it marks the pmd none and does an hpte invalidate.
941                          */
942                         if (pmd_none(pmd))
943                                 return NULL;
944
945                         if (pmd_trans_huge(pmd)) {
946                                 if (is_thp)
947                                         *is_thp = true;
948                                 ret_pte = (pte_t *) pmdp;
949                                 goto out;
950                         }
951
952                         if (pmd_huge(pmd)) {
953                                 ret_pte = (pte_t *) pmdp;
954                                 goto out;
955                         } else if (is_hugepd(__hugepd(pmd_val(pmd))))
956                                 hpdp = (hugepd_t *)&pmd;
957                         else
958                                 return pte_offset_kernel(&pmd, ea);
959                 }
960         }
961         if (!hpdp)
962                 return NULL;
963
964         ret_pte = hugepte_offset(*hpdp, ea, pdshift);
965         pdshift = hugepd_shift(*hpdp);
966 out:
967         if (shift)
968                 *shift = pdshift;
969         return ret_pte;
970 }
971 EXPORT_SYMBOL_GPL(__find_linux_pte_or_hugepte);
972
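/*
 * Lockless leaf of the fast-GUP path: snapshot the hugepte, check that it
 * is present and grants the required (write) permission, speculatively take
 * references on the compound head, then re-check the pte and back out if it
 * changed under us.
 */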
973 int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
974                 unsigned long end, int write, struct page **pages, int *nr)
975 {
976         unsigned long mask;
977         unsigned long pte_end;
978         struct page *head, *page;
979         pte_t pte;
980         int refs;
981
982         pte_end = (addr + sz) & ~(sz-1);
983         if (pte_end < end)
984                 end = pte_end;
985
986         pte = READ_ONCE(*ptep);
987         mask = _PAGE_PRESENT | _PAGE_READ;
988
989         /*
990          * On some CPUs like the 8xx, _PAGE_RW (hence _PAGE_WRITE) is defined
991          * as 0, and _PAGE_RO has to be set when a page is not writable.
992          */
993         if (write)
994                 mask |= _PAGE_WRITE;
995         else
996                 mask |= _PAGE_RO;
997
998         if ((pte_val(pte) & mask) != mask)
999                 return 0;
1000
1001         /* hugepages are never "special" */
1002         VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
1003
1004         refs = 0;
1005         head = pte_page(pte);
1006
1007         page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
1008         do {
1009                 VM_BUG_ON(compound_head(page) != head);
1010                 pages[*nr] = page;
1011                 (*nr)++;
1012                 page++;
1013                 refs++;
1014         } while (addr += PAGE_SIZE, addr != end);
1015
1016         if (!page_cache_add_speculative(head, refs)) {
1017                 *nr -= refs;
1018                 return 0;
1019         }
1020
1021         if (unlikely(pte_val(pte) != pte_val(*ptep))) {
1022                 /* Could be optimized better */
1023                 *nr -= refs;
1024                 while (refs--)
1025                         put_page(head);
1026                 return 0;
1027         }
1028
1029         return 1;
1030 }