powerpc/mm/hash64: Map all the kernel regions in the same 0xc range
arch/powerpc/mm/hugetlbpage.c
/*
 * PPC Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/export.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <linux/moduleparam.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/kmemleak.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/setup.h>
#include <asm/hugetlb.h>
#include <asm/pte-walk.h>

#ifdef CONFIG_HUGETLB_PAGE
#define PAGE_SHIFT_64K	16
#define PAGE_SHIFT_512K	19
#define PAGE_SHIFT_8M	23
#define PAGE_SHIFT_16M	24
#define PAGE_SHIFT_16G	34

bool hugetlb_disabled = false;

unsigned int HPAGE_SHIFT;
EXPORT_SYMBOL(HPAGE_SHIFT);

#define hugepd_none(hpd)	(hpd_val(hpd) == 0)

#define PTE_T_ORDER	(__builtin_ffs(sizeof(pte_t)) - __builtin_ffs(sizeof(void *)))
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	/*
	 * Only called for hugetlbfs pages, hence can ignore THP and the
	 * irq disabled walk.
	 */
	return __find_linux_pte(mm->pgd, addr, NULL, NULL);
}
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
			   unsigned long address, unsigned int pdshift,
			   unsigned int pshift, spinlock_t *ptl)
{
	struct kmem_cache *cachep;
	pte_t *new;
	int i;
	int num_hugepd;

	if (pshift >= pdshift) {
		cachep = PGT_CACHE(PTE_T_ORDER);
		num_hugepd = 1 << (pshift - pdshift);
	} else if (IS_ENABLED(CONFIG_PPC_8xx)) {
		cachep = PGT_CACHE(PTE_INDEX_SIZE);
		num_hugepd = 1;
	} else {
		cachep = PGT_CACHE(pdshift - pshift);
		num_hugepd = 1;
	}

	new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));

	BUG_ON(pshift > HUGEPD_SHIFT_MASK);
	BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);

	if (!new)
		return -ENOMEM;

	/*
	 * Make sure other cpus find the hugepd set only after a
	 * properly initialized page table is visible to them.
	 * For more details look for comment in __pte_alloc().
	 */
	smp_wmb();

	spin_lock(ptl);
	/*
	 * We have multiple higher-level entries that point to the same
	 * actual pte location.  Fill in each as we go and backtrack on error.
	 * We need all of these so the DTLB pgtable walk code can find the
	 * right higher-level entry without knowing if it's a hugepage or not.
	 */
	for (i = 0; i < num_hugepd; i++, hpdp++) {
		if (unlikely(!hugepd_none(*hpdp)))
			break;
#ifdef CONFIG_PPC_BOOK3S_64
		*hpdp = __hugepd(__pa(new) | HUGEPD_VAL_BITS |
				 (shift_to_mmu_psize(pshift) << 2));
#elif defined(CONFIG_PPC_8xx)
		*hpdp = __hugepd(__pa(new) | _PMD_USER |
				 (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M :
				  _PMD_PAGE_512K) | _PMD_PRESENT);
#else
		/* We use the old format for PPC_FSL_BOOK3E */
		*hpdp = __hugepd(((unsigned long)new & ~PD_HUGE) | pshift);
#endif
	}
	/* If we bailed from the for loop early, an error occurred, clean up */
	if (i < num_hugepd) {
		for (i = i - 1; i >= 0; i--, hpdp--)
			*hpdp = __hugepd(0);
		kmem_cache_free(cachep, new);
	} else {
		kmemleak_ignore(new);
	}
	spin_unlock(ptl);
	return 0;
}
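/*
 * Worked example for the fan-out above (illustrative; concrete shift
 * values depend on the config): on the usual ppc32 2-level layout with
 * PGDIR_SHIFT == PMD_SHIFT == 22, an 8xx 8M hugepage has pshift == 23 and
 * its hugepd sits at the top level with pdshift == 22, so
 *
 *	num_hugepd = 1 << (23 - 22) = 2
 *
 * consecutive directory entries each receive the same hugepte pointer,
 * letting the DTLB walk succeed from either 4M half of the 8M page.
 */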
/*
 * At this point we do the placement change only for BOOK3S 64. This would
 * possibly work on other subarchs.
 */
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;
	unsigned pshift = __ffs(sz);
	unsigned pdshift = PGDIR_SHIFT;
	spinlock_t *ptl;

	addr &= ~(sz - 1);
	pg = pgd_offset(mm, addr);

#ifdef CONFIG_PPC_BOOK3S_64
	if (pshift == PGDIR_SHIFT)
		/* 16GB huge page */
		return (pte_t *)pg;
	else if (pshift > PUD_SHIFT) {
		/*
		 * We need to use hugepd table
		 */
		ptl = &mm->page_table_lock;
		hpdp = (hugepd_t *)pg;
	} else {
		pdshift = PUD_SHIFT;
		pu = pud_alloc(mm, pg, addr);
		if (pshift == PUD_SHIFT)
			return (pte_t *)pu;
		else if (pshift > PMD_SHIFT) {
			ptl = pud_lockptr(mm, pu);
			hpdp = (hugepd_t *)pu;
		} else {
			pdshift = PMD_SHIFT;
			pm = pmd_alloc(mm, pu, addr);
			if (pshift == PMD_SHIFT)
				/* 16MB hugepage */
				return (pte_t *)pm;
			ptl = pmd_lockptr(mm, pm);
			hpdp = (hugepd_t *)pm;
		}
	}
#else
	if (pshift >= PGDIR_SHIFT) {
		ptl = &mm->page_table_lock;
		hpdp = (hugepd_t *)pg;
	} else {
		pdshift = PUD_SHIFT;
		pu = pud_alloc(mm, pg, addr);
		if (pshift >= PUD_SHIFT) {
			ptl = pud_lockptr(mm, pu);
			hpdp = (hugepd_t *)pu;
		} else {
			pdshift = PMD_SHIFT;
			pm = pmd_alloc(mm, pu, addr);
			ptl = pmd_lockptr(mm, pm);
			hpdp = (hugepd_t *)pm;
		}
	}
#endif
	if (!hpdp)
		return NULL;

	BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr,
						  pdshift, pshift, ptl))
		return NULL;

	return hugepte_offset(*hpdp, addr, pdshift);
}
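/*
 * Worked example (illustrative; the shift values are assumptions for a
 * hash kernel with 64K base pages, where PMD_SHIFT == 24): sz = 16M gives
 * pshift = __ffs(sz) = 24 == PMD_SHIFT, so the pmd entry itself is the
 * huge pte slot (the "16MB hugepage" case above).  A 16G page (pshift =
 * 34) resolves the same way at whichever upper level has a matching
 * shift, as the "16GB huge page" branch assumes for the pgd.  Sizes that
 * fall between two levels get a hugepd table via __hugepte_alloc().
 */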
#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Tracks gpages after the device tree is scanned and before the
 * huge_boot_pages list is ready on pseries.
 */
#define MAX_NUMBER_GPAGES	1024
__initdata static u64 gpage_freearray[MAX_NUMBER_GPAGES];
__initdata static unsigned nr_gpages;

/*
 * Build list of addresses of gigantic pages.  This function is used in
 * early boot before the buddy allocator is set up.
 */
void __init pseries_add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
	if (!addr)
		return;
	while (number_of_pages > 0) {
		gpage_freearray[nr_gpages] = addr;
		nr_gpages++;
		number_of_pages--;
		addr += page_size;
	}
}

int __init pseries_alloc_bootmem_huge_page(struct hstate *hstate)
{
	struct huge_bootmem_page *m;

	if (nr_gpages == 0)
		return 0;
	m = phys_to_virt(gpage_freearray[--nr_gpages]);
	gpage_freearray[nr_gpages] = 0;
	list_add(&m->list, &huge_boot_pages);
	m->hstate = hstate;
	return 1;
}
#endif
int __init alloc_bootmem_huge_page(struct hstate *h)
{
#ifdef CONFIG_PPC_BOOK3S_64
	if (firmware_has_feature(FW_FEATURE_LPAR) && !radix_enabled())
		return pseries_alloc_bootmem_huge_page(h);
#endif
	return __alloc_bootmem_huge_page(h);
}
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
#define HUGEPD_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))

struct hugepd_freelist {
	struct rcu_head rcu;
	unsigned int index;
	void *ptes[0];
};

static DEFINE_PER_CPU(struct hugepd_freelist *, hugepd_freelist_cur);

static void hugepd_free_rcu_callback(struct rcu_head *head)
{
	struct hugepd_freelist *batch =
		container_of(head, struct hugepd_freelist, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		kmem_cache_free(PGT_CACHE(PTE_T_ORDER), batch->ptes[i]);

	free_page((unsigned long)batch);
}

static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
{
	struct hugepd_freelist **batchp;

	batchp = &get_cpu_var(hugepd_freelist_cur);

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    mm_is_thread_local(tlb->mm)) {
		kmem_cache_free(PGT_CACHE(PTE_T_ORDER), hugepte);
		put_cpu_var(hugepd_freelist_cur);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct hugepd_freelist *)__get_free_page(GFP_ATOMIC);
		(*batchp)->index = 0;
	}

	(*batchp)->ptes[(*batchp)->index++] = hugepte;
	if ((*batchp)->index == HUGEPD_FREELIST_SIZE) {
		call_rcu(&(*batchp)->rcu, hugepd_free_rcu_callback);
		*batchp = NULL;
	}
	put_cpu_var(hugepd_freelist_cur);
}
#else
static inline void hugepd_free(struct mmu_gather *tlb, void *hugepte) {}
#endif
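/*
 * Sizing sketch (worked example under assumed sizes, not taken from the
 * original source): with 4K pages, a 16-byte struct rcu_head, a 4-byte
 * index padded to 8, and sizeof(pte_t) == 8, one batch page holds about
 *
 *	HUGEPD_FREELIST_SIZE = (4096 - 24) / 8 = 509
 *
 * hugepte-table pointers, so roughly one call_rcu() is issued per ~509
 * frees instead of one per table.  The RCU grace period matters because
 * lockless walkers (e.g. gup) may still be traversing these tables; they
 * are guaranteed to have finished before the memory goes back to the
 * kmem cache.
 */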
static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
			      unsigned long start, unsigned long end,
			      unsigned long floor, unsigned long ceiling)
{
	pte_t *hugepte = hugepd_page(*hpdp);
	int i;

	unsigned long pdmask = ~((1UL << pdshift) - 1);
	unsigned int num_hugepd = 1;
	unsigned int shift = hugepd_shift(*hpdp);

	/* Note: On fsl the hpdp may be the first of several */
	if (shift > pdshift)
		num_hugepd = 1 << (shift - pdshift);

	start &= pdmask;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= pdmask;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	for (i = 0; i < num_hugepd; i++, hpdp++)
		*hpdp = __hugepd(0);

	if (shift >= pdshift)
		hugepd_free(tlb, hugepte);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		pgtable_free_tlb(tlb, hugepte,
				 get_hugepd_cache_index(PTE_INDEX_SIZE));
	else
		pgtable_free_tlb(tlb, hugepte,
				 get_hugepd_cache_index(pdshift - shift));
}
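/*
 * Note on the floor/ceiling checks above (annotation; same convention as
 * the generic free_pgd_range()): the directory entries are only torn
 * down when the whole region they cover lies inside [floor, ceiling).
 * A ceiling of 0 means "no upper limit": in
 *
 *	if (end - 1 > ceiling - 1)
 *
 * the unsigned ceiling - 1 wraps to ULONG_MAX, so the check never
 * rejects the teardown in the no-ceiling case.
 */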
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	do {
		unsigned long more;

		pmd = pmd_offset(pud, addr);
		next = pmd_addr_end(addr, end);
		if (!is_hugepd(__hugepd(pmd_val(*pmd)))) {
			/*
			 * If it is not a hugepd pointer, we should have
			 * already found it cleared.
			 */
			WARN_ON(!pmd_none_or_clear_bad(pmd));
			continue;
		}
		/*
		 * Increment next by the size of the huge mapping since
		 * there may be more than one entry at this level for a
		 * single hugepage, but all of them point to
		 * the same kmem cache that holds the hugepte.
		 */
		more = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
		if (more > next)
			next = more;

		free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
				  addr, next, floor, ceiling);
	} while (addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}
static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	do {
		pud = pud_offset(pgd, addr);
		next = pud_addr_end(addr, end);
		if (!is_hugepd(__hugepd(pud_val(*pud)))) {
			if (pud_none_or_clear_bad(pud))
				continue;
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
					       ceiling);
		} else {
			unsigned long more;
			/*
			 * Increment next by the size of the huge mapping since
			 * there may be more than one entry at this level for a
			 * single hugepage, but all of them point to
			 * the same kmem cache that holds the hugepte.
			 */
			more = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
			if (more > next)
				next = more;

			free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud, start);
	mm_dec_nr_puds(tlb->mm);
}
/*
 * This function frees user-level page tables of a process.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	/*
	 * Because there are a number of different possible pagetable
	 * layouts for hugepage ranges, we limit knowledge of how
	 * things should be laid out to the allocation path
	 * (huge_pte_alloc(), above).  Everything else works out the
	 * structure as it goes from information in the hugepd
	 * pointers.  That means that we can't use here the
	 * optimization used in the normal page free_pgd_range(), of
	 * checking whether we're actually covering a large enough
	 * range to have to do anything at the top level of the walk
	 * instead of at the bottom.
	 *
	 * To make sense of this, you should probably go read the big
	 * block comment at the top of the normal free_pgd_range(),
	 * too.
	 */

	do {
		next = pgd_addr_end(addr, end);
		pgd = pgd_offset(tlb->mm, addr);
		if (!is_hugepd(__hugepd(pgd_val(*pgd)))) {
			if (pgd_none_or_clear_bad(pgd))
				continue;
			hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
		} else {
			unsigned long more;
			/*
			 * Increment next by the size of the huge mapping since
			 * there may be more than one entry at the pgd level
			 * for a single hugepage, but all of them point to the
			 * same kmem cache that holds the hugepte.
			 */
			more = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
			if (more > next)
				next = more;

			free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);
}
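/*
 * Worked example for the "increment next" logic in the walkers above
 * (illustrative; reuses the 8xx 8M case sketched after __hugepte_alloc(),
 * where an 8M page spans two adjacent 4M top-level entries):
 * pgd_addr_end() would advance addr by only 4M, landing on the second
 * sibling entry of the same hugepage.  Bumping next up to
 * addr + (1 << hugepd_shift(...)) = addr + 8M lets free_hugepd_range()
 * handle both entries in one call, so the shared hugepte table is freed
 * exactly once.
 */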
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift)
{
	pte_t *ptep;
	spinlock_t *ptl;
	struct page *page = NULL;
	unsigned long mask;
	int shift = hugepd_shift(hpd);
	struct mm_struct *mm = vma->vm_mm;

retry:
	/*
	 * hugepage directory entries are protected by mm->page_table_lock.
	 * Use this instead of huge_pte_lockptr().
	 */
	ptl = &mm->page_table_lock;
	spin_lock(ptl);

	ptep = hugepte_offset(hpd, address, pdshift);
	if (pte_present(*ptep)) {
		mask = (1UL << shift) - 1;
		page = pte_page(*ptep);
		page += ((address & mask) >> PAGE_SHIFT);
		if (flags & FOLL_GET)
			get_page(page);
	} else {
		if (is_hugetlb_entry_migration(*ptep)) {
			spin_unlock(ptl);
			__migration_entry_wait(mm, ptep, ptl);
			goto retry;
		}
	}
	spin_unlock(ptl);
	return page;
}
static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
				      unsigned long sz)
{
	unsigned long __boundary = (addr + sz) & ~(sz - 1);
	return (__boundary - 1 < end - 1) ? __boundary : end;
}

int gup_huge_pd(hugepd_t hugepd, unsigned long addr, unsigned pdshift,
		unsigned long end, int write, struct page **pages, int *nr)
{
	pte_t *ptep;
	unsigned long sz = 1UL << hugepd_shift(hugepd);
	unsigned long next;

	ptep = hugepte_offset(hugepd, addr, pdshift);
	do {
		next = hugepte_addr_end(addr, end, sz);
		if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
			return 0;
	} while (ptep++, addr = next, addr != end);

	return 1;
}
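/*
 * Worked example for hugepte_addr_end() (illustrative numbers): with
 * sz = 16M (0x1000000), addr = 0x10000000 and end = 0x12300000, the next
 * 16M boundary is
 *
 *	(0x10000000 + 0x1000000) & ~0xffffff = 0x11000000
 *
 * which is below end, so that boundary is returned and gup_huge_pd()
 * advances one hugepte slot per 16M step.  The "- 1" comparisons keep
 * the math correct when a boundary wraps to 0 at the top of the address
 * space.
 */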
#ifdef CONFIG_PPC_MM_SLICES
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	struct hstate *hstate = hstate_file(file);
	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));

#ifdef CONFIG_PPC_RADIX_MMU
	if (radix_enabled())
		return radix__hugetlb_get_unmapped_area(file, addr, len,
							pgoff, flags);
#endif
	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
}
#endif
unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
#ifdef CONFIG_PPC_MM_SLICES
	/* With radix we don't use slices, so derive the size from the vma */
	if (!radix_enabled()) {
		unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);

		return 1UL << mmu_psize_to_shift(psize);
	}
#endif
	return vma_kernel_pagesize(vma);
}
static inline bool is_power_of_4(unsigned long x)
{
	if (is_power_of_2(x))
		return (__ilog2(x) % 2) ? false : true;
	return false;
}
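/*
 * Example: 16M = 2^24 passes (is_power_of_2() holds and __ilog2() = 24 is
 * even, i.e. 16M = 4^12), while 8M = 2^23 fails because 23 is odd.  This
 * matches the FSL_BOOK3E requirement in add_huge_page_size() below that
 * hugepage sizes be powers of 4.
 */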
static int __init add_huge_page_size(unsigned long long size)
{
	int shift = __ffs(size);
	int mmu_psize;

	/* Check that it is a page size supported by the hardware and
	 * that it fits within pagetable and slice limits. */
	if (size <= PAGE_SIZE)
		return -EINVAL;
#if defined(CONFIG_PPC_FSL_BOOK3E)
	if (!is_power_of_4(size))
		return -EINVAL;
#elif !defined(CONFIG_PPC_8xx)
	if (!is_power_of_2(size) || (shift > SLICE_HIGH_SHIFT))
		return -EINVAL;
#endif

	if ((mmu_psize = shift_to_mmu_psize(shift)) < 0)
		return -EINVAL;

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * We need to make sure that for the different page sizes reported
	 * by firmware we only add hugetlb support for page sizes that can
	 * be supported by the linux page table layout.
	 * For now we have:
	 * Radix: 2M and 1G
	 * Hash: 16M and 16G
	 */
	if (radix_enabled()) {
		if (mmu_psize != MMU_PAGE_2M && mmu_psize != MMU_PAGE_1G)
			return -EINVAL;
	} else {
		if (mmu_psize != MMU_PAGE_16M && mmu_psize != MMU_PAGE_16G)
			return -EINVAL;
	}
#endif

	BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);

	/* Return if the huge page size has already been set up */
	if (size_to_hstate(size))
		return 0;

	hugetlb_add_hstate(shift - PAGE_SHIFT);

	return 0;
}
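/*
 * Worked example (illustrative): add_huge_page_size(16ULL << 20) computes
 * shift = __ffs(16M) = 24, maps it to MMU_PAGE_16M, and, if no hstate for
 * that size exists yet, registers one via
 * hugetlb_add_hstate(24 - PAGE_SHIFT), i.e. order 12 on a 4K-base-page
 * kernel or order 8 with 64K base pages.
 */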
static int __init hugepage_setup_sz(char *str)
{
	unsigned long long size;

	size = memparse(str, &str);

	if (add_huge_page_size(size) != 0) {
		hugetlb_bad_size();
		pr_err("Invalid huge page size specified (%llu)\n", size);
	}

	return 1;
}
__setup("hugepagesz=", hugepage_setup_sz);
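/*
 * Usage example: booting with "hugepagesz=16M hugepages=8" on the kernel
 * command line runs this handler with str = "16M"; memparse() turns that
 * into 16777216, add_huge_page_size() registers the matching hstate, and
 * the generic hugetlb code then reserves 8 pages of that size.
 */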
static int __init hugetlbpage_init(void)
{
	int psize;

	if (hugetlb_disabled) {
		pr_info("HugeTLB support is disabled!\n");
		return 0;
	}

#if !defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_PPC_8xx)
	if (!radix_enabled() && !mmu_has_feature(MMU_FTR_16M_PAGE))
		return -ENODEV;
#endif
	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		unsigned shift;
		unsigned pdshift;

		if (!mmu_psize_defs[psize].shift)
			continue;

		shift = mmu_psize_to_shift(psize);

#ifdef CONFIG_PPC_BOOK3S_64
		if (shift > PGDIR_SHIFT)
			continue;
		else if (shift > PUD_SHIFT)
			pdshift = PGDIR_SHIFT;
		else if (shift > PMD_SHIFT)
			pdshift = PUD_SHIFT;
		else
			pdshift = PMD_SHIFT;
#else
		if (shift < PUD_SHIFT)
			pdshift = PMD_SHIFT;
		else if (shift < PGDIR_SHIFT)
			pdshift = PUD_SHIFT;
		else
			pdshift = PGDIR_SHIFT;
#endif

		if (add_huge_page_size(1ULL << shift) < 0)
			continue;
		/*
		 * If pdshift and shift are the same, we don't use the pgt
		 * cache for hugepd.
		 */
		if (pdshift > shift && IS_ENABLED(CONFIG_PPC_8xx))
			pgtable_cache_add(PTE_INDEX_SIZE);
		else if (pdshift > shift)
			pgtable_cache_add(pdshift - shift);
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
		else
			pgtable_cache_add(PTE_T_ORDER);
#endif
	}

#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
	/* Default hpage size = 4M on FSL_BOOK3E and 512k on 8xx */
	if (mmu_psize_defs[MMU_PAGE_4M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_4M].shift;
	else if (mmu_psize_defs[MMU_PAGE_512K].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_512K].shift;
#else
	/* Set the default large page size. Currently, we pick 16M or 1M
	 * depending on what is available.
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;
	else if (mmu_psize_defs[MMU_PAGE_2M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_2M].shift;
#endif
	return 0;
}

arch_initcall(hugetlbpage_init);
void flush_dcache_icache_hugepage(struct page *page)
{
	int i;
	void *start;

	BUG_ON(!PageCompound(page));

	for (i = 0; i < (1UL << compound_order(page)); i++) {
		if (!PageHighMem(page)) {
			__flush_dcache_icache(page_address(page + i));
		} else {
			start = kmap_atomic(page + i);
			__flush_dcache_icache(start);
			kunmap_atomic(start);
		}
	}
}

#endif /* CONFIG_HUGETLB_PAGE */
/*
 * We have 4 cases for pgds and pmds:
 * (1) invalid (all zeroes)
 * (2) pointer to next table, as normal; bottom 6 bits == 0
 * (3) leaf pte for huge page, _PAGE_PTE set
 * (4) hugepd pointer, _PAGE_PTE = 0 and bits [2..6] indicate size of table
 *
 * So long as we atomically load page table pointers we are safe against
 * teardown, and we can follow the address down to the page and take a ref
 * on it.  This function needs to be called with interrupts disabled.  We
 * use this variant when we have MSR[EE] = 0 but
 * paca->irq_soft_mask = IRQS_ENABLED.
 */
pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
			bool *is_thp, unsigned *hpage_shift)
{
	pgd_t pgd, *pgdp;
	pud_t pud, *pudp;
	pmd_t pmd, *pmdp;
	pte_t *ret_pte;
	hugepd_t *hpdp = NULL;
	unsigned pdshift = PGDIR_SHIFT;

	if (hpage_shift)
		*hpage_shift = 0;

	if (is_thp)
		*is_thp = false;

	pgdp = pgdir + pgd_index(ea);
	pgd = READ_ONCE(*pgdp);
	/*
	 * Always operate on the local stack value.  This makes sure the
	 * value doesn't get updated by a parallel THP split/collapse, page
	 * fault or page unmap.  The returned pte_t * is still not stable,
	 * so the caller must recheck it for those conditions.
	 */
	if (pgd_none(pgd))
		return NULL;
	else if (pgd_huge(pgd)) {
		ret_pte = (pte_t *)pgdp;
		goto out;
	} else if (is_hugepd(__hugepd(pgd_val(pgd))))
		hpdp = (hugepd_t *)&pgd;
	else {
		/*
		 * Even if we end up with an unmap, the pgtable will not
		 * be freed, because we do an rcu free and here we are
		 * irq disabled.
		 */
		pdshift = PUD_SHIFT;
		pudp = pud_offset(&pgd, ea);
		pud = READ_ONCE(*pudp);

		if (pud_none(pud))
			return NULL;
		else if (pud_huge(pud)) {
			ret_pte = (pte_t *)pudp;
			goto out;
		} else if (is_hugepd(__hugepd(pud_val(pud))))
			hpdp = (hugepd_t *)&pud;
		else {
			pdshift = PMD_SHIFT;
			pmdp = pmd_offset(&pud, ea);
			pmd = READ_ONCE(*pmdp);
			/*
			 * A hugepage collapse is captured by pmd_none,
			 * because it marks the pmd none and does a hpte
			 * invalidate.
			 */
			if (pmd_none(pmd))
				return NULL;

			if (pmd_trans_huge(pmd) || pmd_devmap(pmd)) {
				if (is_thp)
					*is_thp = true;
				ret_pte = (pte_t *)pmdp;
				goto out;
			}
			/*
			 * The pmd_large() check below will handle the swap
			 * pmd pte case; we need to do both checks because
			 * they are config dependent.
			 */
			if (pmd_huge(pmd) || pmd_large(pmd)) {
				ret_pte = (pte_t *)pmdp;
				goto out;
			} else if (is_hugepd(__hugepd(pmd_val(pmd))))
				hpdp = (hugepd_t *)&pmd;
			else
				return pte_offset_kernel(&pmd, ea);
		}
	}
	if (!hpdp)
		return NULL;

	ret_pte = hugepte_offset(*hpdp, ea, pdshift);
	pdshift = hugepd_shift(*hpdp);
out:
	if (hpage_shift)
		*hpage_shift = pdshift;
	return ret_pte;
}
EXPORT_SYMBOL_GPL(__find_linux_pte);
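/*
 * Illustrative usage sketch (hypothetical caller, following the interrupt
 * rule stated in the comment above; "mm" and "ea" are assumed locals):
 *
 *	unsigned long flags;
 *	unsigned int shift;
 *	bool is_thp;
 *	pte_t *ptep;
 *
 *	local_irq_save(flags);
 *	ptep = __find_linux_pte(mm->pgd, ea, &is_thp, &shift);
 *	if (ptep)
 *		... use READ_ONCE(*ptep) while irqs remain disabled ...
 *	local_irq_restore(flags);
 *
 * Disabling interrupts holds off the RCU-deferred page-table free, so the
 * returned pointer is only valid within the irq-disabled window.
 */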
int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long pte_end;
	struct page *head, *page;
	pte_t pte;
	int refs;

	pte_end = (addr + sz) & ~(sz - 1);
	if (pte_end < end)
		end = pte_end;

	pte = READ_ONCE(*ptep);

	if (!pte_access_permitted(pte, write))
		return 0;

	/* hugepages are never "special" */
	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

	refs = 0;
	head = pte_page(pte);

	page = head + ((addr & (sz - 1)) >> PAGE_SHIFT);
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pte_val(pte) != pte_val(*ptep))) {
		/* Could be optimized better */
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	return 1;
}