/*
 * PPC Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/mm.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/export.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <linux/moduleparam.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/kmemleak.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/setup.h>
#include <asm/hugetlb.h>
#include <asm/pte-walk.h>

bool hugetlb_disabled = false;

#define hugepd_none(hpd)	(hpd_val(hpd) == 0)

#define PTE_T_ORDER	(__builtin_ffs(sizeof(pte_t)) - __builtin_ffs(sizeof(void *)))
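
/*
 * Illustrative note (added, not from the original source): assuming both
 * sizes are powers of two, PTE_T_ORDER is the log2 of
 * sizeof(pte_t) / sizeof(void *).  For example, with 8-byte PTEs on a
 * 32-bit kernel with 4-byte pointers it evaluates to
 * __builtin_ffs(8) - __builtin_ffs(4) = 4 - 3 = 1; when pte_t and
 * void * are the same size it is 0.
 */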

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	/*
	 * Only called for hugetlbfs pages, hence can ignore THP and the
	 * irq disabled walk.
	 */
	return __find_linux_pte(mm->pgd, addr, NULL, NULL);
}

static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
			   unsigned long address, unsigned int pdshift,
			   unsigned int pshift, spinlock_t *ptl)
{
	struct kmem_cache *cachep;
	pte_t *new;
	int i;
	int num_hugepd;

	if (pshift >= pdshift) {
		cachep = PGT_CACHE(PTE_T_ORDER);
		num_hugepd = 1 << (pshift - pdshift);
	} else if (IS_ENABLED(CONFIG_PPC_8xx)) {
		cachep = PGT_CACHE(PTE_INDEX_SIZE);
		num_hugepd = 1;
	} else {
		cachep = PGT_CACHE(pdshift - pshift);
		num_hugepd = 1;
	}

	new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));

	BUG_ON(pshift > HUGEPD_SHIFT_MASK);
	BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);

	if (!new)
		return -ENOMEM;

	/*
	 * Make sure other cpus find the hugepd set only after a
	 * properly initialized page table is visible to them.
	 * For more details look for comment in __pte_alloc().
	 */
	smp_wmb();

	spin_lock(ptl);
	/*
	 * We have multiple higher-level entries that point to the same
	 * actual pte location.  Fill in each as we go and backtrack on error.
	 * We need all of these so the DTLB pgtable walk code can find the
	 * right higher-level entry without knowing if it's a hugepage or not.
	 */
	for (i = 0; i < num_hugepd; i++, hpdp++) {
		if (unlikely(!hugepd_none(*hpdp)))
			break;
		hugepd_populate(hpdp, new, pshift);
	}
	/* If we bailed from the for loop early, an error occurred, clean up */
	if (i < num_hugepd) {
		for (i = i - 1 ; i >= 0; i--, hpdp--)
			*hpdp = __hugepd(0);
		kmem_cache_free(cachep, new);
	} else {
		kmemleak_ignore(new);
	}

	spin_unlock(ptl);
	return 0;
}
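
/*
 * Added explanatory note (illustrative, not from the original source):
 * when the huge page is larger than the area covered by one directory
 * entry (pshift >= pdshift), every directory slot spanned by the page
 * gets the same hugepd.  E.g. if pshift - pdshift = 2, the loop above
 * populates 1 << 2 = 4 consecutive entries with the same hugepte table,
 * so a walker hitting any of them resolves to the same huge PTE.
 */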

/*
 * At this point we do the placement change only for BOOK3S 64. This would
 * possibly work on other subarchs.
 */
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;
	unsigned pshift = __ffs(sz);
	unsigned pdshift = PGDIR_SHIFT;
	spinlock_t *ptl;

	addr &= ~(sz-1);
	pg = pgd_offset(mm, addr);

#ifdef CONFIG_PPC_BOOK3S_64
	if (pshift == PGDIR_SHIFT)
		/* 16GB huge page */
		return (pte_t *) pg;
	else if (pshift > PUD_SHIFT) {
		/*
		 * We need to use hugepd table
		 */
		ptl = &mm->page_table_lock;
		hpdp = (hugepd_t *)pg;
	} else {
		pdshift = PUD_SHIFT;
		pu = pud_alloc(mm, pg, addr);
		if (pshift == PUD_SHIFT)
			return (pte_t *)pu;
		else if (pshift > PMD_SHIFT) {
			ptl = pud_lockptr(mm, pu);
			hpdp = (hugepd_t *)pu;
		} else {
			pdshift = PMD_SHIFT;
			pm = pmd_alloc(mm, pu, addr);
			if (pshift == PMD_SHIFT)
				/* 16MB hugepage */
				return (pte_t *)pm;
			else {
				ptl = pmd_lockptr(mm, pm);
				hpdp = (hugepd_t *)pm;
			}
		}
	}
#else
	if (pshift >= PGDIR_SHIFT) {
		ptl = &mm->page_table_lock;
		hpdp = (hugepd_t *)pg;
	} else {
		pdshift = PUD_SHIFT;
		pu = pud_alloc(mm, pg, addr);
		if (pshift >= PUD_SHIFT) {
			ptl = pud_lockptr(mm, pu);
			hpdp = (hugepd_t *)pu;
		} else {
			pdshift = PMD_SHIFT;
			pm = pmd_alloc(mm, pu, addr);
			ptl = pmd_lockptr(mm, pm);
			hpdp = (hugepd_t *)pm;
		}
	}
#endif

	BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr,
						  pdshift, pshift, ptl))
		return NULL;

	return hugepte_offset(*hpdp, addr, pdshift);
}
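
/*
 * Added summary (illustrative, not from the original source): on Book3S
 * 64 the level at which the huge PTE lives follows directly from the
 * page-size shift computed above.  If pshift matches the shift of a
 * directory level (e.g. pshift == PMD_SHIFT), the PTE is stored directly
 * in that directory entry; otherwise a hugepd table is hung off the
 * enclosing level and pdshift records that level for hugepte_offset().
 */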

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Tracks gpages after the device tree is scanned and before the
 * huge_boot_pages list is ready on pseries.
 */
#define MAX_NUMBER_GPAGES	1024
__initdata static u64 gpage_freearray[MAX_NUMBER_GPAGES];
__initdata static unsigned nr_gpages;

/*
 * Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy allocator is setup.
 */
void __init pseries_add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
	while (number_of_pages > 0) {
		gpage_freearray[nr_gpages] = addr;
		nr_gpages++;
		number_of_pages--;
		addr += page_size;
	}
}

int __init pseries_alloc_bootmem_huge_page(struct hstate *hstate)
{
	struct huge_bootmem_page *m;

	if (nr_gpages == 0)
		return 0;
	m = phys_to_virt(gpage_freearray[--nr_gpages]);
	gpage_freearray[nr_gpages] = 0;
	list_add(&m->list, &huge_boot_pages);
	m->hstate = hstate;
	return 1;
}
#endif
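
/*
 * Added note (illustrative, not from the original source): at boot the
 * pseries device-tree scan reports gigantic pages via pseries_add_gpage(),
 * which only records their physical addresses in gpage_freearray[].
 * Later, while hstates are being set up and the buddy allocator is still
 * unavailable, the helper above pops one recorded address at a time and
 * queues it on huge_boot_pages for the generic hugetlb boot code to adopt.
 */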

int __init alloc_bootmem_huge_page(struct hstate *h)
{
#ifdef CONFIG_PPC_BOOK3S_64
	if (firmware_has_feature(FW_FEATURE_LPAR) && !radix_enabled())
		return pseries_alloc_bootmem_huge_page(h);
#endif
	return __alloc_bootmem_huge_page(h);
}

#ifndef CONFIG_PPC_BOOK3S_64
#define HUGEPD_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))

struct hugepd_freelist {
	struct rcu_head	rcu;
	unsigned int index;
	void *ptes[0];
};

static DEFINE_PER_CPU(struct hugepd_freelist *, hugepd_freelist_cur);
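
/*
 * Added sizing note (illustrative, not from the original source): each
 * batch is a single page, so HUGEPD_FREELIST_SIZE is roughly
 * PAGE_SIZE / sizeof(pte_t) minus the header.  Assuming 4KB pages, a
 * 16-byte header and 8-byte PTEs, that is about (4096 - 16) / 8 = 510
 * hugepte tables that can be queued per CPU before an RCU free is forced.
 */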

static void hugepd_free_rcu_callback(struct rcu_head *head)
{
	struct hugepd_freelist *batch =
		container_of(head, struct hugepd_freelist, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		kmem_cache_free(PGT_CACHE(PTE_T_ORDER), batch->ptes[i]);

	free_page((unsigned long)batch);
}

static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
{
	struct hugepd_freelist **batchp;

	batchp = &get_cpu_var(hugepd_freelist_cur);

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    mm_is_thread_local(tlb->mm)) {
		kmem_cache_free(PGT_CACHE(PTE_T_ORDER), hugepte);
		put_cpu_var(hugepd_freelist_cur);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct hugepd_freelist *)__get_free_page(GFP_ATOMIC);
		(*batchp)->index = 0;
	}

	(*batchp)->ptes[(*batchp)->index++] = hugepte;
	if ((*batchp)->index == HUGEPD_FREELIST_SIZE) {
		call_rcu(&(*batchp)->rcu, hugepd_free_rcu_callback);
		*batchp = NULL;
	}
	put_cpu_var(hugepd_freelist_cur);
}
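
/*
 * Added design note (not from the original source): hugepte tables of a
 * shared mm are not freed immediately because another CPU may still be
 * walking them locklessly (e.g. software TLB miss or gup_fast style
 * walkers).  They are therefore batched per CPU and handed to call_rcu(),
 * so the actual kmem_cache_free() only happens after a grace period.
 * When the mm is single-threaded and CPU-local, the table is freed
 * directly since no concurrent walker can hold a reference.
 */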
#else
static inline void hugepd_free(struct mmu_gather *tlb, void *hugepte) {}
#endif

static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
			      unsigned long start, unsigned long end,
			      unsigned long floor, unsigned long ceiling)
{
	pte_t *hugepte = hugepd_page(*hpdp);
	int i;

	unsigned long pdmask = ~((1UL << pdshift) - 1);
	unsigned int num_hugepd = 1;
	unsigned int shift = hugepd_shift(*hpdp);

	/* Note: On fsl the hpdp may be the first of several */
	if (shift > pdshift)
		num_hugepd = 1 << (shift - pdshift);

	if (end - 1 > ceiling - 1)
		return;

	for (i = 0; i < num_hugepd; i++, hpdp++)
		*hpdp = __hugepd(0);

	if (shift >= pdshift)
		hugepd_free(tlb, hugepte);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		pgtable_free_tlb(tlb, hugepte,
				 get_hugepd_cache_index(PTE_INDEX_SIZE));
	else
		pgtable_free_tlb(tlb, hugepte,
				 get_hugepd_cache_index(pdshift - shift));
}
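
/*
 * Added note (not from the original source): the three branches above
 * mirror the cache selection in __hugepte_alloc(), so each hugepte table
 * is returned to the same kmem cache it was allocated from: PTE_T_ORDER
 * when the table sat at or above the directory level, PTE_INDEX_SIZE on
 * 8xx, and (pdshift - shift) otherwise.
 */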

static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	do {
		unsigned long more;

		pmd = pmd_offset(pud, addr);
		next = pmd_addr_end(addr, end);
		if (!is_hugepd(__hugepd(pmd_val(*pmd)))) {
			/*
			 * if it is not hugepd pointer, we should already find
			 * it cleared.
			 */
			WARN_ON(!pmd_none_or_clear_bad(pmd));
			continue;
		}
		/*
		 * Increment next by the size of the huge mapping since
		 * there may be more than one entry at this level for a
		 * single hugepage, but all of them point to
		 * the same kmem cache that holds the hugepte.
		 */
		more = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
		if (more > next)
			next = more;

		free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
				  addr, next, floor, ceiling);
	} while (addr = next, addr != end);

	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}

static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	do {
		pud = pud_offset(pgd, addr);
		next = pud_addr_end(addr, end);
		if (!is_hugepd(__hugepd(pud_val(*pud)))) {
			if (pud_none_or_clear_bad(pud))
				continue;
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
					       ceiling);
		} else {
			unsigned long more;
			/*
			 * Increment next by the size of the huge mapping since
			 * there may be more than one entry at this level for a
			 * single hugepage, but all of them point to
			 * the same kmem cache that holds the hugepte.
			 */
			more = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
			if (more > next)
				next = more;

			free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);

	ceiling &= PGDIR_MASK;
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud, start);
	mm_dec_nr_puds(tlb->mm);
}

/*
 * This function frees user-level page tables of a process.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	/*
	 * Because there are a number of different possible pagetable
	 * layouts for hugepage ranges, we limit knowledge of how
	 * things should be laid out to the allocation path
	 * (huge_pte_alloc(), above).  Everything else works out the
	 * structure as it goes from information in the hugepd
	 * pointers.  That means that we can't here use the
	 * optimization used in the normal page free_pgd_range(), of
	 * checking whether we're actually covering a large enough
	 * range to have to do anything at the top level of the walk
	 * instead of at the bottom.
	 *
	 * To make sense of this, you should probably go read the big
	 * block comment at the top of the normal free_pgd_range(),
	 * too.
	 */

	do {
		next = pgd_addr_end(addr, end);
		pgd = pgd_offset(tlb->mm, addr);
		if (!is_hugepd(__hugepd(pgd_val(*pgd)))) {
			if (pgd_none_or_clear_bad(pgd))
				continue;
			hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
		} else {
			unsigned long more;
			/*
			 * Increment next by the size of the huge mapping since
			 * there may be more than one entry at the pgd level
			 * for a single hugepage, but all of them point to the
			 * same kmem cache that holds the hugepte.
			 */
			more = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
			if (more > next)
				next = more;

			free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);
}

struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift)
{
	pte_t *ptep;
	spinlock_t *ptl;
	struct page *page = NULL;
	unsigned long mask;
	int shift = hugepd_shift(hpd);
	struct mm_struct *mm = vma->vm_mm;

retry:
	/*
	 * hugepage directory entries are protected by mm->page_table_lock
	 * Use this instead of huge_pte_lockptr
	 */
	ptl = &mm->page_table_lock;
	spin_lock(ptl);

	ptep = hugepte_offset(hpd, address, pdshift);
	if (pte_present(*ptep)) {
		mask = (1UL << shift) - 1;
		page = pte_page(*ptep);
		page += ((address & mask) >> PAGE_SHIFT);
		if (flags & FOLL_GET)
			get_page(page);
	} else {
		if (is_hugetlb_entry_migration(*ptep)) {
			spin_unlock(ptl);
			__migration_entry_wait(mm, ptep, ptl);
			goto retry;
		}
	}

	spin_unlock(ptl);
	return page;
}

static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
				      unsigned long sz)
{
	unsigned long __boundary = (addr + sz) & ~(sz-1);
	return (__boundary - 1 < end - 1) ? __boundary : end;
}
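
/*
 * Added worked example (illustrative, not from the original source):
 * with sz = 16MB (0x1000000) and addr = 0x10123000, __boundary becomes
 * (0x10123000 + 0x1000000) & ~0xffffff = 0x11000000, i.e. the start of
 * the next 16MB-aligned region.  The "- 1" comparison keeps the result
 * correct even when the boundary wraps to 0 at the top of the address
 * space.
 */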

#ifdef CONFIG_PPC_MM_SLICES
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	struct hstate *hstate = hstate_file(file);
	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));

#ifdef CONFIG_PPC_RADIX_MMU
	if (radix_enabled())
		return radix__hugetlb_get_unmapped_area(file, addr, len,
						       pgoff, flags);
#endif
	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
}
#endif

unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	/* With radix we don't use slice, so derive it from vma*/
	if (IS_ENABLED(CONFIG_PPC_MM_SLICES) && !radix_enabled()) {
		unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);

		return 1UL << mmu_psize_to_shift(psize);
	}
	return vma_kernel_pagesize(vma);
}

static int __init add_huge_page_size(unsigned long long size)
{
	int shift = __ffs(size);
	int mmu_psize;

	/* Check that it is a page size supported by the hardware and
	 * that it fits within pagetable and slice limits. */
	if (size <= PAGE_SIZE || !is_power_of_2(size))
		return -EINVAL;

	mmu_psize = check_and_get_huge_psize(shift);
	if (mmu_psize < 0)
		return -EINVAL;

	BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);

	/* Return if huge page size has already been setup */
	if (size_to_hstate(size))
		return 0;

	hugetlb_add_hstate(shift - PAGE_SHIFT);

	return 0;
}
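
/*
 * Added worked example (illustrative, not from the original source):
 * for size = 16MB, __ffs(size) gives shift = 24, so the hstate is
 * registered with order shift - PAGE_SHIFT, i.e. 12 with 4KB base pages
 * or 8 with 64KB base pages.  A request that is not a power of two, or
 * not larger than PAGE_SIZE, is rejected before any hstate is created.
 */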

static int __init hugepage_setup_sz(char *str)
{
	unsigned long long size;

	size = memparse(str, &str);

	if (add_huge_page_size(size) != 0) {
		pr_err("Invalid huge page size specified(%llu)\n", size);
	}

	return 1;
}
__setup("hugepagesz=", hugepage_setup_sz);

static int __init hugetlbpage_init(void)
{
	bool configured = false;
	int psize;

	if (hugetlb_disabled) {
		pr_info("HugeTLB support is disabled!\n");
		return 0;
	}

	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !radix_enabled() &&
	    !mmu_has_feature(MMU_FTR_16M_PAGE))
		return -ENODEV;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		unsigned shift;
		unsigned pdshift;

		if (!mmu_psize_defs[psize].shift)
			continue;

		shift = mmu_psize_to_shift(psize);

#ifdef CONFIG_PPC_BOOK3S_64
		if (shift > PGDIR_SHIFT)
			continue;
		else if (shift > PUD_SHIFT)
			pdshift = PGDIR_SHIFT;
		else if (shift > PMD_SHIFT)
			pdshift = PUD_SHIFT;
		else
			pdshift = PMD_SHIFT;
#else
		if (shift < PUD_SHIFT)
			pdshift = PMD_SHIFT;
		else if (shift < PGDIR_SHIFT)
			pdshift = PUD_SHIFT;
		else
			pdshift = PGDIR_SHIFT;
#endif

		if (add_huge_page_size(1ULL << shift) < 0)
			continue;
		/*
		 * if we have pdshift and shift value same, we don't
		 * use pgt cache for hugepd.
		 */
		if (pdshift > shift && IS_ENABLED(CONFIG_PPC_8xx))
			pgtable_cache_add(PTE_INDEX_SIZE);
		else if (pdshift > shift)
			pgtable_cache_add(pdshift - shift);
		else if (IS_ENABLED(CONFIG_PPC_FSL_BOOK3E) || IS_ENABLED(CONFIG_PPC_8xx))
			pgtable_cache_add(PTE_T_ORDER);

		configured = true;
	}

	if (configured) {
		if (IS_ENABLED(CONFIG_HUGETLB_PAGE_SIZE_VARIABLE))
			hugetlbpage_init_default();
	} else
		pr_info("Failed to initialize. Disabling HugeTLB");

	return 0;
}

arch_initcall(hugetlbpage_init);

void flush_dcache_icache_hugepage(struct page *page)
{
	int i;
	void *start;

	BUG_ON(!PageCompound(page));

	for (i = 0; i < (1UL << compound_order(page)); i++) {
		if (!PageHighMem(page)) {
			__flush_dcache_icache(page_address(page+i));
		} else {
			start = kmap_atomic(page+i);
			__flush_dcache_icache(start);
			kunmap_atomic(start);
		}
	}
}
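
/*
 * Added note (not from the original source): lowmem tail pages already
 * have a permanent kernel mapping, so page_address() is enough; highmem
 * pages have no such mapping and must be temporarily mapped with
 * kmap_atomic() before their cache lines can be flushed, then unmapped
 * again.
 */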

static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
		       unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long pte_end;
	struct page *head, *page;
	pte_t pte;
	int refs;

	pte_end = (addr + sz) & ~(sz-1);
	if (pte_end < end)
		end = pte_end;

	pte = READ_ONCE(*ptep);

	if (!pte_access_permitted(pte, write))
		return 0;

	/* hugepages are never "special" */
	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

	refs = 0;
	head = pte_page(pte);

	page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pte_val(pte) != pte_val(*ptep))) {
		/* Could be optimized better */
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	return 1;
}
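
/*
 * Added worked example (illustrative, not from the original source):
 * for a 16MB huge page and a walk starting at offset 0x123000 into it,
 * (addr & (sz-1)) >> PAGE_SHIFT is 0x123 with 4KB base pages, so the
 * loop starts at the 0x123rd tail page of the compound page and takes
 * one reference per 4KB page covered, all accounted against the head
 * page via page_cache_add_speculative().
 */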

int gup_huge_pd(hugepd_t hugepd, unsigned long addr, unsigned int pdshift,
		unsigned long end, int write, struct page **pages, int *nr)
{
	pte_t *ptep;
	unsigned long sz = 1UL << hugepd_shift(hugepd);
	unsigned long next;

	ptep = hugepte_offset(hugepd, addr, pdshift);
	do {
		next = hugepte_addr_end(addr, end, sz);
		if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
			return 0;
	} while (ptep++, addr = next, addr != end);

	return 1;
}