/*
 * PPC Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/export.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/moduleparam.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/setup.h>
#include <asm/hugetlb.h>
#ifdef CONFIG_HUGETLB_PAGE

#define PAGE_SHIFT_64K	16
#define PAGE_SHIFT_512K	19
#define PAGE_SHIFT_8M	23
#define PAGE_SHIFT_16M	24
#define PAGE_SHIFT_16G	34

unsigned int HPAGE_SHIFT;
/*
 * Tracks gpages after the device tree is scanned and before the
 * huge_boot_pages list is ready.  On non-Freescale implementations, this is
 * just used to track 16G pages and so is a single array.  FSL-based
 * implementations may have more than one gpage size, so we need multiple
 * arrays.
 */
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
#define MAX_NUMBER_GPAGES	128
struct psize_gpages {
	u64 gpage_list[MAX_NUMBER_GPAGES];
	unsigned int nr_gpages;
};
static struct psize_gpages gpage_freearray[MMU_PAGE_COUNT];
#else
#define MAX_NUMBER_GPAGES	1024
static u64 gpage_freearray[MAX_NUMBER_GPAGES];
static unsigned nr_gpages;
#endif

#define hugepd_none(hpd)	(hpd_val(hpd) == 0)
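
/*
 * Walk the page tables for a hugetlbfs address.  The returned pointer may
 * refer to an entry inside a hugepd-mapped hugepte table at any level of
 * the tree.
 */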
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	/* Only called for hugetlbfs pages, hence can ignore THP */
	return __find_linux_pte_or_hugepte(mm->pgd, addr, NULL, NULL);
}
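
/*
 * Allocate a hugepte table and link it into every hugepd slot that must
 * reference it.  Depending on the geometry (pshift vs. pdshift), the table
 * comes either from the dedicated hugepte_cache or from the generic
 * pgtable caches.
 */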
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
			   unsigned long address, unsigned pdshift, unsigned pshift)
{
	struct kmem_cache *cachep;
	pte_t *new;
	int i;
	int num_hugepd;

	if (pshift >= pdshift) {
		cachep = hugepte_cache;
		num_hugepd = 1 << (pshift - pdshift);
	} else {
		cachep = PGT_CACHE(pdshift - pshift);
		num_hugepd = 1;
	}

	new = kmem_cache_zalloc(cachep, GFP_KERNEL);

	BUG_ON(pshift > HUGEPD_SHIFT_MASK);
	BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);
	if (!new)
		return -ENOMEM;

	/*
	 * Make sure other cpus find the hugepd set only after a
	 * properly initialized page table is visible to them.
	 * For more details look for the comment in __pte_alloc().
	 */
	smp_wmb();

	spin_lock(&mm->page_table_lock);
	/*
	 * We have multiple higher-level entries that point to the same
	 * actual pte location.  Fill in each as we go and backtrack on error.
	 * We need all of these so the DTLB pgtable walk code can find the
	 * right higher-level entry without knowing if it's a hugepage or not.
	 */
	for (i = 0; i < num_hugepd; i++, hpdp++) {
		if (unlikely(!hugepd_none(*hpdp)))
			break;
		else {
#ifdef CONFIG_PPC_BOOK3S_64
			*hpdp = __hugepd(__pa(new) |
					 (shift_to_mmu_psize(pshift) << 2));
#elif defined(CONFIG_PPC_8xx)
			*hpdp = __hugepd(__pa(new) |
					 (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M :
					  _PMD_PAGE_512K) | _PMD_PRESENT);
#else
			/* We use the old format for PPC_FSL_BOOK3E */
			*hpdp = __hugepd(((unsigned long)new & ~PD_HUGE) | pshift);
#endif
		}
	}
	/* If we bailed from the for loop early, an error occurred, clean up */
	if (i < num_hugepd) {
		for (i = i - 1; i >= 0; i--, hpdp--)
			*hpdp = __hugepd(0);
		kmem_cache_free(cachep, new);
	}
	spin_unlock(&mm->page_table_lock);
	return 0;
}
/*
 * These macros define how to determine which level of the page table holds
 * the hugepd entry.
 */
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
#define HUGEPD_PGD_SHIFT	PGDIR_SHIFT
#define HUGEPD_PUD_SHIFT	PUD_SHIFT
#else
#define HUGEPD_PGD_SHIFT	PUD_SHIFT
#define HUGEPD_PUD_SHIFT	PMD_SHIFT
#endif

/*
 * At this point we do the placement change only for BOOK3S 64. This would
 * possibly work on other subarchs.
 */
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;
	unsigned pshift = __ffs(sz);
	unsigned pdshift = PGDIR_SHIFT;

	addr &= ~(sz-1);
	pg = pgd_offset(mm, addr);

#ifdef CONFIG_PPC_BOOK3S_64
	if (pshift == PGDIR_SHIFT)
		/* 16GB huge page */
		return (pte_t *) pg;
	else if (pshift > PUD_SHIFT)
		/*
		 * We need to use the hugepd table
		 */
		hpdp = (hugepd_t *)pg;
	else {
		pdshift = PUD_SHIFT;
		pu = pud_alloc(mm, pg, addr);
		if (pshift == PUD_SHIFT)
			return (pte_t *)pu;
		else if (pshift > PMD_SHIFT)
			hpdp = (hugepd_t *)pu;
		else {
			pdshift = PMD_SHIFT;
			pm = pmd_alloc(mm, pu, addr);
			if (pshift == PMD_SHIFT)
				/* 16MB hugepage */
				return (pte_t *)pm;
			else
				hpdp = (hugepd_t *)pm;
		}
	}
#else
	if (pshift >= HUGEPD_PGD_SHIFT) {
		hpdp = (hugepd_t *)pg;
	} else {
		pdshift = PUD_SHIFT;
		pu = pud_alloc(mm, pg, addr);
		if (pshift >= HUGEPD_PUD_SHIFT) {
			hpdp = (hugepd_t *)pu;
		} else {
			pdshift = PMD_SHIFT;
			pm = pmd_alloc(mm, pu, addr);
			hpdp = (hugepd_t *)pm;
		}
	}
#endif
	if (!hpdp)
		return NULL;

	BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
		return NULL;

	return hugepte_offset(*hpdp, addr, pdshift);
}
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
/* Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy allocator is set up.
 */
void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
	unsigned int idx = shift_to_mmu_psize(__ffs(page_size));
	int i;

	if (addr == 0)
		return;

	gpage_freearray[idx].nr_gpages = number_of_pages;

	for (i = 0; i < number_of_pages; i++) {
		gpage_freearray[idx].gpage_list[i] = addr;
		addr += page_size;
	}
}
/*
 * Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *hstate)
{
	struct huge_bootmem_page *m;
	int idx = shift_to_mmu_psize(huge_page_shift(hstate));
	int nr_gpages = gpage_freearray[idx].nr_gpages;

	if (nr_gpages == 0)
		return 0;

#ifdef CONFIG_HIGHMEM
	/*
	 * If gpages can be in highmem we can't use the trick of storing the
	 * data structure in the page; allocate space for this
	 */
	m = memblock_virt_alloc(sizeof(struct huge_bootmem_page), 0);
	m->phys = gpage_freearray[idx].gpage_list[--nr_gpages];
#else
	m = phys_to_virt(gpage_freearray[idx].gpage_list[--nr_gpages]);
#endif

	list_add(&m->list, &huge_boot_pages);
	gpage_freearray[idx].nr_gpages = nr_gpages;
	gpage_freearray[idx].gpage_list[nr_gpages] = 0;
	m->hstate = hstate;

	return 1;
}
/*
 * Scan the command line hugepagesz= options for gigantic pages; store those in
 * a list that we use to allocate the memory once all options are parsed.
 */

unsigned long gpage_npages[MMU_PAGE_COUNT];
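
/*
 * For example, booting an FSL Book3E machine with
 * "hugepagesz=1G hugepages=2 hugepagesz=256M hugepages=4" makes the parser
 * below record two 1G and four 256M gigantic pages in gpage_npages[], which
 * reserve_hugetlb_gpages() then carves out of memblock.
 */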
static int __init do_gpage_early_setup(char *param, char *val,
				       const char *unused, void *arg)
{
	static phys_addr_t size;
	unsigned long npages;

	/*
	 * The hugepagesz and hugepages cmdline options are interleaved.  We
	 * use the size variable to keep track of whether or not this was done
	 * properly and skip over instances where it is incorrect.  Other
	 * command-line parsing code will issue warnings, so we don't need to.
	 */
	if ((strcmp(param, "default_hugepagesz") == 0) ||
	    (strcmp(param, "hugepagesz") == 0)) {
		size = memparse(val, NULL);
	} else if (strcmp(param, "hugepages") == 0) {
		if (size != 0) {
			if (sscanf(val, "%lu", &npages) <= 0)
				npages = 0;
			if (npages > MAX_NUMBER_GPAGES) {
				pr_warn("MMU: %lu pages requested for page "
#ifdef CONFIG_PHYS_ADDR_T_64BIT
					"size %llu KB, limiting to "
#else
					"size %u KB, limiting to "
#endif
					__stringify(MAX_NUMBER_GPAGES) "\n",
					npages, size / 1024);
				npages = MAX_NUMBER_GPAGES;
			}
			gpage_npages[shift_to_mmu_psize(__ffs(size))] = npages;
			size = 0;
		}
	}
	return 0;
}
/*
 * This function allocates physical space for pages that are larger than the
 * buddy allocator can handle.  We want to allocate these in highmem because
 * the amount of lowmem is limited.  This means that this function MUST be
 * called before lowmem_end_addr is set up in MMU_init() in order for the lmb
 * allocator to grab highmem.
 */
void __init reserve_hugetlb_gpages(void)
{
	static __initdata char cmdline[COMMAND_LINE_SIZE];
	phys_addr_t size, base;
	int i;

	strlcpy(cmdline, boot_command_line, COMMAND_LINE_SIZE);
	parse_args("hugetlb gpages", cmdline, NULL, 0, 0, 0,
		   NULL, &do_gpage_early_setup);

	/*
	 * Walk gpage list in reverse, allocating larger page sizes first.
	 * Skip over unsupported sizes, or sizes that have 0 gpages allocated.
	 * When we reach the point in the list where pages are no longer
	 * considered gpages, we're done.
	 */
	for (i = MMU_PAGE_COUNT-1; i >= 0; i--) {
		if (mmu_psize_defs[i].shift == 0 || gpage_npages[i] == 0)
			continue;
		else if (mmu_psize_to_shift(i) < (MAX_ORDER + PAGE_SHIFT))
			break;

		size = (phys_addr_t)(1ULL << mmu_psize_to_shift(i));
		base = memblock_alloc_base(size * gpage_npages[i], size,
					   MEMBLOCK_ALLOC_ANYWHERE);
		add_gpage(base, size, gpage_npages[i]);
	}
}
#else /* !PPC_FSL_BOOK3E */

/* Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy allocator is set up.
 */
void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
	while (number_of_pages > 0) {
		gpage_freearray[nr_gpages] = addr;
		nr_gpages++;
		number_of_pages--;
		addr += page_size;
	}
}

/* Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *hstate)
{
	struct huge_bootmem_page *m;

	if (nr_gpages == 0)
		return 0;
	m = phys_to_virt(gpage_freearray[--nr_gpages]);
	gpage_freearray[nr_gpages] = 0;
	list_add(&m->list, &huge_boot_pages);
	m->hstate = hstate;
	return 1;
}
#endif
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
#define HUGEPD_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))
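
/*
 * On FSL Book3E and 8xx the TLB miss handlers may walk hugepd entries
 * without taking the page table lock, so hugepte tables cannot be freed
 * immediately.  Instead, pointers are collected in a per-CPU, page-sized
 * batch and handed to RCU, and only freed after a grace period.
 */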
struct hugepd_freelist {
	struct rcu_head rcu;
	unsigned int index;
	void *ptes[0];
};

static DEFINE_PER_CPU(struct hugepd_freelist *, hugepd_freelist_cur);

static void hugepd_free_rcu_callback(struct rcu_head *head)
{
	struct hugepd_freelist *batch =
		container_of(head, struct hugepd_freelist, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		kmem_cache_free(hugepte_cache, batch->ptes[i]);

	free_page((unsigned long)batch);
}
static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
{
	struct hugepd_freelist **batchp;

	batchp = &get_cpu_var(hugepd_freelist_cur);

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    cpumask_equal(mm_cpumask(tlb->mm),
			  cpumask_of(smp_processor_id()))) {
		kmem_cache_free(hugepte_cache, hugepte);
		put_cpu_var(hugepd_freelist_cur);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct hugepd_freelist *)__get_free_page(GFP_ATOMIC);
		(*batchp)->index = 0;
	}

	(*batchp)->ptes[(*batchp)->index++] = hugepte;
	if ((*batchp)->index == HUGEPD_FREELIST_SIZE) {
		call_rcu_sched(&(*batchp)->rcu, hugepd_free_rcu_callback);
		*batchp = NULL;
	}
	put_cpu_var(hugepd_freelist_cur);
}
#else
static inline void hugepd_free(struct mmu_gather *tlb, void *hugepte) {}
#endif
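
/*
 * Free the hugepte table referenced by one (or, on FSL/8xx, several
 * consecutive) hugepd entries, clearing each entry first.  The table goes
 * back either through hugepd_free() above or through pgtable_free_tlb().
 */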
static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
			      unsigned long start, unsigned long end,
			      unsigned long floor, unsigned long ceiling)
{
	pte_t *hugepte = hugepd_page(*hpdp);
	int i;

	unsigned long pdmask = ~((1UL << pdshift) - 1);
	unsigned int num_hugepd = 1;
	unsigned int shift = hugepd_shift(*hpdp);

	/* Note: On fsl the hpdp may be the first of several */
	if (shift > pdshift)
		num_hugepd = 1 << (shift - pdshift);

	start &= pdmask;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= pdmask;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	for (i = 0; i < num_hugepd; i++, hpdp++)
		*hpdp = __hugepd(0);

	if (shift >= pdshift)
		hugepd_free(tlb, hugepte);
	else
		pgtable_free_tlb(tlb, hugepte, pdshift - shift);
}
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	do {
		unsigned long more;

		pmd = pmd_offset(pud, addr);
		next = pmd_addr_end(addr, end);
		if (!is_hugepd(__hugepd(pmd_val(*pmd)))) {
			/*
			 * if it is not a hugepd pointer, we should already
			 * find it cleared.
			 */
			WARN_ON(!pmd_none_or_clear_bad(pmd));
			continue;
		}
		/*
		 * Increment next by the size of the huge mapping since
		 * there may be more than one entry at this level for a
		 * single hugepage, but all of them point to
		 * the same kmem cache that holds the hugepte.
		 */
		more = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
		if (more > next)
			next = more;

		free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
				  addr, next, floor, ceiling);
	} while (addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}
static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	do {
		pud = pud_offset(pgd, addr);
		next = pud_addr_end(addr, end);
		if (!is_hugepd(__hugepd(pud_val(*pud)))) {
			if (pud_none_or_clear_bad(pud))
				continue;
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
					       ceiling);
		} else {
			unsigned long more;
			/*
			 * Increment next by the size of the huge mapping since
			 * there may be more than one entry at this level for a
			 * single hugepage, but all of them point to
			 * the same kmem cache that holds the hugepte.
			 */
			more = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
			if (more > next)
				next = more;

			free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud, start);
}
/*
 * This function frees user-level page tables of a process.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	/*
	 * Because there are a number of different possible pagetable
	 * layouts for hugepage ranges, we limit knowledge of how
	 * things should be laid out to the allocation path
	 * (huge_pte_alloc(), above).  Everything else works out the
	 * structure as it goes from information in the hugepd
	 * pointers.  That means that we can't here use the
	 * optimization used in the normal page free_pgd_range(), of
	 * checking whether we're actually covering a large enough
	 * range to have to do anything at the top level of the walk
	 * instead of at the bottom.
	 *
	 * To make sense of this, you should probably go read the big
	 * block comment at the top of the normal free_pgd_range(),
	 * too.
	 */

	do {
		next = pgd_addr_end(addr, end);
		pgd = pgd_offset(tlb->mm, addr);
		if (!is_hugepd(__hugepd(pgd_val(*pgd)))) {
			if (pgd_none_or_clear_bad(pgd))
				continue;
			hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
		} else {
			unsigned long more;
			/*
			 * Increment next by the size of the huge mapping since
			 * there may be more than one entry at the pgd level
			 * for a single hugepage, but all of them point to the
			 * same kmem cache that holds the hugepte.
			 */
			more = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
			if (more > next)
				next = more;

			free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);
}
/*
 * 64-bit Book3S uses the generic follow_page_mask().
 */
#ifdef CONFIG_PPC_BOOK3S_64
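
/*
 * Resolve a hugepd entry to the struct page backing @address for the
 * generic follow_page_mask() path, taking a reference when FOLL_GET is
 * set and waiting on hugetlb migration entries before retrying.
 */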
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift)
{
	pte_t *ptep;
	spinlock_t *ptl;
	struct page *page = NULL;
	unsigned long mask;
	int shift = hugepd_shift(hpd);
	struct mm_struct *mm = vma->vm_mm;

retry:
	ptl = &mm->page_table_lock;
	spin_lock(ptl);

	ptep = hugepte_offset(hpd, address, pdshift);
	if (pte_present(*ptep)) {
		mask = (1UL << shift) - 1;
		page = pte_page(*ptep);
		page += ((address & mask) >> PAGE_SHIFT);
		if (flags & FOLL_GET)
			get_page(page);
	} else {
		if (is_hugetlb_entry_migration(*ptep)) {
			spin_unlock(ptl);
			__migration_entry_wait(mm, ptep, ptl);
			goto retry;
		}
	}
	spin_unlock(ptl);
	return page;
}
#else /* !CONFIG_PPC_BOOK3S_64 */

/*
 * We are holding mmap_sem, so a parallel huge page collapse cannot run.
 * To prevent hugepage split, disable irqs.
 */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	bool is_thp;
	pte_t *ptep, pte;
	unsigned shift;
	unsigned long mask, flags;
	struct page *page = ERR_PTR(-EINVAL);

	local_irq_save(flags);
	ptep = find_linux_pte_or_hugepte(mm->pgd, address, &is_thp, &shift);
	if (!ptep)
		goto no_page;
	pte = READ_ONCE(*ptep);
	/*
	 * Verify it is a huge page else bail.
	 * Transparent hugepages are handled by generic code.  We can skip
	 * them here.
	 */
	if (!shift || is_thp)
		goto no_page;

	if (!pte_present(pte)) {
		page = NULL;
		goto no_page;
	}
	mask = (1UL << shift) - 1;
	page = pte_page(pte);
	if (page)
		page += (address & mask) / PAGE_SIZE;

no_page:
	local_irq_restore(flags);
	return page;
}
struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	BUG();
	return NULL;
}

struct page *
follow_huge_pud(struct mm_struct *mm, unsigned long address,
		pud_t *pud, int write)
{
	BUG();
	return NULL;
}
#endif
static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
				      unsigned long sz)
{
	unsigned long __boundary = (addr + sz) & ~(sz-1);
	return (__boundary - 1 < end - 1) ? __boundary : end;
}
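
/*
 * Fast GUP helper: iterate over every hugepte covered by a hugepd entry
 * in the range [addr, end) and hand each one to gup_hugepte().
 */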
int gup_huge_pd(hugepd_t hugepd, unsigned long addr, unsigned pdshift,
		unsigned long end, int write, struct page **pages, int *nr)
{
	pte_t *ptep;
	unsigned long sz = 1UL << hugepd_shift(hugepd);
	unsigned long next;

	ptep = hugepte_offset(hugepd, addr, pdshift);
	do {
		next = hugepte_addr_end(addr, end, sz);
		if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
			return 0;
	} while (ptep++, addr = next, addr != end);

	return 1;
}
#ifdef CONFIG_PPC_MM_SLICES
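/*
 * With the hash MMU the address space is divided into slices that are each
 * tied to a single page size, so a huge page mapping must be placed in a
 * slice of the matching psize.  Radix does not use slices and provides its
 * own get_unmapped_area implementation.
 */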
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	struct hstate *hstate = hstate_file(file);
	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));

	if (radix_enabled())
		return radix__hugetlb_get_unmapped_area(file, addr, len,
							pgoff, flags);
	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
}
#endif
unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
#ifdef CONFIG_PPC_MM_SLICES
	unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);
	/* With radix we don't use slices, so derive it from the vma */
	if (!radix_enabled())
		return 1UL << mmu_psize_to_shift(psize);
#endif
	if (!is_vm_hugetlb_page(vma))
		return PAGE_SIZE;

	return huge_page_size(hstate_vma(vma));
}
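
/*
 * FSL Book3E parts encode their TLB page sizes in powers of 4, so a huge
 * page size is only usable there if it is a power of 4 (e.g. 4M, 16M, 64M).
 */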
static inline bool is_power_of_4(unsigned long x)
{
	if (is_power_of_2(x))
		return (__ilog2(x) % 2) ? false : true;
	return false;
}
static int __init add_huge_page_size(unsigned long long size)
{
	int shift = __ffs(size);
	int mmu_psize;

	/* Check that it is a page size supported by the hardware and
	 * that it fits within pagetable and slice limits. */
	if (size <= PAGE_SIZE)
		return -EINVAL;
#if defined(CONFIG_PPC_FSL_BOOK3E)
	if (!is_power_of_4(size))
		return -EINVAL;
#elif !defined(CONFIG_PPC_8xx)
	if (!is_power_of_2(size) || (shift > SLICE_HIGH_SHIFT))
		return -EINVAL;
#endif

	if ((mmu_psize = shift_to_mmu_psize(shift)) < 0)
		return -EINVAL;

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * We need to make sure that for different page sizes reported by
	 * firmware we only add hugetlb support for page sizes that can be
	 * supported by the Linux page table layout.
	 */
	if (radix_enabled()) {
		if (mmu_psize != MMU_PAGE_2M)
			return -EINVAL;
	} else {
		if (mmu_psize != MMU_PAGE_16M && mmu_psize != MMU_PAGE_16G)
			return -EINVAL;
	}
#endif

	BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);

	/* Return if huge page size has already been set up */
	if (size_to_hstate(size))
		return 0;

	hugetlb_add_hstate(shift - PAGE_SHIFT);

	return 0;
}
static int __init hugepage_setup_sz(char *str)
{
	unsigned long long size;

	size = memparse(str, &str);

	if (add_huge_page_size(size) != 0) {
		hugetlb_bad_size();
		pr_err("Invalid huge page size specified(%llu)\n", size);
	}

	return 1;
}
__setup("hugepagesz=", hugepage_setup_sz);
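
/*
 * For example, booting a hash MMU Book3S 64 kernel with
 * "hugepagesz=16M hugepages=128" registers the 16M hstate via the path
 * above and lets the generic hugetlb code allocate 128 such pages.
 */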
struct kmem_cache *hugepte_cache;
static int __init hugetlbpage_init(void)
{
	int psize;

#if !defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_PPC_8xx)
	if (!radix_enabled() && !mmu_has_feature(MMU_FTR_16M_PAGE))
		return -ENODEV;
#endif
	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		unsigned shift;
		unsigned pdshift;

		if (!mmu_psize_defs[psize].shift)
			continue;

		shift = mmu_psize_to_shift(psize);

		if (add_huge_page_size(1ULL << shift) < 0)
			continue;

		if (shift < HUGEPD_PUD_SHIFT)
			pdshift = PMD_SHIFT;
		else if (shift < HUGEPD_PGD_SHIFT)
			pdshift = PUD_SHIFT;
		else
			pdshift = PGDIR_SHIFT;
		/*
		 * if we have pdshift and shift value same, we don't
		 * use pgt cache for hugepd.
		 */
		if (pdshift > shift)
			pgtable_cache_add(pdshift - shift, NULL);
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
		else if (!hugepte_cache) {
			/*
			 * Create a kmem cache for hugeptes.  The bottom bits in
			 * the pte have size information encoded in them, so
			 * align them to allow this.
			 */
			hugepte_cache = kmem_cache_create("hugepte-cache",
							  sizeof(pte_t),
							  HUGEPD_SHIFT_MASK + 1,
							  0, NULL);
			if (hugepte_cache == NULL)
				panic("%s: Unable to create kmem cache "
				      "for hugeptes\n", __func__);
		}
#endif
	}
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
	/* Default hpage size = 4M on FSL_BOOK3E and 512k on 8xx */
	if (mmu_psize_defs[MMU_PAGE_4M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_4M].shift;
	else if (mmu_psize_defs[MMU_PAGE_512K].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_512K].shift;
#else
	/* Set default large page size.  Currently, we pick 16M or 1M
	 * depending on what is available.
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;
	else if (mmu_psize_defs[MMU_PAGE_2M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_2M].shift;
#endif

	return 0;
}

arch_initcall(hugetlbpage_init);
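
/*
 * Flush every sub-page of a compound huge page, mapping highmem sub-pages
 * with kmap_atomic() as needed, so the icache sees freshly written
 * instructions.
 */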
void flush_dcache_icache_hugepage(struct page *page)
{
	int i;
	void *start;

	BUG_ON(!PageCompound(page));

	for (i = 0; i < (1UL << compound_order(page)); i++) {
		if (!PageHighMem(page)) {
			__flush_dcache_icache(page_address(page+i));
		} else {
			start = kmap_atomic(page+i);
			__flush_dcache_icache(start);
			kunmap_atomic(start);
		}
	}
}

#endif /* CONFIG_HUGETLB_PAGE */
/*
 * We have 4 cases for pgds and pmds:
 * (1) invalid (all zeroes)
 * (2) pointer to next table, as normal; bottom 6 bits == 0
 * (3) leaf pte for huge page, _PAGE_PTE set
 * (4) hugepd pointer, _PAGE_PTE = 0 and bits [2..6] indicate size of table
 *
 * So long as we atomically load page table pointers we are safe against teardown,
 * we can follow the address down to the page and take a ref on it.
 * This function needs to be called with interrupts disabled.  We use this variant
 * when we have MSR[EE] = 0 but the paca->soft_enabled = 1
 */
pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
				   bool *is_thp, unsigned *shift)
{
	pgd_t pgd, *pgdp;
	pud_t pud, *pudp;
	pmd_t pmd, *pmdp;
	pte_t *ret_pte;
	hugepd_t *hpdp = NULL;
	unsigned pdshift = PGDIR_SHIFT;

	if (shift)
		*shift = 0;

	if (is_thp)
		*is_thp = false;
	pgdp = pgdir + pgd_index(ea);
	pgd = READ_ONCE(*pgdp);
	/*
	 * Always operate on the local stack value.  This makes sure the
	 * value doesn't get updated by a parallel THP split/collapse,
	 * page fault or a page unmap.  The returned pte_t * is still not
	 * stable, so it should be checked there for the above conditions.
	 */
	if (pgd_none(pgd))
		return NULL;
	else if (pgd_huge(pgd)) {
		ret_pte = (pte_t *) pgdp;
		goto out;
	} else if (is_hugepd(__hugepd(pgd_val(pgd))))
		hpdp = (hugepd_t *)&pgd;
	else {
		/*
		 * Even if we end up with an unmap, the pgtable will not
		 * be freed, because we do an rcu free and here we are
		 * irq disabled.
		 */
		pdshift = PUD_SHIFT;
		pudp = pud_offset(&pgd, ea);
		pud = READ_ONCE(*pudp);

		if (pud_none(pud))
			return NULL;
		else if (pud_huge(pud)) {
			ret_pte = (pte_t *) pudp;
			goto out;
		} else if (is_hugepd(__hugepd(pud_val(pud))))
			hpdp = (hugepd_t *)&pud;
		else {
			pdshift = PMD_SHIFT;
			pmdp = pmd_offset(&pud, ea);
			pmd = READ_ONCE(*pmdp);
			/*
			 * A hugepage collapse is captured by pmd_none, because
			 * it marks the pmd none and does a hpte invalidate.
			 */
			if (pmd_none(pmd))
				return NULL;

			if (pmd_trans_huge(pmd)) {
				if (is_thp)
					*is_thp = true;
				ret_pte = (pte_t *) pmdp;
				goto out;
			}

			if (pmd_huge(pmd)) {
				ret_pte = (pte_t *) pmdp;
				goto out;
			} else if (is_hugepd(__hugepd(pmd_val(pmd))))
				hpdp = (hugepd_t *)&pmd;
			else
				return pte_offset_kernel(&pmd, ea);
		}
	}
	if (!hpdp)
		return NULL;

	ret_pte = hugepte_offset(*hpdp, ea, pdshift);
	pdshift = hugepd_shift(*hpdp);
out:
	if (shift)
		*shift = pdshift;
	return ret_pte;
}
EXPORT_SYMBOL_GPL(__find_linux_pte_or_hugepte);
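
/*
 * Lockless fast-GUP helper for a single hugepte: check access permissions,
 * take a speculative reference on the head page covering every sub-page in
 * the range, and back out if the pte changed underneath us.
 */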
int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask;
	unsigned long pte_end;
	struct page *head, *page;
	pte_t pte;
	int refs;

	pte_end = (addr + sz) & ~(sz-1);
	if (pte_end < end)
		end = pte_end;

	pte = READ_ONCE(*ptep);
	mask = _PAGE_PRESENT | _PAGE_READ;

	/*
	 * On some CPUs like the 8xx, _PAGE_RW hence _PAGE_WRITE is defined
	 * as 0 and _PAGE_RO has to be set when a page is not writable.
	 */
	if (write)
		mask |= _PAGE_WRITE;

	if ((pte_val(pte) & mask) != mask)
		return 0;

	/* hugepages are never "special" */
	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

	refs = 0;
	head = pte_page(pte);

	page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pte_val(pte) != pte_val(*ptep))) {
		/* Could be optimized better */