#ifndef _ASM_POWERPC_HUGETLB_H
#define _ASM_POWERPC_HUGETLB_H

#ifdef CONFIG_HUGETLB_PAGE
#include <asm/page.h>
#include <asm-generic/hugetlb.h>

extern struct kmem_cache *hugepte_cache;
#ifdef CONFIG_PPC_BOOK3S_64

#include <asm/book3s/64/hugetlb.h>
/*
 * This should work for other subarchs too. But right now we use the
 * new format only for 64bit book3s.
 */
static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
	/*
	 * We have only four bits to encode the MMU page size.
	 */
	BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
	return __va(hpd_val(hpd) & HUGEPD_ADDR_MASK);
}
static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
{
	return (hpd_val(hpd) & HUGEPD_SHIFT_MASK) >> 2;
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
	return mmu_psize_to_shift(hugepd_mmu_psize(hpd));
}
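
/*
 * Illustrative sketch, not part of this header: given a hugepd_t found
 * during a page-table walk, the three helpers above recover the backing
 * PTE page, the MMU page-size index, and the page shift. The local
 * variable names here are hypothetical.
 *
 *	pte_t *hpage = hugepd_page(hpd);	// VA of the hugepte page
 *	unsigned int psize = hugepd_mmu_psize(hpd); // e.g. MMU_PAGE_16M
 *	unsigned int shift = hugepd_shift(hpd);	// e.g. 24 for 16M pages
 */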
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__flush_hugetlb_page(vma, vmaddr);
}

static inline void __local_flush_hugetlb_page(struct vm_area_struct *vma,
					      unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__local_flush_hugetlb_page(vma, vmaddr);
}
#else

static inline pte_t *hugepd_page(hugepd_t hpd)
{
	BUG_ON(!hugepd_ok(hpd));
#ifdef CONFIG_PPC_8xx
	return (pte_t *)__va(hpd_val(hpd) &
			     ~(_PMD_PAGE_MASK | _PMD_PRESENT_MASK));
#else
	return (pte_t *)((hpd_val(hpd) &
			  ~HUGEPD_SHIFT_MASK) | PD_HUGE);
#endif
}

static inline unsigned int hugepd_shift(hugepd_t hpd)
{
#ifdef CONFIG_PPC_8xx
	return ((hpd_val(hpd) & _PMD_PAGE_MASK) >> 1) + 17;
#else
	return hpd_val(hpd) & HUGEPD_SHIFT_MASK;
#endif
}
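
/*
 * Worked example for the 8xx case, assuming the usual pte-8xx.h
 * encodings (_PMD_PAGE_512K = 0x4, _PMD_PAGE_8M = 0xc):
 *
 *	(0x4 >> 1) + 17 = 19	-> 512K pages (2^19)
 *	(0xc >> 1) + 17 = 23	-> 8M pages   (2^23)
 */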
#endif /* CONFIG_PPC_BOOK3S_64 */

static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
				    unsigned pdshift)
{
	/*
	 * On FSL BookE, we have multiple higher-level table entries that
	 * point to the same hugepte. Just use the first one since they're
	 * all identical. So for that case, idx = 0.
	 */
	unsigned long idx = 0;

	pte_t *dir = hugepd_page(hpd);
#ifndef CONFIG_PPC_FSL_BOOK3E
	idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(hpd);
#endif

	return dir + idx;
}
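
/*
 * Worked example (hypothetical numbers): with pdshift = 28, each
 * directory entry spans 256M; with 16M huge pages, hugepd_shift() = 24,
 * so address bits 27..24 select one of 16 hugeptes and idx is in
 * [0, 15].
 */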
pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
				 unsigned long addr, unsigned *shift);

void flush_dcache_icache_hugepage(struct page *page);
#if defined(CONFIG_PPC_MM_SLICES)
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len);
#else
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len)
{
	return 0;
}
#endif
void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
			    pte_t pte);
#ifdef CONFIG_PPC_8xx
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
	flush_tlb_page(vma, vmaddr);
}
#else
void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
#endif
void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
			    unsigned long end, unsigned long floor,
			    unsigned long ceiling);
/*
 * The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs
 * to override the version in mm/hugetlb.c.
 */
#define vma_mmu_pagesize vma_mmu_pagesize
/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	return 0;
}
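
/*
 * Worked example (illustrative, 16M huge pages): huge_page_mask(h) is
 * ~0xffffffUL, so len = 0x2000000 (two 16M pages) passes the first
 * check, while addr = 0x1234000 fails the second because it is not
 * 16M-aligned.
 */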
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte_at(mm, addr, ptep, pte);
}
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PPC64
	return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
#else
	return __pte(pte_update(ptep, ~0UL, 0));
#endif
}
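
/*
 * Note (hedged, based on the 64-bit pte_update() prototypes of this
 * era): the trailing "1" above is the "huge" argument, so the clear is
 * done with huge-page semantics; the 32-bit variant takes only
 * (ptep, clr, set).
 */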
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
	pte_t pte;

	pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
	flush_hugetlb_page(vma, addr);
}
static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
#ifdef HUGETLB_NEED_PRELOAD
	/*
	 * The "return 1" forces a call of update_mmu_cache, which will write
	 * a TLB entry. Without this, platforms that don't do a write of the
	 * TLB entry in the TLB miss handler asm will fault ad infinitum.
	 */
	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
	return 1;
#else
	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
#endif
}
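
/*
 * Caller-side sketch (hedged; mirrors the generic mm contract rather
 * than code in this file): a nonzero return tells the fault path to
 * call update_mmu_cache() so the TLB entry gets (re)written.
 *
 *	if (huge_ptep_set_access_flags(vma, addr, ptep, pte, dirty))
 *		update_mmu_cache(vma, addr, ptep);
 */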
static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}

static inline void arch_clear_hugepage_flags(struct page *page)
{
}
#else /* ! CONFIG_HUGETLB_PAGE */
static inline void flush_hugetlb_page(struct vm_area_struct *vma,
				      unsigned long vmaddr)
{
}

#define hugepd_shift(x) 0
static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
				    unsigned pdshift)
{
	return NULL;
}
#endif /* CONFIG_HUGETLB_PAGE */
/*
 * FSL Book3E platforms require special gpage handling - the gpages
 * are reserved early in the boot process by memblock instead of via
 * the .dts as on IBM platforms.
 */
#if defined(CONFIG_HUGETLB_PAGE) && (defined(CONFIG_PPC_FSL_BOOK3E) || \
    defined(CONFIG_PPC_8xx))
extern void __init reserve_hugetlb_gpages(void);
#else
static inline void reserve_hugetlb_gpages(void)
{
}
#endif

#endif /* _ASM_POWERPC_HUGETLB_H */