#ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
#define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
/*
 * This file contains the functions and defines necessary to modify and use
 * the ppc64 hashed page table.
 */

#include <asm/book3s/64/hash.h>
#include <asm/barrier.h>
/*
 * The second half of the kernel virtual space is used for IO mappings,
 * it's itself carved into the PIO region (ISA and PHB IO space) and
 * the ioremap space:
 *
 *  ISA_IO_BASE = KERN_IO_START, 64K reserved area
 *  PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces
 * IOREMAP_BASE = ISA_IO_BASE + 2G to VMALLOC_START + PGTABLE_RANGE
 */
#define KERN_IO_START	(KERN_VIRT_START + (KERN_VIRT_SIZE >> 1))
#define FULL_IO_SIZE	0x80000000ul
#define  ISA_IO_BASE	(KERN_IO_START)
#define  ISA_IO_END	(KERN_IO_START + 0x10000ul)
#define  PHB_IO_BASE	(ISA_IO_END)
#define  PHB_IO_END	(KERN_IO_START + FULL_IO_SIZE)
#define IOREMAP_BASE	(PHB_IO_END)
#define IOREMAP_END	(KERN_VIRT_START + KERN_VIRT_SIZE)
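
/*
 * How the constants above relate (illustration only; the values follow
 * directly from the definitions):
 *
 *	ISA_IO_END  - ISA_IO_BASE == 0x10000 (the 64K ISA window)
 *	PHB_IO_END  - PHB_IO_BASE == FULL_IO_SIZE - 0x10000 (~2G of PHB IO)
 *	IOREMAP_BASE == KERN_IO_START + FULL_IO_SIZE
 *	IOREMAP_END - IOREMAP_BASE is what remains for ioremap().
 */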
#define vmemmap			((struct page *)VMEMMAP_BASE)

/* Advertise special mapping type for AGP */
#define HAVE_PAGE_AGP

/* Advertise support for _PAGE_SPECIAL */
#define __HAVE_ARCH_PTE_SPECIAL

#ifndef __ASSEMBLY__
/*
 * This is the default implementation of various PTE accessors, it's
 * used in all cases except Book3S with 64K pages where we have a
 * concept of sub-pages
 */
#ifndef __real_pte

#define __real_pte(e, p)	((real_pte_t){(e)})
#define __rpte_to_pte(r)	((r).pte)
#define __rpte_to_hidx(r, index) (pte_val(__rpte_to_pte(r)) >> _PAGE_F_GIX_SHIFT)

#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift)	\
	do {								\
		index = 0;						\
		shift = mmu_psize_defs[psize].shift;			\

#define pte_iterate_hashed_end() } while (0)
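
/*
 * Usage sketch (illustration only): callers bracket per-subpage work
 * between the two macros, e.g.
 *
 *	pte_iterate_hashed_subpages(rpte, psize, va, index, shift) {
 *		// use "index" and "shift" for this hashed subpage
 *	} pte_iterate_hashed_end();
 *
 * With this default (non-subpage) implementation the body runs exactly
 * once, with index == 0.
 */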
/*
 * We expect this to be called only for user addresses or kernel virtual
 * addresses other than the linear mapping.
 */
#define pte_pagesize_index(mm, addr, pte)	MMU_PAGE_4K

#endif /* __real_pte */
static inline void pmd_set(pmd_t *pmdp, unsigned long val)
{
	*pmdp = __pmd(val);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_present(pmd)	(!pmd_none(pmd))

static inline void pud_set(pud_t *pudp, unsigned long val)
{
	*pudp = __pud(val);
}

static inline void pud_clear(pud_t *pudp)
{
	*pudp = __pud(0);
}

#define pud_none(pud)		(!pud_val(pud))
#define pud_present(pud)	(pud_val(pud) != 0)
extern struct page *pud_page(pud_t pud);
extern struct page *pmd_page(pmd_t pmd);
static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud(pte_val(pte));
}
#define pud_write(pud)		pte_write(pud_pte(pud))
#define pgd_write(pgd)		pte_write(pgd_pte(pgd))
static inline void pgd_set(pgd_t *pgdp, unsigned long val)
{
	*pgdp = __pgd(val);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	*pgdp = __pgd(0);
}

#define pgd_none(pgd)		(!pgd_val(pgd))
#define pgd_present(pgd)	(!pgd_none(pgd))

static inline pte_t pgd_pte(pgd_t pgd)
{
	return __pte(pgd_val(pgd));
}

static inline pgd_t pte_pgd(pte_t pte)
{
	return __pgd(pte_val(pte));
}

extern struct page *pgd_page(pgd_t pgd);
/*
 * Find an entry in a page-table-directory. We combine the address region
 * (the high order N bits) and the pgd portion of the address.
 */

#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

#define pud_offset(pgdp, addr)	\
	(((pud_t *) pgd_page_vaddr(*(pgdp))) + pud_index(addr))
#define pmd_offset(pudp, addr) \
	(((pmd_t *) pud_page_vaddr(*(pudp))) + pmd_index(addr))
#define pte_offset_kernel(dir, addr) \
	(((pte_t *) pmd_page_vaddr(*(dir))) + pte_index(addr))

#define pte_offset_map(dir, addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			do { } while (0)

/* to find an entry in a kernel page-table-directory */
/* This now only contains the vmalloc pages */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)
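
/*
 * Usage sketch (illustration only): walking a kernel virtual address
 * down to its pte with the helpers above. A real walk must also check
 * that each level is present and handle huge mappings:
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */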
#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pud_ERROR(e) \
	pr_err("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
/* Encode and de-code a swap entry */
#define MAX_SWAPFILES_CHECK() do { \
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS); \
	/*							\
	 * Don't have overlapping bits with _PAGE_HPTEFLAGS.	\
	 * We filter HPTEFLAGS on set_pte.			\
	 */							\
	BUILD_BUG_ON(_PAGE_HPTEFLAGS & (0x1f << _PAGE_BIT_SWAP_TYPE)); \
	BUILD_BUG_ON(_PAGE_HPTEFLAGS & _PAGE_SWP_SOFT_DIRTY); \
	} while (0)
/*
 * On a pte we don't need to handle RADIX_TREE_EXCEPTIONAL_SHIFT.
 */
#define SWP_TYPE_BITS 5
#define __swp_type(x)		(((x).val >> _PAGE_BIT_SWAP_TYPE) \
				& ((1UL << SWP_TYPE_BITS) - 1))
#define __swp_offset(x)		(((x).val & PTE_RPN_MASK) >> PAGE_SHIFT)
#define __swp_entry(type, offset)	((swp_entry_t) { \
				((type) << _PAGE_BIT_SWAP_TYPE) \
				| (((offset) << PAGE_SHIFT) & PTE_RPN_MASK)})
/*
 * swp_entry_t must be independent of pte bits. We build a swp_entry_t from
 * the swap type and offset we get from swap and convert that to a pte to
 * find a matching pte in the linux page table.
 * Clear bits not found in swap entries here.
 */
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val((pte)) & ~_PAGE_PTE })
#define __swp_entry_to_pte(x)	__pte((x).val | _PAGE_PTE)
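
/*
 * Round-trip sketch (illustration only): provided "type" fits in
 * SWP_TYPE_BITS and "offset << PAGE_SHIFT" fits in PTE_RPN_MASK,
 *
 *	swp_entry_t entry = __swp_entry(type, offset);
 *	pte_t pte = __swp_entry_to_pte(entry);
 *
 * then __swp_type(__pte_to_swp_entry(pte)) == type and
 * __swp_offset(__pte_to_swp_entry(pte)) == offset, since the swap bits
 * don't overlap _PAGE_PTE (the only bit added and cleared by the
 * conversion).
 */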
static inline bool pte_user(pte_t pte)
{
	return !(pte_val(pte) & _PAGE_PRIVILEGED);
}

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SWP_SOFT_DIRTY	(1UL << (SWP_TYPE_BITS + _PAGE_BIT_SWAP_TYPE))
#else
#define _PAGE_SWP_SOFT_DIRTY	0UL
#endif /* CONFIG_MEM_SOFT_DIRTY */
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SWP_SOFT_DIRTY);
}
static inline bool pte_swp_soft_dirty(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_SWP_SOFT_DIRTY);
}
static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_SWP_SOFT_DIRTY);
}
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
static inline bool check_pte_access(unsigned long access, unsigned long ptev)
{
	/*
	 * This checks the _PAGE_RWX and _PAGE_PRESENT bits.
	 */
	if (access & ~ptev)
		return false;
	/*
	 * This checks for access to privileged space.
	 */
	if ((access & _PAGE_PRIVILEGED) != (ptev & _PAGE_PRIVILEGED))
		return false;

	return true;
}
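
/*
 * Example (sketch): validating a hypothetical write access could look
 * like
 *
 *	if (!check_pte_access(_PAGE_PRESENT | _PAGE_WRITE, pte_val(pte)))
 *		return -EFAULT;
 *
 * i.e. every bit requested in "access" must be set in the pte value,
 * and the privilege level of the access must match that of the mapping.
 */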
void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
void pgtable_cache_init(void);

struct page *realmode_pfn_to_page(unsigned long pfn);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot);
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);
extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
				 pmd_t *pmd);
extern int has_transparent_hugepage(void);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pte_t *pmdp_ptep(pmd_t *pmd)
{
	return (pte_t *)pmd;
}

#define pmd_pfn(pmd)		pte_pfn(pmd_pte(pmd))
#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
#define pmd_soft_dirty(pmd)	pte_soft_dirty(pmd_pte(pmd))
#define pmd_mksoft_dirty(pmd)	pte_pmd(pte_mksoft_dirty(pmd_pte(pmd)))
#define pmd_clear_soft_dirty(pmd) pte_pmd(pte_clear_soft_dirty(pmd_pte(pmd)))
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

#ifdef CONFIG_NUMA_BALANCING
static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif /* CONFIG_NUMA_BALANCING */

#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd)		pte_write(pmd_pte(pmd))
static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_THP_HUGE));
}
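
/*
 * Sketch (illustration only): the transparent-hugepage code builds a
 * huge pmd from a pfn and protection bits, roughly
 *
 *	pmd_t pmd = pmd_mkhuge(pfn_pmd(pfn, prot));
 *
 * leaving both _PAGE_PTE and _PAGE_THP_HUGE set so the entry is
 * treated as a huge leaf rather than a pointer to a PTE page.
 */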
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long address, pmd_t *pmdp);
#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
extern pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
				     unsigned long addr, pmd_t *pmdp);

extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp);
#define pmdp_collapse_flush pmdp_collapse_flush

#define __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				       pgtable_t pgtable);
#define __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_INVALIDATE
extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_HUGE_SPLIT_PREPARE
extern void pmdp_huge_split_prepare(struct vm_area_struct *vma,
				    unsigned long address, pmd_t *pmdp);
#define pmd_move_must_withdraw pmd_move_must_withdraw
struct spinlock;
static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
					 struct spinlock *old_pmd_ptl)
{
	/*
	 * Archs like ppc64 use the pgtable to store per-pmd specific
	 * information. So when we switch the pmd, we should also
	 * withdraw and deposit the pgtable.
	 */
	return true;
}
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */