powerpc/mm: Replace _PAGE_USER with _PAGE_PRIVILEGED
[linux-2.6-block.git] arch/powerpc/include/asm/book3s/64/pgtable.h

#ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
#define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
/*
 * This file contains the functions and defines necessary to modify and use
 * the ppc64 hashed page table.
 */

#include <asm/book3s/64/hash.h>
#include <asm/barrier.h>

/*
 * The second half of the kernel virtual space is used for IO mappings,
 * it's itself carved into the PIO region (ISA and PHB IO space) and
 * the ioremap space
 *
 *  ISA_IO_BASE = KERN_IO_START, 64K reserved area
 *  PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces
 * IOREMAP_BASE = ISA_IO_BASE + 2G to VMALLOC_START + PGTABLE_RANGE
 */
#define KERN_IO_START	(KERN_VIRT_START + (KERN_VIRT_SIZE >> 1))
#define FULL_IO_SIZE	0x80000000ul
#define ISA_IO_BASE	(KERN_IO_START)
#define ISA_IO_END	(KERN_IO_START + 0x10000ul)
#define PHB_IO_BASE	(ISA_IO_END)
#define PHB_IO_END	(KERN_IO_START + FULL_IO_SIZE)
#define IOREMAP_BASE	(PHB_IO_END)
#define IOREMAP_END	(KERN_VIRT_START + KERN_VIRT_SIZE)

#define vmemmap		((struct page *)VMEMMAP_BASE)

/* Advertise special mapping type for AGP */
#define HAVE_PAGE_AGP

/* Advertise support for _PAGE_SPECIAL */
#define __HAVE_ARCH_PTE_SPECIAL

#ifndef __ASSEMBLY__

/*
 * This is the default implementation of various PTE accessors, it's
 * used in all cases except Book3S with 64K pages where we have a
 * concept of sub-pages
 */
#ifndef __real_pte

#define __real_pte(e,p)		((real_pte_t){(e)})
#define __rpte_to_pte(r)	((r).pte)
#define __rpte_to_hidx(r,index)	(pte_val(__rpte_to_pte(r)) >> _PAGE_F_GIX_SHIFT)

#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift)	\
	do {								\
		index = 0;						\
		shift = mmu_psize_defs[psize].shift;			\

#define pte_iterate_hashed_end() } while(0)

/*
 * We expect this to be called only for user addresses or kernel virtual
 * addresses other than the linear mapping.
 */
#define pte_pagesize_index(mm, addr, pte)	MMU_PAGE_4K

#endif /* __real_pte */
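/*
 * Illustrative sketch (not part of this header): how callers are expected to
 * pair pte_iterate_hashed_subpages() with pte_iterate_hashed_end().  The
 * names rpte, vpn, psize, ssize, local and the flush_hash_page() call are
 * assumptions borrowed from the hash MMU flush paths; with this default
 * (non-subpage) implementation the body simply runs once.
 *
 *	unsigned long index, shift;
 *
 *	pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift) {
 *		flush_hash_page(vpn, rpte, psize, ssize, local);
 *	} pte_iterate_hashed_end();
 */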

static inline void pmd_set(pmd_t *pmdp, unsigned long val)
{
	*pmdp = __pmd(val);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_present(pmd)	(!pmd_none(pmd))

static inline void pud_set(pud_t *pudp, unsigned long val)
{
	*pudp = __pud(val);
}

static inline void pud_clear(pud_t *pudp)
{
	*pudp = __pud(0);
}

#define pud_none(pud)		(!pud_val(pud))
#define pud_present(pud)	(pud_val(pud) != 0)

extern struct page *pud_page(pud_t pud);
extern struct page *pmd_page(pmd_t pmd);
static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud(pte_val(pte));
}
#define pud_write(pud)		pte_write(pud_pte(pud))
#define pgd_write(pgd)		pte_write(pgd_pte(pgd))
static inline void pgd_set(pgd_t *pgdp, unsigned long val)
{
	*pgdp = __pgd(val);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	*pgdp = __pgd(0);
}

#define pgd_none(pgd)		(!pgd_val(pgd))
#define pgd_present(pgd)	(!pgd_none(pgd))

static inline pte_t pgd_pte(pgd_t pgd)
{
	return __pte(pgd_val(pgd));
}

static inline pgd_t pte_pgd(pte_t pte)
{
	return __pgd(pte_val(pte));
}

extern struct page *pgd_page(pgd_t pgd);

/*
 * Find an entry in a page-table-directory.  We combine the address region
 * (the high order N bits) and the pgd portion of the address.
 */

#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

#define pud_offset(pgdp, addr)	\
	(((pud_t *) pgd_page_vaddr(*(pgdp))) + pud_index(addr))
#define pmd_offset(pudp,addr) \
	(((pmd_t *) pud_page_vaddr(*(pudp))) + pmd_index(addr))
#define pte_offset_kernel(dir,addr) \
	(((pte_t *) pmd_page_vaddr(*(dir))) + pte_index(addr))

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			do { } while(0)

/* to find an entry in a kernel page-table-directory */
/* This now only contains the vmalloc pages */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
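/*
 * Illustrative sketch (not part of this header): a software walk of the
 * kernel page tables using the offset helpers above.  The function name is
 * made up for the example and error handling is minimal.
 *
 *	static pte_t *example_lookup_kernel_pte(unsigned long addr)
 *	{
 *		pgd_t *pgd = pgd_offset_k(addr);
 *		pud_t *pud;
 *		pmd_t *pmd;
 *
 *		if (pgd_none(*pgd))
 *			return NULL;
 *		pud = pud_offset(pgd, addr);
 *		if (pud_none(*pud))
 *			return NULL;
 *		pmd = pmd_offset(pud, addr);
 *		if (pmd_none(*pmd))
 *			return NULL;
 *		return pte_offset_kernel(pmd, addr);
 *	}
 */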

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pud_ERROR(e) \
	pr_err("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/* Encode and de-code a swap entry */
#define MAX_SWAPFILES_CHECK() do { \
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS); \
	/*							\
	 * Don't have overlapping bits with _PAGE_HPTEFLAGS	\
	 * We filter HPTEFLAGS on set_pte.			\
	 */							\
	BUILD_BUG_ON(_PAGE_HPTEFLAGS & (0x1f << _PAGE_BIT_SWAP_TYPE)); \
	BUILD_BUG_ON(_PAGE_HPTEFLAGS & _PAGE_SWP_SOFT_DIRTY); \
	} while (0)
/*
 * On the pte we don't need to handle RADIX_TREE_EXCEPTIONAL_SHIFT.
 */
#define SWP_TYPE_BITS 5
#define __swp_type(x)		(((x).val >> _PAGE_BIT_SWAP_TYPE) \
				& ((1UL << SWP_TYPE_BITS) - 1))
#define __swp_offset(x)		(((x).val & PTE_RPN_MASK) >> PTE_RPN_SHIFT)
#define __swp_entry(type, offset)	((swp_entry_t) { \
				((type) << _PAGE_BIT_SWAP_TYPE) \
				| (((offset) << PTE_RPN_SHIFT) & PTE_RPN_MASK)})
/*
 * swp_entry_t must be independent of pte bits. We build a swp_entry_t from
 * swap type and offset we get from swap and convert that to pte to find a
 * matching pte in linux page table.
 * Clear bits not found in swap entries here.
 */
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val((pte)) & ~_PAGE_PTE })
#define __swp_entry_to_pte(x)	__pte((x).val | _PAGE_PTE)
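/*
 * Illustrative sketch (not part of this header): how a swap entry round-trips
 * through the macros above.  The swap type occupies SWP_TYPE_BITS starting at
 * _PAGE_BIT_SWAP_TYPE and the offset is carried in the RPN field, with
 * _PAGE_PTE added or stripped when converting to and from a pte.
 *
 *	swp_entry_t entry = __swp_entry(type, offset);
 *	pte_t pte = __swp_entry_to_pte(entry);		// sets _PAGE_PTE
 *	swp_entry_t back = __pte_to_swp_entry(pte);	// clears _PAGE_PTE
 *
 *	// __swp_type(back) == type and __swp_offset(back) == offset
 */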

static inline bool pte_user(pte_t pte)
{
	return !(pte_val(pte) & _PAGE_PRIVILEGED);
}

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SWP_SOFT_DIRTY	(1UL << (SWP_TYPE_BITS + _PAGE_BIT_SWAP_TYPE))
#else
#define _PAGE_SWP_SOFT_DIRTY	0UL
#endif /* CONFIG_MEM_SOFT_DIRTY */

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SWP_SOFT_DIRTY);
}
static inline bool pte_swp_soft_dirty(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_SWP_SOFT_DIRTY);
}
static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_SWP_SOFT_DIRTY);
}
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

static inline bool check_pte_access(unsigned long access, unsigned long ptev)
{
	/*
	 * This checks the _PAGE_RWX and _PAGE_PRESENT bits.
	 */
	if (access & ~ptev)
		return false;
	/*
	 * This checks for access to privileged space.
	 */
	if ((access & _PAGE_PRIVILEGED) != (ptev & _PAGE_PRIVILEGED))
		return false;

	return true;
}
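/*
 * Illustrative sketch (not part of this header): how a hash fault path might
 * build the "access" mask before calling check_pte_access().  The caller is
 * simplified here; is_write and user_mode are placeholder names.
 *
 *	unsigned long access = _PAGE_PRESENT | _PAGE_READ;
 *
 *	if (is_write)
 *		access |= _PAGE_WRITE;
 *	if (!user_mode)
 *		access |= _PAGE_PRIVILEGED;
 *	if (!check_pte_access(access, pte_val(pte)))
 *		return 1;	// treat as an access/protection fault
 */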

void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
void pgtable_cache_init(void);

struct page *realmode_pfn_to_page(unsigned long pfn);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot);
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);
extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
				 pmd_t *pmd);
extern int has_transparent_hugepage(void);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pte_t *pmdp_ptep(pmd_t *pmd)
{
	return (pte_t *)pmd;
}

#define pmd_pfn(pmd)		pte_pfn(pmd_pte(pmd))
#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
#define pmd_soft_dirty(pmd)		pte_soft_dirty(pmd_pte(pmd))
#define pmd_mksoft_dirty(pmd)		pte_pmd(pte_mksoft_dirty(pmd_pte(pmd)))
#define pmd_clear_soft_dirty(pmd)	pte_pmd(pte_clear_soft_dirty(pmd_pte(pmd)))
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

#ifdef CONFIG_NUMA_BALANCING
static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif /* CONFIG_NUMA_BALANCING */

#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_THP_HUGE));
}

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long address, pmd_t *pmdp);
#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
extern pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
				     unsigned long addr, pmd_t *pmdp);

extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp);
#define pmdp_collapse_flush pmdp_collapse_flush

#define __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				       pgtable_t pgtable);
#define __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_INVALIDATE
extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_HUGE_SPLIT_PREPARE
extern void pmdp_huge_split_prepare(struct vm_area_struct *vma,
				    unsigned long address, pmd_t *pmdp);

#define pmd_move_must_withdraw pmd_move_must_withdraw
struct spinlock;
static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
					 struct spinlock *old_pmd_ptl)
{
	/*
	 * Archs like ppc64 use pgtable to store per pmd
	 * specific information. So when we switch the pmd,
	 * we should also withdraw and deposit the pgtable
	 */
	return true;
}
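/*
 * Illustrative sketch (not part of this header): because
 * pmd_move_must_withdraw() returns true, the generic THP move path is
 * expected to re-home the deposited page table roughly as follows
 * (the surrounding locking in mm/mremap.c is omitted for brevity):
 *
 *	if (pmd_move_must_withdraw(new_ptl, old_ptl)) {
 *		pgtable_t pgtable;
 *
 *		pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
 *		pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
 *	}
 */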
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */