#ifndef _ASM_POWERPC_PGTABLE_PPC64_H_
#define _ASM_POWERPC_PGTABLE_PPC64_H_
/*
 * This file contains the functions and defines necessary to modify and use
 * the ppc64 hashed page table.
 */

#ifndef __ASSEMBLY__
#include <linux/stddef.h>
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
struct mm_struct;
#endif /* __ASSEMBLY__ */

#ifdef CONFIG_PPC_64K_PAGES
#include <asm/pgtable-64k.h>
#else
#include <asm/pgtable-4k.h>
#endif

#define FIRST_USER_ADDRESS	0

/*
 * Size of EA range mapped by our pagetables.
 */
#define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
			    PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
#define PGTABLE_RANGE (1UL << PGTABLE_EADDR_SIZE)
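/*
 * For example, assuming the 4K-page split (9-bit PTE, 7-bit PMD, 7-bit
 * PUD and 9-bit PGD indices plus a 12-bit PAGE_SHIFT), this comes to a
 * 44-bit (16TB) range; the actual sizes come from the pgtable-*.h
 * variant included above.
 */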

#if TASK_SIZE_USER64 > PGTABLE_RANGE
#error TASK_SIZE_USER64 exceeds pagetable range
#endif

#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
#error TASK_SIZE_USER64 exceeds user VSID range
#endif

/*
 * Define the address range of the vmalloc VM area.
 */
#define VMALLOC_START	ASM_CONST(0xD000000000000000)
#define VMALLOC_SIZE	ASM_CONST(0x80000000000)
#define VMALLOC_END	(VMALLOC_START + VMALLOC_SIZE)

/*
 * Define the address range of the imalloc VM area.
 */
#define PHBS_IO_BASE	VMALLOC_END
#define IMALLOC_BASE	(PHBS_IO_BASE + 0x80000000ul)	/* Reserve 2 gigs for PHBs */
#define IMALLOC_END	(VMALLOC_START + PGTABLE_RANGE)

/*
 * Region IDs
 */
#define REGION_SHIFT		60UL
#define REGION_MASK		(0xfUL << REGION_SHIFT)
#define REGION_ID(ea)		(((unsigned long)(ea)) >> REGION_SHIFT)

#define VMALLOC_REGION_ID	(REGION_ID(VMALLOC_START))
#define KERNEL_REGION_ID	(REGION_ID(PAGE_OFFSET))
#define USER_REGION_ID		(0UL)
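/*
 * Example: REGION_ID(VMALLOC_START) is 0xD, so VMALLOC_REGION_ID == 0xD;
 * with the conventional ppc64 PAGE_OFFSET of 0xC000000000000000,
 * KERNEL_REGION_ID == 0xC, and all user addresses fall in region 0.
 */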

/*
 * Common bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.  Additional
 * bits may be defined in pgtable-*.h
 */
#define _PAGE_PRESENT	0x0001 /* software: pte contains a translation */
#define _PAGE_USER	0x0002 /* matches one of the PP bits */
#define _PAGE_FILE	0x0002 /* (!present only) software: pte holds file offset */
#define _PAGE_EXEC	0x0004 /* No execute on POWER4 and newer (we invert) */
#define _PAGE_GUARDED	0x0008
#define _PAGE_COHERENT	0x0010 /* M: enforce memory coherence (SMP systems) */
#define _PAGE_NO_CACHE	0x0020 /* I: cache inhibit */
#define _PAGE_WRITETHRU	0x0040 /* W: cache write-through */
#define _PAGE_DIRTY	0x0080 /* C: page changed */
#define _PAGE_ACCESSED	0x0100 /* R: page referenced */
#define _PAGE_RW	0x0200 /* software: user write access allowed */
#define _PAGE_HASHPTE	0x0400 /* software: pte has an associated HPTE */
#define _PAGE_BUSY	0x0800 /* software: PTE & hash are busy */

#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT)

#define _PAGE_WRENABLE	(_PAGE_RW | _PAGE_DIRTY)

/* __pgprot defined in asm-powerpc/page.h */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)

#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER)
#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER | _PAGE_EXEC)
#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_WRENABLE)
#define PAGE_KERNEL_CI	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
				 _PAGE_WRENABLE | _PAGE_NO_CACHE | _PAGE_GUARDED)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_EXEC)

#define PAGE_AGP	__pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_NO_CACHE)
#define HAVE_PAGE_AGP

/* PTEIDX nibble */
#define _PTEIDX_SECONDARY	0x8
#define _PTEIDX_GROUP_IX	0x7
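/*
 * This nibble records where a linux PTE's HPTE went: _PTEIDX_SECONDARY
 * is set when the secondary hash bucket was used, and _PTEIDX_GROUP_IX
 * holds the slot (0-7) within the 8-entry PTE group.
 */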

/*
 * POWER4 and newer have per page execute protection, older chips can only
 * do this on a segment (256MB) basis.
 *
 * Also, write permissions imply read permissions.
 * This is the closest we can get..
 *
 * Note due to the way vm flags are laid out, the bits are XWR
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_X
#define __P101	PAGE_READONLY_X
#define __P110	PAGE_COPY_X
#define __P111	PAGE_COPY_X

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_X
#define __S101	PAGE_READONLY_X
#define __S110	PAGE_SHARED_X
#define __S111	PAGE_SHARED_X
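/*
 * Example: a private PROT_READ|PROT_WRITE mapping indexes __P011 and
 * gets PAGE_COPY (kept read-only for copy-on-write), while the shared
 * equivalent indexes __S011 and gets PAGE_SHARED with _PAGE_RW set.
 */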

#ifndef __ASSEMBLY__

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
#endif /* __ASSEMBLY__ */

#ifdef CONFIG_HUGETLB_PAGE

#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif

#ifndef __ASSEMBLY__

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * mk_pte takes a (struct page *) as input
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	pte_t pte;

	pte_val(pte) = (pfn << PTE_RPN_SHIFT) | pgprot_val(pgprot);
	return pte;
}
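/*
 * For example, mk_pte(page, PAGE_KERNEL) expands to
 * pfn_pte(page_to_pfn(page), PAGE_KERNEL): the pfn is simply shifted
 * into the RPN field above the flag bits defined earlier.
 */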

#define pte_modify(_pte, newprot) \
	(__pte((pte_val(_pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)))

#define pte_none(pte)		((pte_val(pte) & ~_PAGE_HPTEFLAGS) == 0)
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)

/* pte_clear moved to later in this file */

#define pte_pfn(x)		((unsigned long)((pte_val(x) >> PTE_RPN_SHIFT)))
#define pte_page(x)		pfn_to_page(pte_pfn(x))

#define PMD_BAD_BITS		(PTE_TABLE_SIZE-1)
#define PUD_BAD_BITS		(PMD_TABLE_SIZE-1)
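/*
 * A valid pmd/pud entry holds the kernel virtual address of the table
 * one level down, which is aligned to that table's size; any of these
 * low "bad" bits being set therefore indicates a corrupt entry.
 */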

#define pmd_set(pmdp, pmdval)	(pmd_val(*(pmdp)) = (pmdval))
#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		(!is_kernel_addr(pmd_val(pmd)) \
				 || (pmd_val(pmd) & PMD_BAD_BITS))
#define pmd_present(pmd)	(pmd_val(pmd) != 0)
#define pmd_clear(pmdp)		(pmd_val(*(pmdp)) = 0)
#define pmd_page_vaddr(pmd)	(pmd_val(pmd) & ~PMD_MASKED_BITS)
#define pmd_page(pmd)		virt_to_page(pmd_page_vaddr(pmd))

#define pud_set(pudp, pudval)	(pud_val(*(pudp)) = (pudval))
#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!is_kernel_addr(pud_val(pud)) \
				 || (pud_val(pud) & PUD_BAD_BITS))
#define pud_present(pud)	(pud_val(pud) != 0)
#define pud_clear(pudp)		(pud_val(*(pudp)) = 0)
#define pud_page_vaddr(pud)	(pud_val(pud) & ~PUD_MASKED_BITS)
#define pud_page(pud)		virt_to_page(pud_page_vaddr(pud))

#define pgd_set(pgdp, pudp)	({pgd_val(*(pgdp)) = (unsigned long)(pudp);})

/*
 * Find an entry in a page-table-directory.  We combine the address region
 * (the high order N bits) and the pgd portion of the address.
 */
/* to avoid overflow in free_pgtables we don't use PTRS_PER_PGD here */
#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & 0x1ff)

#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))

#define pmd_offset(pudp,addr) \
	(((pmd_t *) pud_page_vaddr(*(pudp))) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))

#define pte_offset_kernel(dir,addr) \
	(((pte_t *) pmd_page_vaddr(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			do { } while(0)
#define pte_unmap_nested(pte)		do { } while(0)
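/*
 * The map/unmap variants can be no-ops here: ppc64 page tables always
 * live in the kernel linear mapping, so no temporary (highmem-style)
 * mapping is needed to touch a PTE page.
 */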

/* to find an entry in a kernel page-table-directory */
/* This now only contains the vmalloc pages */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)  { return pte_val(pte) & _PAGE_USER; }
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
static inline int pte_exec(pte_t pte)  { return pte_val(pte) & _PAGE_EXEC; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)  { return pte_val(pte) & _PAGE_FILE; }

static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
static inline void pte_cache(pte_t pte)   { pte_val(pte) &= ~_PAGE_NO_CACHE; }

static inline pte_t pte_rdprotect(pte_t pte) {
	pte_val(pte) &= ~_PAGE_USER; return pte; }
static inline pte_t pte_exprotect(pte_t pte) {
	pte_val(pte) &= ~_PAGE_EXEC; return pte; }
static inline pte_t pte_wrprotect(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_RW); return pte; }
static inline pte_t pte_mkclean(pte_t pte) {
	pte_val(pte) &= ~(_PAGE_DIRTY); return pte; }
static inline pte_t pte_mkold(pte_t pte) {
	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkread(pte_t pte) {
	pte_val(pte) |= _PAGE_USER; return pte; }
static inline pte_t pte_mkexec(pte_t pte) {
	pte_val(pte) |= _PAGE_USER | _PAGE_EXEC; return pte; }
static inline pte_t pte_mkwrite(pte_t pte) {
	pte_val(pte) |= _PAGE_RW; return pte; }
static inline pte_t pte_mkdirty(pte_t pte) {
	pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte) {
	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkhuge(pte_t pte) {
	return pte; }

/* Atomic PTE updates */
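/*
 * A sketch of what happens below: the ldarx/stdcx. pair retries until
 * it atomically clears the requested bits, first spinning while
 * _PAGE_BUSY is set, since that bit marks the PTE as owned by the hash
 * miss code.  If the old PTE had a hash entry, the stale HPTE is handed
 * to hpte_need_flush().  A typical caller clears one bit, e.g.
 * ptep_set_wrprotect() further down:
 *
 *	old = pte_update(mm, addr, ptep, _PAGE_RW, 0);
 */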
static inline unsigned long pte_update(struct mm_struct *mm,
				       unsigned long addr,
				       pte_t *ptep, unsigned long clr,
				       int huge)
{
	unsigned long old, tmp;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3		# pte_update\n\
	andi.	%1,%0,%6\n\
	bne-	1b \n\
	andc	%1,%0,%4 \n\
	stdcx.	%1,0,%3 \n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*ptep)
	: "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY)
	: "cc" );

	if (old & _PAGE_HASHPTE)
		hpte_need_flush(mm, addr, ptep, old, huge);
	return old;
}

static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
					      unsigned long addr, pte_t *ptep)
{
	unsigned long old;

	if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
		return 0;
	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0);
	return (old & _PAGE_ACCESSED) != 0;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(__vma, __addr, __ptep)		\
({									\
	int __r;							\
	__r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
	__r;								\
})

/*
 * On RW/DIRTY bit transitions we can avoid flushing the hpte.  For the
 * moment we always flush, but we need to fix hpte_need_flush and test
 * whether the optimisation is worth it.
 */
static inline int __ptep_test_and_clear_dirty(struct mm_struct *mm,
					      unsigned long addr, pte_t *ptep)
{
	unsigned long old;

	if ((pte_val(*ptep) & _PAGE_DIRTY) == 0)
		return 0;
	old = pte_update(mm, addr, ptep, _PAGE_DIRTY, 0);
	return (old & _PAGE_DIRTY) != 0;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define ptep_test_and_clear_dirty(__vma, __addr, __ptep)		\
({									\
	int __r;							\
	__r = __ptep_test_and_clear_dirty((__vma)->vm_mm, __addr, __ptep); \
	__r;								\
})

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	unsigned long old;

	if ((pte_val(*ptep) & _PAGE_RW) == 0)
		return;
	old = pte_update(mm, addr, ptep, _PAGE_RW, 0);
}

/*
 * We currently remove entries from the hashtable regardless of whether
 * the entry was young or dirty.  The generic routines only flush if the
 * entry was young or dirty, which is not good enough.
 *
 * We should be more intelligent about this but for the moment we override
 * these functions and force a tlb flush unconditionally
 */
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(__vma, __address, __ptep)		\
({									\
	int __young = __ptep_test_and_clear_young((__vma)->vm_mm, __address, \
						  __ptep);		\
	__young;							\
})

#define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
#define ptep_clear_flush_dirty(__vma, __address, __ptep)		\
({									\
	int __dirty = __ptep_test_and_clear_dirty((__vma)->vm_mm, __address, \
						  __ptep);		\
	__dirty;							\
})

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0);
	return __pte(old);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	pte_update(mm, addr, ptep, ~0UL, 0);
}
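/*
 * Note: clearing with ~0UL wipes every bit of the old PTE, and
 * pte_update() itself arranges for any associated hash entry to be
 * flushed, so neither helper needs a separate hash flush.
 */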

/*
 * set_pte stores a linux PTE into the linux page table.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (pte_present(*ptep))
		pte_clear(mm, addr, ptep);
	pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
	*ptep = pte;
}

/* Set the dirty and/or accessed bits atomically in a linux PTE, this
 * function doesn't need to flush the hash entry
 */
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
{
	unsigned long bits = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
	unsigned long old, tmp;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%4\n\
	andi.	%1,%0,%6\n\
	bne-	1b \n\
	or	%0,%3,%0\n\
	stdcx.	%0,0,%4\n\
	bne-	1b"
	:"=&r" (old), "=&r" (tmp), "=m" (*ptep)
	:"r" (bits), "r" (ptep), "m" (*ptep), "i" (_PAGE_BUSY)
	:"cc");
}
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
	do {								\
		__ptep_set_access_flags(__ptep, __entry, __dirty);	\
		flush_tlb_page_nohash(__vma, __address);		\
	} while(0)

/*
 * Macro to mark a page protection value as "uncacheable".
 */
#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))

struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

extern pgd_t swapper_pg_dir[];

extern void paging_init(void);

/* Encode and de-code a swap entry */
#define __swp_type(entry)	(((entry).val >> 1) & 0x3f)
#define __swp_offset(entry)	((entry).val >> 8)
#define __swp_entry(type, offset) ((swp_entry_t){((type)<< 1)|((offset)<<8)})
#define __pte_to_swp_entry(pte)	((swp_entry_t){pte_val(pte) >> PTE_RPN_SHIFT})
#define __swp_entry_to_pte(x)	((pte_t) { (x).val << PTE_RPN_SHIFT })
#define pte_to_pgoff(pte)	(pte_val(pte) >> PTE_RPN_SHIFT)
#define pgoff_to_pte(off)	((pte_t) {((off) << PTE_RPN_SHIFT)|_PAGE_FILE})
#define PTE_FILE_MAX_BITS	(BITS_PER_LONG - PTE_RPN_SHIFT)
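/*
 * Worked example: __swp_entry(3, 0x10) packs to (3 << 1) | (0x10 << 8)
 * == 0x1006, from which __swp_type() recovers 3 and __swp_offset() 0x10.
 * __swp_entry_to_pte() shifts the whole value up by PTE_RPN_SHIFT, so the
 * low flag bits (including _PAGE_PRESENT) of the resulting pte stay clear.
 */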

/*
 * kern_addr_valid is intended to indicate whether an address is a valid
 * kernel address.  Most 32-bit archs define it as always true (like this)
 * but most 64-bit archs actually perform a test.  What should we do here?
 * The only use is in fs/ncpfs/dir.c
 */
#define kern_addr_valid(addr)	(1)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
		remap_pfn_range(vma, vaddr, pfn, size, prot)

void pgtable_cache_init(void);

/*
 * find_linux_pte returns the address of a linux pte for a given
 * effective address and directory.  If not found, it returns NULL.
 */
static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	pte_t *pt = NULL;

	pg = pgdir + pgd_index(ea);
	if (!pgd_none(*pg)) {
		pu = pud_offset(pg, ea);
		if (!pud_none(*pu)) {
			pm = pmd_offset(pu, ea);
			if (pmd_present(*pm))
				pt = pte_offset_kernel(pm, ea);
		}
	}
	return pt;
}
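/*
 * Usage sketch (illustrative): to resolve a kernel virtual address ea,
 * walk from the kernel page tables and be prepared for a NULL result:
 *
 *	pte_t *ptep = find_linux_pte(init_mm.pgd, ea);
 */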

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_PGTABLE_PPC64_H_ */