#ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H
#define _ASM_POWERPC_NOHASH_32_PGTABLE_H

#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/io.h>            /* For sub-arch specific PPC_PIN_SIZE */

extern unsigned long ioremap_bot;

#ifdef CONFIG_44x
extern int icache_44x_need_flush;
#endif

#endif /* __ASSEMBLY__ */

#define PTE_INDEX_SIZE    PTE_SHIFT
#define PMD_INDEX_SIZE    0
#define PUD_INDEX_SIZE    0
#define PGD_INDEX_SIZE    (32 - PGDIR_SHIFT)

#define PMD_CACHE_INDEX   PMD_INDEX_SIZE

#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE    (sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE    0
#define PUD_TABLE_SIZE    0
#define PGD_TABLE_SIZE    (sizeof(pgd_t) << PGD_INDEX_SIZE)
#endif /* __ASSEMBLY__ */

#define PTRS_PER_PTE    (1 << PTE_INDEX_SIZE)
#define PTRS_PER_PGD    (1 << PGD_INDEX_SIZE)

/*
 * The normal case is that PTEs are 32 bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two level page table layout where the pgdir is 8KB and the MS 13 bits
 * are an index to the second level table.  The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT    (PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SIZE     (1UL << PGDIR_SHIFT)
#define PGDIR_MASK     (~(PGDIR_SIZE-1))

/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS        0

#define USER_PTRS_PER_PGD    (TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS   0UL

#define pte_ERROR(e) \
        pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
                (unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
        pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
 * value (for now) on others, from where we can start layout kernel
 * virtual space that goes below PKMAP and FIXMAP
 */
#ifdef CONFIG_HIGHMEM
#define KVIRT_TOP    PKMAP_BASE
#else
#define KVIRT_TOP    (0xfe000000UL)    /* for now, could be FIXMAP_BASE ? */
#endif

/*
 * ioremap_bot starts at that address. Early ioremaps move down from there,
 * until mem_init() at which point this becomes the top of the vmalloc
 * and ioremap space
 */
#ifdef CONFIG_NOT_COHERENT_CACHE
#define IOREMAP_TOP    ((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK)
#else
#define IOREMAP_TOP    KVIRT_TOP
#endif

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be a 64MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about clashes between our early calls to ioremap() that start growing down
 * from IOREMAP_TOP being run into the VM area allocations (growing upwards
 * from VMALLOC_START).  For this reason we have ioremap_bot to check when
 * we actually run into our mappings setup in the early boot with the VM
 * system.  This really does become a problem for machines with good amounts
 * of RAM.  -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */
#ifdef PPC_PIN_SIZE
#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
#define VMALLOC_END    ioremap_bot

/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#if defined(CONFIG_40x)
#include <asm/nohash/32/pte-40x.h>
#elif defined(CONFIG_44x)
#include <asm/nohash/32/pte-44x.h>
#elif defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
#include <asm/nohash/pte-book3e.h>
#elif defined(CONFIG_FSL_BOOKE)
#include <asm/nohash/32/pte-fsl-booke.h>
#elif defined(CONFIG_8xx)
#include <asm/nohash/32/pte-8xx.h>
#endif

/* And here we include common definitions */
#include <asm/pte-common.h>

#ifndef __ASSEMBLY__

#define pte_clear(mm, addr, ptep) \
        do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)

#define pmd_none(pmd)        (!pmd_val(pmd))
#define pmd_bad(pmd)         (pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd)     (pmd_val(pmd) & _PMD_PRESENT_MASK)
static inline void pmd_clear(pmd_t *pmdp)
{
        *pmdp = __pmd(0);
}

/*
 * When flushing the tlb entry for a page, we also need to flush the hash
 * table entry.  flush_hash_pages is assembler (for speed) in hashtable.S.
 */
extern int flush_hash_pages(unsigned context, unsigned long va,
                            unsigned long pmdval, int count);

/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va,
                          unsigned long pmdval);

/* Flush an entry from the TLB/hash table */
extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
                             unsigned long address);

/*
 * PTE updates. This function is called whenever an existing
 * valid PTE is updated. This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide. In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * In addition, on 44x, we also maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.
 */
#ifndef CONFIG_PTE_64BIT
static inline unsigned long pte_update(pte_t *p,
                                       unsigned long clr,
                                       unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
        unsigned long old, tmp;

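        /*
         * lwarx/stwcx. retry loop: reload and redo the read-modify-write
         * whenever the reservation is lost before the conditional store
         * succeeds.  PPC405_ERR77 expands to the workaround for a PPC405
         * erratum affecting stwcx.
         */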
        __asm__ __volatile__("\
1:      lwarx   %0,0,%3\n\
        andc    %1,%0,%4\n\
        or      %1,%1,%5\n"
        PPC405_ERR77(0,%3)
"       stwcx.  %1,0,%3\n\
        bne-    1b"
        : "=&r" (old), "=&r" (tmp), "=m" (*p)
        : "r" (p), "r" (clr), "r" (set), "m" (*p)
        : "cc" );
#else /* PTE_ATOMIC_UPDATES */
        unsigned long old = pte_val(*p);
        *p = __pte((old & ~clr) | set);
#endif /* !PTE_ATOMIC_UPDATES */

#ifdef CONFIG_44x
        if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
                icache_44x_need_flush = 1;
#endif
        return old;
}
#else /* CONFIG_PTE_64BIT */
static inline unsigned long long pte_update(pte_t *p,
                                            unsigned long clr,
                                            unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
        unsigned long long old;
        unsigned long tmp;

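        /*
         * The flag bits live in the low 32-bit word of the 64-bit PTE, so
         * only that word (at p + 4 on this big-endian layout) is loaded
         * with a reservation and stored conditionally; the high word is
         * read with a plain lwzx.
         */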
        __asm__ __volatile__("\
1:      lwarx   %L0,0,%4\n\
        lwzx    %0,0,%3\n\
        andc    %1,%L0,%5\n\
        or      %1,%1,%6\n"
        PPC405_ERR77(0,%3)
"       stwcx.  %1,0,%4\n\
        bne-    1b"
        : "=&r" (old), "=&r" (tmp), "=m" (*p)
        : "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
        : "cc" );
#else /* PTE_ATOMIC_UPDATES */
        unsigned long long old = pte_val(*p);
        *p = __pte((old & ~(unsigned long long)clr) | set);
#endif /* !PTE_ATOMIC_UPDATES */

#ifdef CONFIG_44x
        if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
                icache_44x_need_flush = 1;
#endif
        return old;
}
#endif /* CONFIG_PTE_64BIT */

/*
 * 2.6 calls this without flushing the TLB entry; this is wrong
 * for our hash-based implementation, so we fix that up here.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
{
        unsigned long old;
        old = pte_update(ptep, _PAGE_ACCESSED, 0);
#if _PAGE_HASHPTE != 0
        if (old & _PAGE_HASHPTE) {
                unsigned long ptephys = __pa(ptep) & PAGE_MASK;
                flush_hash_pages(context, addr, ptephys, 1);
        }
#endif
        return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
        __ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                                       pte_t *ptep)
{
        return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
                                      pte_t *ptep)
{
        pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), _PAGE_RO);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
                                           unsigned long addr, pte_t *ptep)
{
        ptep_set_wrprotect(mm, addr, ptep);
}

static inline void __ptep_set_access_flags(struct mm_struct *mm,
                                           pte_t *ptep, pte_t entry,
                                           unsigned long address)
{
        unsigned long set = pte_val(entry) &
                (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
        unsigned long clr = ~pte_val(entry) & _PAGE_RO;

        pte_update(ptep, clr, set);
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)    (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)

/*
 * Note that on Book E processors, the pmd contains the kernel virtual
 * (lowmem) address of the pte page.  The physical address is less useful
 * because everything runs with translation enabled (even the TLB miss
 * handler).  On everything else the pmd contains the physical address
 * of the pte page.  -- paulus
 */
#ifndef CONFIG_BOOKE
#define pmd_page_vaddr(pmd)    \
        ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)          \
        pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
#else
#define pmd_page_vaddr(pmd)    \
        ((unsigned long) (pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)          \
        pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address)       ((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)  ((mm)->pgd + pgd_index(address))

/* Find an entry in the third-level page table.. */
#define pte_index(address)        \
        (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr)    \
        (pmd_bad(*(dir)) ? NULL : (pte_t *)pmd_page_vaddr(*(dir)) + \
                                  pte_index(addr))
#define pte_offset_map(dir, addr)       \
        ((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
#define pte_unmap(pte)        kunmap_atomic(pte)

/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit or the _PAGE_HASHPTE bit (if used).
 *   -- paulus
 */
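/*
 * The entry is stored in the PTE shifted left by 3, so PTE bits 0-2 stay
 * clear; the swap type occupies the low 5 bits of the entry and the offset
 * the remaining bits.
 */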
#define __swp_type(entry)          ((entry).val & 0x1f)
#define __swp_offset(entry)        ((entry).val >> 5)
#define __swp_entry(type, offset)  ((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)    ((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)      ((pte_t) { (x).val << 3 })

extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
                      pmd_t **pmdp);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_NOHASH_32_PGTABLE_H */