/* arch/powerpc/include/asm/nohash/32/pgtable.h */
#ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H
#define _ASM_POWERPC_NOHASH_32_PGTABLE_H

#include <asm-generic/pgtable-nopmd.h>

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/io.h>			/* For sub-arch specific PPC_PIN_SIZE */

extern unsigned long ioremap_bot;

#ifdef CONFIG_44x
extern int icache_44x_need_flush;
#endif

#endif /* __ASSEMBLY__ */

/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two level page table layout where the pgdir is 8KB and the MS 13 bits
 * are an index to the second level table.  The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_SHIFT)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * entries per page directory level: our page-table tree is two-level, so
 * we don't really have any PMD directory.
 */
#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_SHIFT)
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << (32 - PGDIR_SHIFT))
#endif /* __ASSEMBLY__ */

#define PTRS_PER_PTE	(1 << PTE_SHIFT)
#define PTRS_PER_PMD	1
#define PTRS_PER_PGD	(1 << (32 - PGDIR_SHIFT))

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0UL

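/*
 * Worked example (editor's sketch): with the common 4K-page, 32-bit-PTE
 * configuration (PAGE_SHIFT = 12, PTE_SHIFT = 10), PGDIR_SHIFT is 22,
 * so each pgd entry maps PGDIR_SIZE = 4MB and PTRS_PER_PGD =
 * 1 << (32 - 22) = 1024: the "1-page 1024-entry pgdir" case described
 * above.  With 64-bit PTEs a 4K page only holds 512 entries
 * (PTE_SHIFT = 9), giving the 2048-entry, 8KB pgdir layout instead.
 */
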
#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
		(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
 * value (for now) on others, from which we can start laying out kernel
 * virtual space that goes below PKMAP and FIXMAP
 */
#ifdef CONFIG_HIGHMEM
#define KVIRT_TOP	PKMAP_BASE
#else
#define KVIRT_TOP	(0xfe000000UL)	/* for now, could be FIXMAP_BASE ? */
#endif

/*
 * ioremap_bot starts at that address.  Early ioremaps move down from there,
 * until mem_init(), at which point this becomes the top of the vmalloc
 * and ioremap space
 */
#ifdef CONFIG_NOT_COHERENT_CACHE
#define IOREMAP_TOP	((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK)
#else
#define IOREMAP_TOP	KVIRT_TOP
#endif

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be at least a 16MB "hole"
 * after the physical memory until the kernel virtual memory starts.  That
 * means that any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about clashes between our early calls to ioremap() (which start growing
 * down from IOREMAP_TOP) and the VM area allocations (growing upwards
 * from VMALLOC_START).  For this reason we have ioremap_bot to check when
 * we actually run into our mappings set up in early boot with the VM
 * system.  This really does become a problem for machines with large
 * amounts of RAM.  -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */
#ifdef PPC_PIN_SIZE
#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
#define VMALLOC_END	ioremap_bot

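/*
 * Illustrative layout (editor's sketch with assumed round numbers: a
 * non-HIGHMEM, non-coherent-cache board, 256MB of RAM at PAGE_OFFSET
 * 0xc0000000, CONFIG_CONSISTENT_SIZE taken as 16MB; real values are
 * platform dependent):
 *
 *	KVIRT_TOP	= 0xfe000000
 *	IOREMAP_TOP	= 0xfe000000 - 0x01000000 = 0xfd000000
 *	ioremap_bot	  starts at IOREMAP_TOP and moves down
 *	VMALLOC_END	= ioremap_bot
 *	VMALLOC_START	= (0xd0000000 + 0x1000000) & ~0xffffff = 0xd1000000
 *			  (high_memory = 0xd0000000)
 */
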
/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#if defined(CONFIG_40x)
#include <asm/nohash/32/pte-40x.h>
#elif defined(CONFIG_44x)
#include <asm/nohash/32/pte-44x.h>
#elif defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
#include <asm/nohash/pte-book3e.h>
#elif defined(CONFIG_FSL_BOOKE)
#include <asm/nohash/32/pte-fsl-booke.h>
#elif defined(CONFIG_8xx)
#include <asm/nohash/32/pte-8xx.h>
#endif

/* And here we include common definitions */
#include <asm/pte-common.h>

#ifndef __ASSEMBLY__

#define pte_clear(mm, addr, ptep) \
	do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}

/*
 * When flushing the tlb entry for a page, we also need to flush the hash
 * table entry.  flush_hash_pages is assembler (for speed) in hashtable.S.
 */
extern int flush_hash_pages(unsigned context, unsigned long va,
			    unsigned long pmdval, int count);

/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va,
			  unsigned long pmdval);

/* Flush an entry from the TLB/hash table */
extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
			     unsigned long address);

/*
 * PTE updates.  This function is called whenever an existing
 * valid PTE is updated.  This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide.  In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * In addition, on 44x, we also maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.
 */
#ifndef CONFIG_PTE_64BIT
static inline unsigned long pte_update(pte_t *p,
				       unsigned long clr,
				       unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long old, tmp;

	__asm__ __volatile__("\
1:	lwarx	%0,0,%3\n\
	andc	%1,%0,%4\n\
	or	%1,%1,%5\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%3\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else /* PTE_ATOMIC_UPDATES */
	unsigned long old = pte_val(*p);
	*p = __pte((old & ~clr) | set);
#endif /* !PTE_ATOMIC_UPDATES */

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#else /* CONFIG_PTE_64BIT */
static inline unsigned long long pte_update(pte_t *p,
					    unsigned long clr,
					    unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long long old;
	unsigned long tmp;

	__asm__ __volatile__("\
1:	lwarx	%L0,0,%4\n\
	lwzx	%0,0,%3\n\
	andc	%1,%L0,%5\n\
	or	%1,%1,%6\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%4\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else /* PTE_ATOMIC_UPDATES */
	unsigned long long old = pte_val(*p);
	*p = __pte((old & ~(unsigned long long)clr) | set);
#endif /* !PTE_ATOMIC_UPDATES */

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#endif /* CONFIG_PTE_64BIT */

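/*
 * Editor's note on usage (illustrative, not an API addition): pte_update()
 * is a clear-then-set read-modify-write that returns the old PTE value,
 * so a caller can change bits and observe what was there in one shot, e.g.
 *
 *	old = pte_update(ptep, _PAGE_ACCESSED, 0);
 *
 * atomically clears the accessed bit (when PTE_ATOMIC_UPDATES is defined)
 * and tells the caller whether it was set.  pte_clear() above is the
 * extreme case: it clears every bit except _PAGE_HASHPTE.  In the 64-bit
 * PTE variant only the low word is updated atomically, which is why the
 * PTE flag bits live there (the high word holds the extended physical
 * address).
 */
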
/*
 * 2.6 calls this without flushing the TLB entry; this is wrong
 * for our hash-based implementation, so we fix that up here.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
{
	unsigned long old;
	old = pte_update(ptep, _PAGE_ACCESSED, 0);
#if _PAGE_HASHPTE != 0
	if (old & _PAGE_HASHPTE) {
		unsigned long ptephys = __pa(ptep) & PAGE_MASK;
		flush_hash_pages(context, addr, ptephys, 1);
	}
#endif
	return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
	__ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), _PAGE_RO);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	ptep_set_wrprotect(mm, addr, ptep);
}

static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
	unsigned long set = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
	unsigned long clr = ~pte_val(entry) & _PAGE_RO;

	pte_update(ptep, clr, set);
}

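/*
 * Worked example (editor's, illustrative): for a dirty, writable fault,
 * pte_val(entry) has _PAGE_DIRTY and _PAGE_RW set and _PAGE_RO clear,
 * so set = _PAGE_DIRTY | _PAGE_RW and clr = _PAGE_RO: the one update
 * serves both RW-style sub-arches (write bit set) and RO-style ones
 * (read-only bit cleared).  Note the helper only ever grants access (it
 * sets DIRTY/ACCESSED/RW/EXEC and clears RO); revoking write permission
 * goes through ptep_set_wrprotect() above instead.
 */
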
#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)

/*
 * Note that on Book E processors, the pmd contains the kernel virtual
 * (lowmem) address of the pte page.  The physical address is less useful
 * because everything runs with translation enabled (even the TLB miss
 * handler).  On everything else the pmd contains the physical address
 * of the pte page.  -- paulus
 */
#ifndef CONFIG_BOOKE
#define pmd_page_vaddr(pmd)	\
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
#else
#define pmd_page_vaddr(pmd)	\
	((unsigned long) (pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address)	((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

/* Find an entry in the third-level page table. */
#define pte_index(address)		\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr)	\
	(pmd_bad(*(dir)) ? NULL : (pte_t *)pmd_page_vaddr(*(dir)) + \
	 pte_index(addr))
#define pte_offset_map(dir, addr) \
	((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
#define pte_unmap(pte)		kunmap_atomic(pte)

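/*
 * Editor's sketch (illustrative only, not part of this header): how the
 * macros above compose into a complete two-level walk of a kernel
 * address.  The function name is invented; the pud/pmd levels are the
 * folded ones from <asm-generic/pgtable-nopmd.h>, and locking is omitted.
 */
#if 0
static pte_t *example_lookup_kernel_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);	/* top-level entry */
	pud_t *pud = pud_offset(pgd, addr);	/* folded: same entry */
	pmd_t *pmd = pmd_offset(pud, addr);	/* folded: PTRS_PER_PMD == 1 */

	/* pte_offset_kernel() itself returns NULL when pmd_bad() */
	return pte_offset_kernel(pmd, addr);
}
#endif
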
/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit or the _PAGE_HASHPTE bit (if used).
 *   -- paulus
 */
#define __swp_type(entry)		((entry).val & 0x1f)
#define __swp_offset(entry)		((entry).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })

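/*
 * Worked example (editor's, illustrative): __swp_entry(2, 100) yields
 * val = 2 | (100 << 5) = 0xc82; __swp_type() recovers 0xc82 & 0x1f = 2
 * and __swp_offset() recovers 0xc82 >> 5 = 100.  __swp_entry_to_pte()
 * then shifts the value left by 3, keeping the three low PTE bits clear
 * so that, per the note above, a swap PTE never has _PAGE_PRESENT (or
 * _PAGE_HASHPTE) set and cannot be mistaken for a valid translation.
 */
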
#ifndef CONFIG_PPC_4K_PAGES
void pgtable_cache_init(void);
#else
/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)
#endif

extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
		      pmd_t **pmdp);

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_POWERPC_NOHASH_32_PGTABLE_H */