/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H
#define _ASM_POWERPC_BOOK3S_32_PGTABLE_H

#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>

#include <asm/book3s/32/hash.h>

/* And here we include common definitions */
#include <asm/pte-common.h>

#define PTE_INDEX_SIZE	PTE_SHIFT
#define PMD_INDEX_SIZE	0
#define PUD_INDEX_SIZE	0
#define PGD_INDEX_SIZE	(32 - PGDIR_SHIFT)

#define PMD_CACHE_INDEX	PMD_INDEX_SIZE
#define PUD_CACHE_INDEX	PUD_INDEX_SIZE

#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE	0
#define PUD_TABLE_SIZE	0
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
#endif	/* __ASSEMBLY__ */

#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)

/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two level page table layout where the pgdir is 8KB and the MS 13 bits
 * are an index to the second level table.  The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
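
/*
 * Worked example (a sketch, assuming 4K pages and the usual PTE_SHIFT
 * values of 10 for 32-bit PTEs and 9 for 64-bit PTEs): with 32-bit PTEs,
 * PGDIR_SHIFT is 22, each PGD entry maps 4MB and PTRS_PER_PGD is 1024
 * (a one-page pgdir).  With CONFIG_PTE_64BIT, PGDIR_SHIFT is 21, each PGD
 * entry maps 2MB, PTRS_PER_PGD is 2048 (an 8KB pgdir) and each PTE page
 * holds 512 entries, matching the two-level layout described above.
 */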

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)

/*
 * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
 * value (for now) on others, from where we can start laying out kernel
 * virtual space that goes below PKMAP and FIXMAP
 */
#ifdef CONFIG_HIGHMEM
#define KVIRT_TOP	PKMAP_BASE
#else
#define KVIRT_TOP	(0xfe000000UL)	/* for now, could be FIXMAP_BASE ? */
#endif

/*
 * ioremap_bot starts at that address. Early ioremaps move down from there,
 * until mem_init() at which point this becomes the top of the vmalloc
 * and ioremap space
 */
#ifdef CONFIG_NOT_COHERENT_CACHE
#define IOREMAP_TOP	((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK)
#else
#define IOREMAP_TOP	KVIRT_TOP
#endif

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be at least a 16MB "hole"
 * after the physical memory until the kernel virtual memory starts.  That
 * means that any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about clashes between our early calls to ioremap() that start growing down
 * from ioremap_base being run into the VM area allocations (growing upwards
 * from VMALLOC_START).  For this reason we have ioremap_bot to check when
 * we actually run into our mappings set up in early boot with the VM
 * system.  This really does become a problem for machines with good amounts
 * of RAM. -- Cort
 */
#define VMALLOC_OFFSET	(0x1000000)	/* 16M */
#ifdef PPC_PIN_SIZE
#define VMALLOC_START	(((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START	((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
#define VMALLOC_END	ioremap_bot
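
/*
 * A rough sketch of the resulting virtual layout (illustrative only; the
 * exact addresses depend on the configuration):
 *
 *	VMALLOC_START .. VMALLOC_END (== ioremap_bot): vmalloc grows upwards
 *	ioremap_bot   .. IOREMAP_TOP: early ioremaps grow downwards
 *	IOREMAP_TOP   .. KVIRT_TOP:   consistent (non-coherent DMA) area, if any
 *	KVIRT_TOP and above:          PKMAP (with CONFIG_HIGHMEM) and FIXMAP
 */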

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/io.h>			/* For sub-arch specific PPC_PIN_SIZE */

extern unsigned long ioremap_bot;

/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS		0

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
		(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#define pte_clear(mm, addr, ptep) \
	do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}


/*
 * When flushing the TLB entry for a page, we also need to flush the hash
 * table entry.  flush_hash_pages is assembler (for speed) in hashtable.S.
 */
extern int flush_hash_pages(unsigned context, unsigned long va,
			    unsigned long pmdval, int count);

/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va,
			  unsigned long pmdval);

/* Flush an entry from the TLB/hash table */
extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
			     unsigned long address);

/*
 * PTE updates.  This function is called whenever an existing
 * valid PTE is updated.  This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bits wide.  In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * In addition, on 44x, we also maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.
 */
#ifndef CONFIG_PTE_64BIT
static inline unsigned long pte_update(pte_t *p,
				       unsigned long clr,
				       unsigned long set)
{
	unsigned long old, tmp;

	__asm__ __volatile__("\
1:	lwarx	%0,0,%3\n\
	andc	%1,%0,%4\n\
	or	%1,%1,%5\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%3\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "r" (set), "m" (*p)
	: "cc" );

	return old;
}
#else /* CONFIG_PTE_64BIT */
static inline unsigned long long pte_update(pte_t *p,
					    unsigned long clr,
					    unsigned long set)
{
	unsigned long long old;
	unsigned long tmp;

	__asm__ __volatile__("\
1:	lwarx	%L0,0,%4\n\
	lwzx	%0,0,%3\n\
	andc	%1,%L0,%5\n\
	or	%1,%1,%6\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%4\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
	: "cc" );

	return old;
}
#endif /* CONFIG_PTE_64BIT */
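
/*
 * For example (a sketch of typical usage, mirroring the helpers below):
 * clearing the referenced bit while keeping everything else intact is
 *	old = pte_update(ptep, _PAGE_ACCESSED, 0);
 * and write-protecting an entry is
 *	pte_update(ptep, _PAGE_RW | _PAGE_HWWRITE, _PAGE_RO);
 * The returned value is the PTE as it was before the update.
 */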

/*
 * 2.6 calls this without flushing the TLB entry; this is wrong
 * for our hash-based implementation, so we fix that up here.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
{
	unsigned long old;
	old = pte_update(ptep, _PAGE_ACCESSED, 0);
	if (old & _PAGE_HASHPTE) {
		unsigned long ptephys = __pa(ptep) & PAGE_MASK;
		flush_hash_pages(context, addr, ptephys, 1);
	}
	return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
	__ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), _PAGE_RO);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	ptep_set_wrprotect(mm, addr, ptep);
}


static inline void __ptep_set_access_flags(struct mm_struct *mm,
					   pte_t *ptep, pte_t entry,
					   unsigned long address)
{
	unsigned long set = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
	unsigned long clr = ~pte_val(entry) & _PAGE_RO;

	pte_update(ptep, clr, set);
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)

/*
 * Note that on Book E processors, the pmd contains the kernel virtual
 * (lowmem) address of the pte page.  The physical address is less useful
 * because everything runs with translation enabled (even the TLB miss
 * handler).  On everything else the pmd contains the physical address
 * of the pte page.  -- paulus
 */
#ifndef CONFIG_BOOKE
#define pmd_page_vaddr(pmd)	\
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
#else
#define pmd_page_vaddr(pmd)	\
	((unsigned long) (pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address)	((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

/* Find an entry in the third-level page table.. */
#define pte_index(address)		\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr)	\
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir, addr)	\
	((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
#define pte_unmap(pte)		kunmap_atomic(pte)
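
/*
 * A sketch of a kernel-address lookup using the macros above (illustrative
 * only; the pmd level is folded into the pgd by pgtable-nopmd.h, so the
 * pud/pmd steps are no-ops on this platform):
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */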

/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit or the _PAGE_HASHPTE bit (if used).
 *   -- paulus
 */
#define __swp_type(entry)		((entry).val & 0x1f)
#define __swp_offset(entry)		((entry).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })
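
/*
 * In other words (an illustrative sketch of the encoding): the swap entry
 * value sits in the PTE shifted left by 3, leaving the low PTE bits (where
 * _PAGE_PRESENT and _PAGE_HASHPTE live) clear; within the swp_entry_t value
 * itself, the low 5 bits hold the swap type and the remaining upper bits
 * hold the offset.
 */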

int map_kernel_page(unsigned long va, phys_addr_t pa, int flags);

/* Generic accessors to PTE bits */
static inline int pte_write(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_RW);}
static inline int pte_read(pte_t pte)		{ return 1; }
static inline int pte_dirty(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_DIRTY); }
static inline int pte_young(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_ACCESSED); }
static inline int pte_special(pte_t pte)	{ return !!(pte_val(pte) & _PAGE_SPECIAL); }
static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline pgprot_t pte_pgprot(pte_t pte)	{ return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }

static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

/*
 * We only find page table entries at the last level,
 * hence no need for other accessors.
 */
#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	unsigned long pteval = pte_val(pte);
	/*
	 * A read-only access is controlled by _PAGE_USER bit.
	 * We have _PAGE_READ set for WRITE and EXECUTE
	 */
	unsigned long need_pte_bits = _PAGE_PRESENT | _PAGE_USER;

	if (write)
		need_pte_bits |= _PAGE_WRITE;

	if ((pteval & need_pte_bits) != need_pte_bits)
		return false;

	return true;
}

/* Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
		     pgprot_val(pgprot));
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return pte_val(pte) >> PTE_RPN_SHIFT;
}
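
/*
 * For instance (a sketch; PTE_RPN_SHIFT places the physical page number
 * above the protection bits), the two helpers round-trip a pfn:
 *
 *	pte_pfn(pfn_pte(pfn, PAGE_KERNEL)) == pfn
 */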

/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_RW);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_RW);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte;
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}


/* This low level function performs the actual PTE insertion
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now but
 * I'm keeping it in one place rather than spread around
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
	/* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
	 * helper pte_update() which does an atomic update. We need to do that
	 * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
	 * per-CPU PTE such as a kmap_atomic, we do a simple update preserving
	 * the hash bits instead (ie, same as the non-SMP case)
	 */
	if (percpu)
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
	else
		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));

#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
	/* Second case is 32-bit with 64-bit PTE.  In this case, we
	 * can just store as long as we do the two halves in the right order
	 * with a barrier in between. This is possible because we take care,
	 * in the hash code, to pre-invalidate if the PTE was already hashed,
	 * which synchronizes us with any concurrent invalidation.
	 * In the percpu case, we also fall back to the simple update preserving
	 * the hash bits
	 */
	if (percpu) {
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
		return;
	}
	if (pte_val(*ptep) & _PAGE_HASHPTE)
		flush_hash_entry(mm, ptep, addr);
	__asm__ __volatile__("\
		stw%U0%X0 %2,%0\n\
		eieio\n\
		stw%U0%X0 %L2,%1"
	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
	: "r" (pte) : "memory");

#elif defined(CONFIG_PPC_STD_MMU_32)
	/* Third case is 32-bit hash table in UP mode, we need to preserve
	 * the _PAGE_HASHPTE bit since we may not have invalidated the previous
	 * translation in the hash yet (done in a subsequent flush_tlb_xxx())
	 * and we need to keep track that this PTE needs invalidating
	 */
	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
		      | (pte_val(pte) & ~_PAGE_HASHPTE));

#else
#error "Not supported "
#endif
}

/*
 * Macro to mark a page protection value as "uncacheable".
 */

#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_NO_CACHE | _PAGE_GUARDED);
}

#define pgprot_noncached_wc pgprot_noncached_wc
static inline pgprot_t pgprot_noncached_wc(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_NO_CACHE);
}

#define pgprot_cached pgprot_cached
static inline pgprot_t pgprot_cached(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_COHERENT);
}

#define pgprot_cached_wthru pgprot_cached_wthru
static inline pgprot_t pgprot_cached_wthru(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_COHERENT | _PAGE_WRITETHRU);
}

#define pgprot_cached_noncoherent pgprot_cached_noncoherent
static inline pgprot_t pgprot_cached_noncoherent(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL);
}

#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t prot)
{
	return pgprot_noncached_wc(prot);
}

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_32_PGTABLE_H */