/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H
#define _ASM_POWERPC_NOHASH_32_PGTABLE_H

#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/io.h>			/* For sub-arch specific PPC_PIN_SIZE */

extern unsigned long ioremap_bot;

#ifdef CONFIG_44x
extern int icache_44x_need_flush;
#endif

#endif /* __ASSEMBLY__ */

#define PTE_INDEX_SIZE	PTE_SHIFT
#define PMD_INDEX_SIZE	0
#define PUD_INDEX_SIZE	0
#define PGD_INDEX_SIZE	(32 - PGDIR_SHIFT)

#define PMD_CACHE_INDEX	PMD_INDEX_SIZE
#define PUD_CACHE_INDEX	PUD_INDEX_SIZE

#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE	0
#define PUD_TABLE_SIZE	0
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
#endif	/* __ASSEMBLY__ */

#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)

/*
 * The normal case is that PTEs are 32 bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two-level page table layout where the pgdir is 8KB and the MS 13 bits
 * are an index to the second level table.  The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
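
/*
 * Illustrative sketch (not part of this header's API): with 4k pages
 * (PAGE_SHIFT = 12) and PTE_SHIFT = 10 in the normal 32-bit PTE case,
 * PGDIR_SHIFT is 22, so a virtual address splits into a 10-bit pgdir
 * index, a 10-bit PTE index and a 12-bit page offset.  The hypothetical
 * helper below only shows that split; it is never compiled.
 */
#if 0
static inline void example_split_va(unsigned long va, unsigned long *pgd_idx,
				    unsigned long *pte_idx, unsigned long *off)
{
	*pgd_idx = va >> PGDIR_SHIFT;				/* top 10 bits */
	*pte_idx = (va >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);	/* next 10 bits */
	*off = va & (PAGE_SIZE - 1);				/* low 12 bits */
}
#endif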

/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS		0

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0UL

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
		(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
 * value (for now) on others, from where we can start laying out the
 * kernel virtual space that goes below PKMAP and FIXMAP
 */
#ifdef CONFIG_HIGHMEM
#define KVIRT_TOP	PKMAP_BASE
#else
#define KVIRT_TOP	(0xfe000000UL)	/* for now, could be FIXMAP_BASE ? */
#endif

/*
 * ioremap_bot starts at that address. Early ioremaps move down from there,
 * until mem_init() at which point this becomes the top of the vmalloc
 * and ioremap space
 */
#ifdef CONFIG_NOT_COHERENT_CACHE
#define IOREMAP_TOP	((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK)
#else
#define IOREMAP_TOP	KVIRT_TOP
#endif
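
/*
 * Illustrative sketch (an assumption about how the early allocator
 * behaves, not its actual code, never compiled): before mem_init(),
 * early ioremap() carves mappings downward from ioremap_bot, roughly as
 * below.  Everything above ioremap_bot is then ioremap space, and the
 * range from VMALLOC_START up to ioremap_bot is left for vmalloc().
 */
#if 0
static inline unsigned long example_early_ioremap_alloc(unsigned long size)
{
	ioremap_bot = (ioremap_bot - size) & PAGE_MASK;	/* grow down, page-aligned */
	return ioremap_bot;
}
#endif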

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be a 16MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about clashes between our early calls to ioremap() that start growing down
 * from IOREMAP_TOP being run into the VM area allocations (growing upwards
 * from VMALLOC_START).  For this reason we have ioremap_bot to check when
 * we actually run into our mappings setup in the early boot with the VM
 * system.  This really does become a problem for machines with large
 * amounts of RAM.  -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */
#ifdef PPC_PIN_SIZE
#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
#define VMALLOC_END	ioremap_bot
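
/*
 * Worked example (illustrative, assuming PAGE_OFFSET is 0xc0000000 and
 * 512MB of lowmem, so high_memory is 0xe0000000, without PPC_PIN_SIZE):
 *
 *	VMALLOC_START = (0xe0000000 + 0x1000000) & ~(0x1000000 - 1)
 *		      = 0xe1000000
 *
 * i.e. vmalloc space starts at the first 16MB boundary after leaving a
 * VMALLOC_OFFSET hole above the end of physical memory.
 */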

/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#if defined(CONFIG_40x)
#include <asm/nohash/32/pte-40x.h>
#elif defined(CONFIG_44x)
#include <asm/nohash/32/pte-44x.h>
#elif defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
#include <asm/nohash/pte-book3e.h>
#elif defined(CONFIG_FSL_BOOKE)
#include <asm/nohash/32/pte-fsl-booke.h>
#elif defined(CONFIG_PPC_8xx)
#include <asm/nohash/32/pte-8xx.h>
#endif

/* And here we include common definitions */
#include <asm/pte-common.h>

#ifndef __ASSEMBLY__

#define pte_clear(mm, addr, ptep) \
	do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}

/*
 * When flushing the tlb entry for a page, we also need to flush the hash
 * table entry.  flush_hash_pages is assembler (for speed) in hashtable.S.
 */
extern int flush_hash_pages(unsigned context, unsigned long va,
			    unsigned long pmdval, int count);

/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va,
			  unsigned long pmdval);

/* Flush an entry from the TLB/hash table */
extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
			     unsigned long address);

/*
 * PTE updates.  This function is called whenever an existing
 * valid PTE is updated.  This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide.  In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * In addition, on 44x, we also maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.
 */
#ifndef CONFIG_PTE_64BIT
static inline unsigned long pte_update(pte_t *p,
				       unsigned long clr,
				       unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long old, tmp;

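	/* lwarx/stwcx. retry loop: atomically clear 'clr', then set 'set' */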
	__asm__ __volatile__("\
1:	lwarx	%0,0,%3\n\
	andc	%1,%0,%4\n\
	or	%1,%1,%5\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%3\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else /* PTE_ATOMIC_UPDATES */
	unsigned long old = pte_val(*p);
	*p = __pte((old & ~clr) | set);
#endif /* !PTE_ATOMIC_UPDATES */

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#else /* CONFIG_PTE_64BIT */
static inline unsigned long long pte_update(pte_t *p,
					    unsigned long clr,
					    unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long long old;
	unsigned long tmp;

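	/*
	 * 64-bit PTE: reserve (lwarx) the low word at p + 4, also load the
	 * high word, modify the flags in the low word, and store (stwcx.)
	 * only the low word back
	 */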
	__asm__ __volatile__("\
1:	lwarx	%L0,0,%4\n\
	lwzx	%0,0,%3\n\
	andc	%1,%L0,%5\n\
	or	%1,%1,%6\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%4\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else /* PTE_ATOMIC_UPDATES */
	unsigned long long old = pte_val(*p);
	*p = __pte((old & ~(unsigned long long)clr) | set);
#endif /* !PTE_ATOMIC_UPDATES */

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#endif /* CONFIG_PTE_64BIT */
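
/*
 * Illustrative use of pte_update() (a sketch mirroring the helpers
 * below, never compiled): atomically clear the accessed bit and report
 * whether it was set.
 */
#if 0
static inline int example_clear_accessed(pte_t *ptep)
{
	return (pte_update(ptep, _PAGE_ACCESSED, 0) & _PAGE_ACCESSED) != 0;
}
#endif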

/*
 * 2.6 calls this without flushing the TLB entry; that is wrong for our
 * hash-based implementation, so we fix it up here.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
{
	unsigned long old;
	old = pte_update(ptep, _PAGE_ACCESSED, 0);
#if _PAGE_HASHPTE != 0
	if (old & _PAGE_HASHPTE) {
		unsigned long ptephys = __pa(ptep) & PAGE_MASK;
		flush_hash_pages(context, addr, ptephys, 1);
	}
#endif
	return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
	__ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), _PAGE_RO);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	ptep_set_wrprotect(mm, addr, ptep);
}

static inline void __ptep_set_access_flags(struct mm_struct *mm,
					   pte_t *ptep, pte_t entry,
					   unsigned long address)
{
	unsigned long set = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
	unsigned long clr = ~pte_val(entry) & (_PAGE_RO | _PAGE_NA);

	pte_update(ptep, clr, set);
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)

/*
 * Note that on Book E processors, the pmd contains the kernel virtual
 * (lowmem) address of the pte page.  The physical address is less useful
 * because everything runs with translation enabled (even the TLB miss
 * handler).  On everything else the pmd contains the physical address
 * of the pte page.  -- paulus
 */
#ifndef CONFIG_BOOKE
#define pmd_page_vaddr(pmd)	\
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
#else
#define pmd_page_vaddr(pmd)	\
	((unsigned long) (pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address)	 ((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))

/* Find an entry in the third-level page table.. */
#define pte_index(address)		\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr)	\
	(pmd_bad(*(dir)) ? NULL : (pte_t *)pmd_page_vaddr(*(dir)) + \
				  pte_index(addr))
#define pte_offset_map(dir, addr)		\
	((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
#define pte_unmap(pte)		kunmap_atomic(pte)
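
/*
 * Illustrative sketch (not a kernel API, never compiled): walking the
 * kernel page tables by hand with the helpers above to find the pte
 * mapping a kernel virtual address.  pud_offset()/pmd_offset() are the
 * folded no-op levels from pgtable-nopmd.h.
 */
#if 0
static inline pte_t *example_lookup_kernel_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	pmd_t *pmd = pmd_offset(pud_offset(pgd, addr), addr);

	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;		/* nothing mapped here */
	return pte_offset_kernel(pmd, addr);
}
#endif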

/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit or the _PAGE_HASHPTE bit (if used).
 *   -- paulus
 */
#define __swp_type(entry)		((entry).val & 0x1f)
#define __swp_offset(entry)		((entry).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })

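/*
 * Illustrative round trip (a sketch, never compiled): a swap entry with
 * type t and offset o becomes the PTE value (t | (o << 5)) << 3, so the
 * three low PTE bits stay clear (keeping _PAGE_PRESENT clear, per the
 * note above).
 */
#if 0
static inline void example_swap_roundtrip(void)
{
	swp_entry_t e = __swp_entry(3, 100);	/* type 3, offset 100 */
	pte_t pte = __swp_entry_to_pte(e);
	swp_entry_t back = __pte_to_swp_entry(pte);

	/* __swp_type(back) is 3 again, __swp_offset(back) is 100 again */
}
#endif
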
int map_kernel_page(unsigned long va, phys_addr_t pa, int flags);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_NOHASH_32_PGTABLE_H */