arch/powerpc/include/asm/nohash/64/pgtable.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_H
#define _ASM_POWERPC_NOHASH_64_PGTABLE_H
/*
 * This file contains the functions and defines necessary to modify and use
 * the ppc64 non-hashed (Book3E) page table.
 */

#ifdef CONFIG_PPC_64K_PAGES
#include <asm/nohash/64/pgtable-64k.h>
#else
#include <asm/nohash/64/pgtable-4k.h>
#endif
#include <asm/barrier.h>

#define FIRST_USER_ADDRESS	0UL

/*
 * Size of EA range mapped by our pagetables.
 */
#define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
			    PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
#define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE)

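/*
 * Worked example (illustrative; the exact numbers depend on the
 * configured page size): with the 4K layout's index sizes of
 * 9 (PTE) + 7 (PMD) + 9 (PUD) + 9 (PGD) and PAGE_SHIFT = 12,
 * PGTABLE_EADDR_SIZE is 46, so PGTABLE_RANGE spans 2^46 bytes
 * (64 TiB) of effective address space.
 */
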
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define PMD_CACHE_INDEX	(PMD_INDEX_SIZE + 1)
#else
#define PMD_CACHE_INDEX	PMD_INDEX_SIZE
#endif

/*
 * Define the address range of the kernel non-linear virtual area
 */
#define KERN_VIRT_START	ASM_CONST(0x8000000000000000)
#define KERN_VIRT_SIZE	ASM_CONST(0x0000100000000000)

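/*
 * Illustrative arithmetic: KERN_VIRT_SIZE is 0x0000100000000000
 * = 2^44 bytes = 16 TiB, so the vmalloc quarter below is 4 TiB,
 * the virtual memmap quarter is 4 TiB, and the IO half is 8 TiB.
 */
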
/*
 * The vmalloc space starts at the beginning of that region, and
 * occupies half of it on hash CPUs and a quarter of it on Book3E
 * (we keep a quarter for the virtual memmap)
 */
#define VMALLOC_START	KERN_VIRT_START
#define VMALLOC_SIZE	(KERN_VIRT_SIZE >> 2)
#define VMALLOC_END	(VMALLOC_START + VMALLOC_SIZE)

/*
 * The second half of the kernel virtual space is used for IO mappings,
 * which is itself carved into the PIO region (ISA and PHB IO space) and
 * the ioremap space
 *
 *  ISA_IO_BASE = KERN_IO_START, 64K reserved area
 *  PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces
 * IOREMAP_BASE = ISA_IO_BASE + 2G to KERN_VIRT_START + KERN_VIRT_SIZE
 */
#define KERN_IO_START	(KERN_VIRT_START + (KERN_VIRT_SIZE >> 1))
#define FULL_IO_SIZE	0x80000000ul
#define  ISA_IO_BASE	(KERN_IO_START)
#define  ISA_IO_END	(KERN_IO_START + 0x10000ul)
#define  PHB_IO_BASE	(ISA_IO_END)
#define  PHB_IO_END	(KERN_IO_START + FULL_IO_SIZE)
#define IOREMAP_BASE	(PHB_IO_END)
#define IOREMAP_END	(KERN_VIRT_START + KERN_VIRT_SIZE)

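/*
 * Worked values for the layout above (illustrative, derived from the
 * constants in this file):
 *
 *	KERN_IO_START = 0x8000080000000000
 *	ISA_IO_END    = 0x8000080000010000	(ISA_IO_BASE + 64K)
 *	PHB_IO_END    = 0x8000080080000000	(KERN_IO_START + 2G)
 *	IOREMAP_END   = 0x8000100000000000
 */
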

/*
 * Region IDs
 */
#define REGION_SHIFT		60UL
#define REGION_MASK		(0xfUL << REGION_SHIFT)
#define REGION_ID(ea)		(((unsigned long)(ea)) >> REGION_SHIFT)

#define VMALLOC_REGION_ID	(REGION_ID(VMALLOC_START))
#define KERNEL_REGION_ID	(REGION_ID(PAGE_OFFSET))
#define VMEMMAP_REGION_ID	(0xfUL)	/* Server only */
#define USER_REGION_ID		(0UL)

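/*
 * Example (illustrative): REGION_ID() simply extracts the top nibble
 * of an effective address, so REGION_ID(0x8000000000001000) == 0x8
 * (the vmalloc region), and with the usual PAGE_OFFSET of
 * 0xc000000000000000, KERNEL_REGION_ID evaluates to 0xc.
 */
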
/*
 * Defines the address of the vmemmap area, in its own region on
 * hash table CPUs and after the vmalloc space on Book3E
 */
#define VMEMMAP_BASE		VMALLOC_END
#define VMEMMAP_END		KERN_IO_START
#define vmemmap			((struct page *)VMEMMAP_BASE)


/*
 * Include the PTE bits definitions
 */
#include <asm/nohash/pte-book3e.h>
#include <asm/pte-common.h>

#ifndef __ASSEMBLY__
/* pte_clear moved to later in this file */

#define PMD_BAD_BITS		(PTE_TABLE_SIZE-1)
#define PUD_BAD_BITS		(PMD_TABLE_SIZE-1)

static inline void pmd_set(pmd_t *pmdp, unsigned long val)
{
	*pmdp = __pmd(val);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		(!is_kernel_addr(pmd_val(pmd)) \
				 || (pmd_val(pmd) & PMD_BAD_BITS))
#define pmd_present(pmd)	(!pmd_none(pmd))
#define pmd_page_vaddr(pmd)	(pmd_val(pmd) & ~PMD_MASKED_BITS)
extern struct page *pmd_page(pmd_t pmd);

static inline void pud_set(pud_t *pudp, unsigned long val)
{
	*pudp = __pud(val);
}

static inline void pud_clear(pud_t *pudp)
{
	*pudp = __pud(0);
}

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!is_kernel_addr(pud_val(pud)) \
				 || (pud_val(pud) & PUD_BAD_BITS))
#define pud_present(pud)	(pud_val(pud) != 0)
#define pud_page_vaddr(pud)	(pud_val(pud) & ~PUD_MASKED_BITS)

extern struct page *pud_page(pud_t pud);

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pud_t pte_pud(pte_t pte)
{
	return __pud(pte_val(pte));
}
#define pud_write(pud)		pte_write(pud_pte(pud))
#define pgd_write(pgd)		pte_write(pgd_pte(pgd))

static inline void pgd_set(pgd_t *pgdp, unsigned long val)
{
	*pgdp = __pgd(val);
}

/*
 * Find an entry in a page-table-directory. We combine the address region
 * (the high order N bits) and the pgd portion of the address.
 */
#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD - 1))

#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

#define pmd_offset(pudp,addr) \
	(((pmd_t *) pud_page_vaddr(*(pudp))) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))

#define pte_offset_kernel(dir,addr) \
	(((pte_t *) pmd_page_vaddr(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			do { } while(0)

/*
 * To find an entry in a kernel page-table-directory (which now only
 * contains the vmalloc pages).
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
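
#if 0
/*
 * Illustrative sketch only (not part of this header): how the
 * accessors above compose into a full walk from a kernel virtual
 * address down to its PTE. Assumes the 4K layout's pud_offset() and
 * a populated init_mm; the helper name is hypothetical.
 */
static pte_t *example_kernel_pte_walk(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || pud_bad(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);
}
#endif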
extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, unsigned long pte, int huge);

/* Atomic PTE updates */
static inline unsigned long pte_update(struct mm_struct *mm,
				       unsigned long addr,
				       pte_t *ptep, unsigned long clr,
				       unsigned long set,
				       int huge)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long old, tmp;

	/*
	 * Spin while _PAGE_BUSY is set, then atomically replace the PTE
	 * with (old & ~clr) | set using a ldarx/stdcx. sequence.
	 */
	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3		# pte_update\n\
	andi.	%1,%0,%6\n\
	bne-	1b \n\
	andc	%1,%0,%4 \n\
	or	%1,%1,%7\n\
	stdcx.	%1,0,%3 \n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*ptep)
	: "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY), "r" (set)
	: "cc" );
#else
	unsigned long old = pte_val(*ptep);
	*ptep = __pte((old & ~clr) | set);
#endif
	/* huge pages use the old page table lock */
	if (!huge)
		assert_pte_locked(mm, addr);

#ifdef CONFIG_PPC_STD_MMU_64
	if (old & _PAGE_HASHPTE)
		hpte_need_flush(mm, addr, ptep, old, huge);
#endif

	return old;
}
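
/*
 * Illustrative note (not in the original source): the clr/set pair lets
 * a caller clear and set bits in one atomic step; for example
 *
 *	pte_update(mm, addr, ptep, 0, _PAGE_DIRTY, 0);
 *
 * would mark a PTE dirty without disturbing any other bits.
 */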

static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
					      unsigned long addr, pte_t *ptep)
{
	unsigned long old;

	if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
		return 0;
	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
	return (old & _PAGE_ACCESSED) != 0;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(__vma, __addr, __ptep)		\
({									\
	int __r;							\
	__r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
	__r;								\
})

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	if ((pte_val(*ptep) & _PAGE_RW) == 0)
		return;

	pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
}

static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	if ((pte_val(*ptep) & _PAGE_RW) == 0)
		return;

	pte_update(mm, addr, ptep, _PAGE_RW, 0, 1);
}

/*
 * We currently remove entries from the hashtable regardless of whether
 * the entry was young or dirty. The generic routines only flush if the
 * entry was young or dirty, which is not good enough.
 *
 * We should be more intelligent about this but for the moment we override
 * these functions and force a tlb flush unconditionally
 */
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(__vma, __address, __ptep)		\
({									\
	int __young = __ptep_test_and_clear_young((__vma)->vm_mm, __address, \
						  __ptep);		\
	__young;							\
})

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
	return __pte(old);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	pte_update(mm, addr, ptep, ~0UL, 0, 0);
}


/*
 * Set the dirty and/or accessed bits atomically in a Linux PTE; this
 * function doesn't need to flush the hash entry.
 */
static inline void __ptep_set_access_flags(struct mm_struct *mm,
					   pte_t *ptep, pte_t entry,
					   unsigned long address)
{
	unsigned long bits = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);

#ifdef PTE_ATOMIC_UPDATES
	unsigned long old, tmp;

	/* Wait for _PAGE_BUSY to clear, then OR in the new bits atomically. */
	__asm__ __volatile__(
	"1:	ldarx	%0,0,%4\n\
	andi.	%1,%0,%6\n\
	bne-	1b \n\
	or	%0,%3,%0\n\
	stdcx.	%0,0,%4\n\
	bne-	1b"
	:"=&r" (old), "=&r" (tmp), "=m" (*ptep)
	:"r" (bits), "r" (ptep), "m" (*ptep), "i" (_PAGE_BUSY)
	:"cc");
#else
	unsigned long old = pte_val(*ptep);
	*ptep = __pte(old | bits);
#endif
}

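/*
 * Illustrative call site (hypothetical, not from this header): the
 * generic ptep_set_access_flags() path ends up folding the new bits
 * into the existing PTE roughly like
 *
 *	__ptep_set_access_flags(vma->vm_mm, ptep, entry, address);
 */
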
#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/* Encode and de-code a swap entry */
#define MAX_SWAPFILES_CHECK() do { \
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS); \
	/* \
	 * Don't have overlapping bits with _PAGE_HPTEFLAGS.	\
	 * We filter HPTEFLAGS on set_pte.			\
	 */ \
	BUILD_BUG_ON(_PAGE_HPTEFLAGS & (0x1f << _PAGE_BIT_SWAP_TYPE)); \
	} while (0)
/*
 * On the PTE side we don't need to handle RADIX_TREE_EXCEPTIONAL_SHIFT.
 */
#define SWP_TYPE_BITS 5
#define __swp_type(x)		(((x).val >> _PAGE_BIT_SWAP_TYPE) \
				& ((1UL << SWP_TYPE_BITS) - 1))
#define __swp_offset(x)		((x).val >> PTE_RPN_SHIFT)
#define __swp_entry(type, offset)	((swp_entry_t) { \
					((type) << _PAGE_BIT_SWAP_TYPE) \
					| ((offset) << PTE_RPN_SHIFT) })

#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val((pte)) })
#define __swp_entry_to_pte(x)		__pte((x).val)

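/*
 * Worked round trip (illustrative, with hypothetical values):
 * __swp_entry(2, 0x1234) packs type 2 at _PAGE_BIT_SWAP_TYPE and
 * offset 0x1234 at PTE_RPN_SHIFT, and __swp_type()/__swp_offset()
 * recover 2 and 0x1234 from the resulting entry.
 */
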
extern int map_kernel_page(unsigned long ea, unsigned long pa,
			   unsigned long flags);
extern int __meminit vmemmap_create_mapping(unsigned long start,
					    unsigned long page_size,
					    unsigned long phys);
extern void vmemmap_remove_mapping(unsigned long start,
				   unsigned long page_size);
#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_NOHASH_64_PGTABLE_H */