powerpc/mm/hash64: Map all the kernel regions in the same 0xc range
arch/powerpc/include/asm/book3s/64/hash.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_HASH_H
#define _ASM_POWERPC_BOOK3S_64_HASH_H
#ifdef __KERNEL__

#include <asm/asm-const.h>

/*
 * Common bits between 4K and 64K pages in a linux-style PTE.
 * Additional bits may be defined in pgtable-hash64-*.h
 */
#define H_PTE_NONE_MASK		_PAGE_HPTEFLAGS

#ifdef CONFIG_PPC_64K_PAGES
#include <asm/book3s/64/hash-64k.h>
#else
#include <asm/book3s/64/hash-4k.h>
#endif

/* Bits to set in a PMD/PUD/PGD entry to mark it valid */
#define HASH_PMD_VAL_BITS		(0x8000000000000000UL)
#define HASH_PUD_VAL_BITS		(0x8000000000000000UL)
#define HASH_PGD_VAL_BITS		(0x8000000000000000UL)

/*
 * Size of EA range mapped by our pagetables.
 */
#define H_PGTABLE_EADDR_SIZE	(H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE + \
				 H_PUD_INDEX_SIZE + H_PGD_INDEX_SIZE + PAGE_SHIFT)
#define H_PGTABLE_RANGE		(ASM_CONST(1) << H_PGTABLE_EADDR_SIZE)
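/*
 * Illustrative arithmetic only (the real index sizes live in
 * hash-4k.h / hash-64k.h and vary by config): assuming a 64K-page
 * build with index sizes of 8 (PTE) + 10 (PMD) + 10 (PUD) + 8 (PGD)
 * and PAGE_SHIFT = 16, H_PGTABLE_EADDR_SIZE works out to 52, so
 * H_PGTABLE_RANGE = 1UL << 52, i.e. 4PB of effective address space.
 */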
/*
 * Top 2 bits are ignored in page table walk.
 */
#define EA_MASK			(~(0xcUL << 60))
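/*
 * Example (for illustration only): EA_MASK clears bits 63:62, so a
 * kernel EA in the 0xc range reduces to the offset used for the
 * page table walk:
 *
 *	0xc008000000000000UL & EA_MASK == 0x0008000000000000UL
 */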

/*
 * We store the slot details in the second half of the page table.
 * Increase the pud level table so that hugetlb ptes can be stored
 * at pud level.
 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_64K_PAGES)
#define H_PUD_CACHE_INDEX	(H_PUD_INDEX_SIZE + 1)
#else
#define H_PUD_CACHE_INDEX	(H_PUD_INDEX_SIZE)
#endif
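/*
 * For example (hypothetical size, for illustration only): with
 * H_PUD_INDEX_SIZE = 10, the cache index becomes 11, doubling the
 * allocation so the second half can hold the HPTE slot details for
 * hugetlb ptes at pud level.
 */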

/*
 * +------------------------------+
 * |                              |
 * |                              |
 * |                              |
 * +------------------------------+  Kernel virtual map end (0xc00e000000000000)
 * |                              |
 * |                              |
 * |     512TB/16TB of vmemmap    |
 * |                              |
 * |                              |
 * +------------------------------+  Kernel vmemmap start
 * |                              |
 * |     512TB/16TB of IO map     |
 * |                              |
 * +------------------------------+  Kernel IO map start
 * |                              |
 * |     512TB/16TB of vmap       |
 * |                              |
 * +------------------------------+  Kernel virt start (0xc008000000000000)
 * |                              |
 * |                              |
 * |                              |
 * +------------------------------+  Kernel linear (0xc.....)
 */

#define H_VMALLOC_START		H_KERN_VIRT_START
#define H_VMALLOC_SIZE		H_KERN_MAP_SIZE
#define H_VMALLOC_END		(H_VMALLOC_START + H_VMALLOC_SIZE)

#define H_KERN_IO_START		H_VMALLOC_END
#define H_KERN_IO_SIZE		H_KERN_MAP_SIZE
#define H_KERN_IO_END		(H_KERN_IO_START + H_KERN_IO_SIZE)

#define H_VMEMMAP_START		H_KERN_IO_END
#define H_VMEMMAP_SIZE		H_KERN_MAP_SIZE
#define H_VMEMMAP_END		(H_VMEMMAP_START + H_VMEMMAP_SIZE)
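/*
 * Worked example, assuming H_KERN_VIRT_START = 0xc008000000000000 and
 * H_KERN_MAP_SIZE = 512TB (0x0002000000000000), i.e. the 64K-page
 * layout in the diagram above:
 *
 *	H_VMALLOC_START = 0xc008000000000000  H_VMALLOC_END = 0xc00a000000000000
 *	H_KERN_IO_START = 0xc00a000000000000  H_KERN_IO_END = 0xc00c000000000000
 *	H_VMEMMAP_START = 0xc00c000000000000  H_VMEMMAP_END = 0xc00e000000000000
 *
 * H_VMEMMAP_END matches the "Kernel virtual map end" in the diagram.
 */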

/*
 * Region IDs
 */
#define USER_REGION_ID		1
#define KERNEL_REGION_ID	2
#define VMALLOC_REGION_ID	3
#define IO_REGION_ID		4
#define VMEMMAP_REGION_ID	5

/*
 * Defines the address of the vmemmap area, in its own region on
 * hash table CPUs.
 */

#ifdef CONFIG_PPC_MM_SLICES
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#endif /* CONFIG_PPC_MM_SLICES */

/* PTEIDX nibble */
#define _PTEIDX_SECONDARY	0x8
#define _PTEIDX_GROUP_IX	0x7
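/*
 * Descriptive note: the saved nibble records where the HPTE was
 * inserted. Bit 3 (_PTEIDX_SECONDARY) set means the secondary hash
 * bucket was used; bits 0-2 (_PTEIDX_GROUP_IX) index the slot within
 * the 8-entry HPTE group.
 */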

#define H_PMD_BAD_BITS		(PTE_TABLE_SIZE-1)
#define H_PUD_BAD_BITS		(PMD_TABLE_SIZE-1)

#ifndef __ASSEMBLY__
static inline int get_region_id(unsigned long ea)
{
	int id = (ea >> 60UL);

	if (id == 0)
		return USER_REGION_ID;

	VM_BUG_ON(id != 0xc);
	VM_BUG_ON(ea >= H_VMEMMAP_END);

	if (ea >= H_VMEMMAP_START)
		return VMEMMAP_REGION_ID;
	else if (ea >= H_KERN_IO_START)
		return IO_REGION_ID;
	else if (ea >= H_VMALLOC_START)
		return VMALLOC_REGION_ID;

	return KERNEL_REGION_ID;
}
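/*
 * Example lookups (a sketch, assuming the 64K-page boundaries worked
 * out above; not exercised by this header):
 *
 *	get_region_id(0x0000123456789000UL) == USER_REGION_ID
 *	get_region_id(0xc000000000001000UL) == KERNEL_REGION_ID
 *	get_region_id(0xc008000000001000UL) == VMALLOC_REGION_ID
 *	get_region_id(0xc00a000000001000UL) == IO_REGION_ID
 *	get_region_id(0xc00c000000001000UL) == VMEMMAP_REGION_ID
 */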

#define hash__pmd_bad(pmd)	(pmd_val(pmd) & H_PMD_BAD_BITS)
#define hash__pud_bad(pud)	(pud_val(pud) & H_PUD_BAD_BITS)
static inline int hash__pgd_bad(pgd_t pgd)
{
	return (pgd_val(pgd) == 0);
}
#ifdef CONFIG_STRICT_KERNEL_RWX
extern void hash__mark_rodata_ro(void);
extern void hash__mark_initmem_nx(void);
#endif

extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, unsigned long pte, int huge);
extern unsigned long htab_convert_pte_flags(unsigned long pteflags);
/* Atomic PTE updates */
static inline unsigned long hash__pte_update(struct mm_struct *mm,
					     unsigned long addr,
					     pte_t *ptep, unsigned long clr,
					     unsigned long set,
					     int huge)
{
	__be64 old_be, tmp_be;
	unsigned long old;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3		# pte_update\n\
	and.	%1,%0,%6\n\
	bne-	1b \n\
	andc	%1,%0,%4 \n\
	or	%1,%1,%7\n\
	stdcx.	%1,0,%3 \n\
	bne-	1b"
	: "=&r" (old_be), "=&r" (tmp_be), "=m" (*ptep)
	: "r" (ptep), "r" (cpu_to_be64(clr)), "m" (*ptep),
	  "r" (cpu_to_be64(H_PAGE_BUSY)), "r" (cpu_to_be64(set))
	: "cc" );
	/* huge pages use the old page table lock */
	if (!huge)
		assert_pte_locked(mm, addr);

	old = be64_to_cpu(old_be);
	if (old & H_PAGE_HASHPTE)
		hpte_need_flush(mm, addr, ptep, old, huge);

	return old;
}
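/*
 * Plain-C sketch of what the ldarx/stdcx. loop above does (for
 * explanation only; the real update must be a single atomic
 * reservation sequence, which is why it stays in assembly):
 *
 *	do {
 *		old = *ptep;				// ldarx
 *	} while (old & cpu_to_be64(H_PAGE_BUSY));	// and. / bne- 1b
 *	new = (old & ~cpu_to_be64(clr)) | cpu_to_be64(set); // andc / or
 *	*ptep = new;	// stdcx., retried (bne- 1b) if the reservation
 *			// was lost
 */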

/*
 * Set the dirty and/or accessed bits atomically in a linux PTE.
 * This function doesn't need to flush the hash entry.
 */
static inline void hash__ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
	__be64 old, tmp, val, mask;

	mask = cpu_to_be64(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_READ | _PAGE_WRITE |
			   _PAGE_EXEC | _PAGE_SOFT_DIRTY);

	val = pte_raw(entry) & mask;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%4\n\
		and.	%1,%0,%6\n\
		bne-	1b \n\
		or	%0,%3,%0\n\
		stdcx.	%0,0,%4\n\
		bne-	1b"
	:"=&r" (old), "=&r" (tmp), "=m" (*ptep)
	:"r" (val), "r" (ptep), "m" (*ptep), "r" (cpu_to_be64(H_PAGE_BUSY))
	:"cc");
}

static inline int hash__pte_same(pte_t pte_a, pte_t pte_b)
{
	return (((pte_raw(pte_a) ^ pte_raw(pte_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
}

static inline int hash__pte_none(pte_t pte)
{
	return (pte_val(pte) & ~H_PTE_NONE_MASK) == 0;
}

unsigned long pte_get_hash_gslot(unsigned long vpn, unsigned long shift,
		int ssize, real_pte_t rpte, unsigned int subpg_index);

/*
 * This low level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors; it's
 * a horrible mess that I'm not going to try to clean up now, but
 * I'm keeping it in one place rather than spread around.
 */
static inline void hash__set_pte_at(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep, pte_t pte, int percpu)
{
	/*
	 * Store the PTE normally. That covers all 64-bit cases, and
	 * 32-bit non-hash with 32-bit PTEs.
	 */
	*ptep = pte;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
				   pmd_t *pmdp, unsigned long old_pmd);
#else
static inline void hpte_do_hugepage_flush(struct mm_struct *mm,
					  unsigned long addr, pmd_t *pmdp,
					  unsigned long old_pmd)
{
	WARN(1, "%s called with THP disabled\n", __func__);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot);
extern int __meminit hash__vmemmap_create_mapping(unsigned long start,
						  unsigned long page_size,
						  unsigned long phys);
extern void hash__vmemmap_remove_mapping(unsigned long start,
					 unsigned long page_size);

int hash__create_section_mapping(unsigned long start, unsigned long end, int nid);
int hash__remove_section_mapping(unsigned long start, unsigned long end);

#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */