#ifndef _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_
#define _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_
/*
 * PowerPC64 memory management structures
 *
 * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
 *   PPC64 rework.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/asm-compat.h>
#include <asm/page.h>
#include <asm/bug.h>

/*
 * This is necessary to get the definition of PGTABLE_RANGE which we
 * need for various slice-related matters. Note that this isn't the
 * complete pgtable.h but only a portion of it.
 */
#include <asm/book3s/64/pgtable.h>
#include <asm/processor.h>
#include <asm/cpu_has_feature.h>

/*
 * SLB
 */

#define SLB_NUM_BOLTED		3
#define SLB_CACHE_ENTRIES	8
#define SLB_MIN_SIZE		32

/* Bits in the SLB ESID word */
#define SLB_ESID_V		ASM_CONST(0x0000000008000000) /* valid */

/* Bits in the SLB VSID word */
#define SLB_VSID_SHIFT		12
#define SLB_VSID_SHIFT_256M	SLB_VSID_SHIFT
#define SLB_VSID_SHIFT_1T	24
#define SLB_VSID_SSIZE_SHIFT	62
#define SLB_VSID_B		ASM_CONST(0xc000000000000000)
#define SLB_VSID_B_256M		ASM_CONST(0x0000000000000000)
#define SLB_VSID_B_1T		ASM_CONST(0x4000000000000000)
#define SLB_VSID_KS		ASM_CONST(0x0000000000000800)
#define SLB_VSID_KP		ASM_CONST(0x0000000000000400)
#define SLB_VSID_N		ASM_CONST(0x0000000000000200) /* no-execute */
#define SLB_VSID_L		ASM_CONST(0x0000000000000100)
#define SLB_VSID_C		ASM_CONST(0x0000000000000080) /* class */
#define SLB_VSID_LP		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LP_00		ASM_CONST(0x0000000000000000)
#define SLB_VSID_LP_01		ASM_CONST(0x0000000000000010)
#define SLB_VSID_LP_10		ASM_CONST(0x0000000000000020)
#define SLB_VSID_LP_11		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LLP		(SLB_VSID_L|SLB_VSID_LP)

#define SLB_VSID_KERNEL		(SLB_VSID_KP)
#define SLB_VSID_USER		(SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)

#define SLBIE_C			(0x08000000)
#define SLBIE_SSIZE_SHIFT	25
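
/*
 * Illustrative sketch (not taken verbatim from this header): SLB
 * bolting code typically assembles the VSID dword of an entry for a
 * kernel 256M segment roughly as
 *
 *	vsid_data = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
 *		    SLB_VSID_KERNEL | mmu_psize_defs[psize].sllp |
 *		    ((unsigned long)MMU_SEGSIZE_256M << SLB_VSID_SSIZE_SHIFT);
 */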

/*
 * Hash table
 */

#define HPTES_PER_GROUP	8

#define HPTE_V_SSIZE_SHIFT	62
#define HPTE_V_AVPN_SHIFT	7
#define HPTE_V_COMMON_BITS	ASM_CONST(0x000fffffffffffff)
#define HPTE_V_AVPN		ASM_CONST(0x3fffffffffffff80)
#define HPTE_V_AVPN_3_0		ASM_CONST(0x000fffffffffff80)
#define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_COMPARE(x,y)	(!(((x) ^ (y)) & 0xffffffffffffff80UL))
#define HPTE_V_BOLTED		ASM_CONST(0x0000000000000010)
#define HPTE_V_LOCK		ASM_CONST(0x0000000000000008)
#define HPTE_V_LARGE		ASM_CONST(0x0000000000000004)
#define HPTE_V_SECONDARY	ASM_CONST(0x0000000000000002)
#define HPTE_V_VALID		ASM_CONST(0x0000000000000001)

/*
 * ISA 3.0 has a different HPTE format.
 */
#define HPTE_R_3_0_SSIZE_SHIFT	58
#define HPTE_R_3_0_SSIZE_MASK	(3ull << HPTE_R_3_0_SSIZE_SHIFT)
#define HPTE_R_PP0		ASM_CONST(0x8000000000000000)
#define HPTE_R_TS		ASM_CONST(0x4000000000000000)
#define HPTE_R_KEY_HI		ASM_CONST(0x3000000000000000)
#define HPTE_R_RPN_SHIFT	12
#define HPTE_R_RPN		ASM_CONST(0x0ffffffffffff000)
#define HPTE_R_RPN_3_0		ASM_CONST(0x01fffffffffff000)
#define HPTE_R_PP		ASM_CONST(0x0000000000000003)
#define HPTE_R_PPP		ASM_CONST(0x8000000000000003)
#define HPTE_R_N		ASM_CONST(0x0000000000000004)
#define HPTE_R_G		ASM_CONST(0x0000000000000008)
#define HPTE_R_M		ASM_CONST(0x0000000000000010)
#define HPTE_R_I		ASM_CONST(0x0000000000000020)
#define HPTE_R_W		ASM_CONST(0x0000000000000040)
#define HPTE_R_WIMG		ASM_CONST(0x0000000000000078)
#define HPTE_R_C		ASM_CONST(0x0000000000000080)
#define HPTE_R_R		ASM_CONST(0x0000000000000100)
#define HPTE_R_KEY_LO		ASM_CONST(0x0000000000000e00)

#define HPTE_V_1TB_SEG		ASM_CONST(0x4000000000000000)
#define HPTE_V_VRMA_MASK	ASM_CONST(0x4001ffffff000000)

/* Values for PP (assumes Ks=0, Kp=1) */
#define PP_RWXX	0	/* Supervisor read/write, User none */
#define PP_RWRX	1	/* Supervisor read/write, User read */
#define PP_RWRW	2	/* Supervisor read/write, User read/write */
#define PP_RXRX	3	/* Supervisor read,       User read */
#define PP_RXXX	(HPTE_R_PP0 | 2)	/* Supervisor read, user none */

/* Fields for tlbiel instruction in architecture 2.06 */
#define TLBIEL_INVAL_SEL_MASK	0xc00	/* invalidation selector */
#define  TLBIEL_INVAL_PAGE	0x000	/* invalidate a single page */
#define  TLBIEL_INVAL_SET_LPID	0x800	/* invalidate a set for current LPID */
#define  TLBIEL_INVAL_SET	0xc00	/* invalidate a set for all LPIDs */
#define TLBIEL_INVAL_SET_MASK	0xfff000	/* set number to inval. */
#define TLBIEL_INVAL_SET_SHIFT	12

#define POWER7_TLB_SETS		128	/* # sets in POWER7 TLB */
#define POWER8_TLB_SETS		512	/* # sets in POWER8 TLB */
#define POWER9_TLB_SETS_HASH	256	/* # sets in POWER9 TLB Hash mode */
#define POWER9_TLB_SETS_RADIX	128	/* # sets in POWER9 TLB Radix mode */

#ifndef __ASSEMBLY__

struct mmu_hash_ops {
	void		(*hpte_invalidate)(unsigned long slot,
					   unsigned long vpn,
					   int bpsize, int apsize,
					   int ssize, int local);
	long		(*hpte_updatepp)(unsigned long slot,
					 unsigned long newpp,
					 unsigned long vpn,
					 int bpsize, int apsize,
					 int ssize, unsigned long flags);
	void		(*hpte_updateboltedpp)(unsigned long newpp,
					       unsigned long ea,
					       int psize, int ssize);
	long		(*hpte_insert)(unsigned long hpte_group,
				       unsigned long vpn,
				       unsigned long prpn,
				       unsigned long rflags,
				       unsigned long vflags,
				       int psize, int apsize,
				       int ssize);
	long		(*hpte_remove)(unsigned long hpte_group);
	int		(*hpte_removebolted)(unsigned long ea,
					     int psize, int ssize);
	void		(*flush_hash_range)(unsigned long number, int local);
	void		(*hugepage_invalidate)(unsigned long vsid,
					       unsigned long addr,
					       unsigned char *hpte_slot_array,
					       int psize, int ssize, int local);
	int		(*resize_hpt)(unsigned long shift);
	/*
	 * Special for kexec.
	 * To be called in real mode with interrupts disabled. No locks are
	 * taken, so concurrent access on pre-POWER5 hardware could result
	 * in a deadlock.
	 * The linear mapping is destroyed as well.
	 */
	void		(*hpte_clear_all)(void);
};
extern struct mmu_hash_ops mmu_hash_ops;

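/*
 * Usage sketch: a platform backend populates mmu_hash_ops at boot (the
 * callback names below are illustrative), after which generic hash-MMU
 * code stays backend-agnostic:
 *
 *	mmu_hash_ops.hpte_insert = native_hpte_insert;
 *	mmu_hash_ops.hpte_remove = native_hpte_remove;
 *	...
 *	slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags,
 *					vflags, psize, apsize, ssize);
 */
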
struct hash_pte {
	__be64 v;
	__be64 r;
};

extern struct hash_pte *htab_address;
extern unsigned long htab_size_bytes;
extern unsigned long htab_hash_mask;

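/*
 * Note (illustrative): HPTE dwords live in the hardware hash table in
 * big-endian byte order, hence the __be64 fields above; C code converts
 * on access, e.g.
 *
 *	unsigned long v = be64_to_cpu(htab_address[slot].v);
 */
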
static inline int shift_to_mmu_psize(unsigned int shift)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
		if (mmu_psize_defs[psize].shift == shift)
			return psize;
	return -1;
}

static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
{
	if (mmu_psize_defs[mmu_psize].shift)
		return mmu_psize_defs[mmu_psize].shift;
	BUG();
}

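/*
 * Usage sketch: on a configuration that defines a 64K page size,
 * shift_to_mmu_psize(16) returns MMU_PAGE_64K, and
 * mmu_psize_to_shift(MMU_PAGE_16M) returns 24 (2^24 == 16M).
 */
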
static inline unsigned long get_sllp_encoding(int psize)
{
	unsigned long sllp;

	sllp = ((mmu_psize_defs[psize].sllp & SLB_VSID_L) >> 6) |
		((mmu_psize_defs[psize].sllp & SLB_VSID_LP) >> 4);
	return sllp;
}

#endif /* __ASSEMBLY__ */

/*
 * Segment sizes.
 * These are the values used by hardware in the B field of
 * SLB entries and the first dword of MMU hashtable entries.
 * The B field is 2 bits; the values 2 and 3 are unused and reserved.
 */
#define MMU_SEGSIZE_256M	0
#define MMU_SEGSIZE_1T		1

/*
 * encode page number shift.
 * in order to fit the 78 bit va in a 64 bit variable we shift the va by
 * 12 bits. This enables us to address up to a 76 bit va.
 * For hpt hash from a va we can ignore the page size bits of va and for
 * hpte encoding we ignore up to 23 bits of va. So ignoring the lower 12 bits
 * ensures we work in all cases including 4k page size.
 */
#define VPN_SHIFT	12

/*
 * HPTE Large Page (LP) details
 */
#define LP_SHIFT	12
#define LP_BITS		8
#define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)

#ifndef __ASSEMBLY__

static inline int slb_vsid_shift(int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return SLB_VSID_SHIFT;
	return SLB_VSID_SHIFT_1T;
}

static inline int segment_shift(int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return SID_SHIFT;
	return SID_SHIFT_1T;
}

/*
 * This array is indexed by the LP field of the HPTE second dword.
 * Since this field may contain some RPN bits, some entries are
 * replicated so that we get the same value irrespective of RPN.
 * The top 4 bits are the page size index (MMU_PAGE_*) for the
 * actual page size, the bottom 4 bits are the base page size.
 */
extern u8 hpte_page_sizes[1 << LP_BITS];

static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l,
					     bool is_base_size)
{
	unsigned int i, lp;

	if (!(h & HPTE_V_LARGE))
		return 1ul << 12;

	/* Look at the 8 bit LP value */
	lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);
	i = hpte_page_sizes[lp];
	if (!i)
		return 0;
	if (!is_base_size)
		i >>= 4;
	return 1ul << mmu_psize_defs[i & 0xf].shift;
}

static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
{
	return __hpte_page_size(h, l, 0);
}

static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l)
{
	return __hpte_page_size(h, l, 1);
}

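/*
 * Worked sketch: a non-large HPTE (HPTE_V_LARGE clear) always decodes as
 * 1ul << 12 (4K). For a large HPTE whose LP byte maps through
 * hpte_page_sizes[] to an entry with MMU_PAGE_64K in both nibbles,
 * hpte_page_size() and hpte_base_page_size() both return 1ul << 16.
 */
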
/*
 * The current system page and segment sizes
 */
extern int mmu_kernel_ssize;
extern int mmu_highuser_ssize;
extern u16 mmu_slb_size;
extern unsigned long tce_alloc_start, tce_alloc_end;

/*
 * If the processor supports 64k normal pages but not 64k cache
 * inhibited pages, we have to be prepared to switch processes
 * to use 4k pages when they create cache-inhibited mappings.
 * If this is the case, mmu_ci_restrictions will be set to 1.
 */
extern int mmu_ci_restrictions;

/*
 * This computes the AVPN and B fields of the first dword of a HPTE,
 * for use when we want to match an existing PTE. The bottom 7 bits
 * of the returned value are zero.
 */
static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
					     int ssize)
{
	unsigned long v;
	/*
	 * The AVA field omits the low-order 23 bits of the 78-bit VA.
	 * These bits are not needed in the PTE, because the
	 * low-order b of these bits are part of the byte offset
	 * into the virtual page and, if b < 23, the high-order
	 * 23-b of these bits are always used in selecting the
	 * PTEGs to be searched.
	 */
	v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
	v <<= HPTE_V_AVPN_SHIFT;
	v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
	return v;
}

/*
 * ISA v3.0 defines a new HPTE format, which differs from the old
 * format in having smaller AVPN and ARPN fields, and the B field
 * in the second dword instead of the first.
 */
static inline unsigned long hpte_old_to_new_v(unsigned long v)
{
	/* trim AVPN, drop B */
	return v & HPTE_V_COMMON_BITS;
}

static inline unsigned long hpte_old_to_new_r(unsigned long v, unsigned long r)
{
	/* move B field from 1st to 2nd dword, trim ARPN */
	return (r & ~HPTE_R_3_0_SSIZE_MASK) |
	       (((v) >> HPTE_V_SSIZE_SHIFT) << HPTE_R_3_0_SSIZE_SHIFT);
}

static inline unsigned long hpte_new_to_old_v(unsigned long v, unsigned long r)
{
	/* insert B field */
	return (v & HPTE_V_COMMON_BITS) |
		((r & HPTE_R_3_0_SSIZE_MASK) <<
		 (HPTE_V_SSIZE_SHIFT - HPTE_R_3_0_SSIZE_SHIFT));
}

static inline unsigned long hpte_new_to_old_r(unsigned long r)
{
	/* clear out B field */
	return r & ~HPTE_R_3_0_SSIZE_MASK;
}

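/*
 * Round-trip sketch: for an old-format pair (v, r),
 *
 *	new_v = hpte_old_to_new_v(v);
 *	new_r = hpte_old_to_new_r(v, r);
 *
 * hpte_new_to_old_v(new_v, new_r) recovers v's common bits plus its B
 * (segment size) field, and hpte_new_to_old_r(new_r) recovers r apart
 * from the two high ARPN bits that the new format gives up to B.
 */
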
/*
 * This function sets the AVPN and L fields of the HPTE appropriately
 * using the base page size and actual page size.
 */
static inline unsigned long hpte_encode_v(unsigned long vpn, int base_psize,
					  int actual_psize, int ssize)
{
	unsigned long v;
	v = hpte_encode_avpn(vpn, base_psize, ssize);
	if (actual_psize != MMU_PAGE_4K)
		v |= HPTE_V_LARGE;
	return v;
}

/*
 * This function sets the ARPN and LP fields of the HPTE appropriately
 * for the page size. We assume the pa is already "clean", that is,
 * properly aligned for the requested page size.
 */
static inline unsigned long hpte_encode_r(unsigned long pa, int base_psize,
					  int actual_psize)
{
	/* A 4K page needs no special encoding */
	if (actual_psize == MMU_PAGE_4K)
		return pa & HPTE_R_RPN;
	else {
		unsigned int penc = mmu_psize_defs[base_psize].penc[actual_psize];
		unsigned int shift = mmu_psize_defs[actual_psize].shift;
		return (pa & ~((1ul << shift) - 1)) | (penc << LP_SHIFT);
	}
}
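
/*
 * Usage sketch (mirrors what hpte_insert() backends do with the vflags
 * and rflags they are passed; the page sizes here are illustrative):
 *
 *	v = hpte_encode_v(vpn, MMU_PAGE_16M, MMU_PAGE_16M, ssize) |
 *	    vflags | HPTE_V_VALID;
 *	r = hpte_encode_r(pa, MMU_PAGE_16M, MMU_PAGE_16M) | rflags;
 */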

/*
 * Build a VPN_SHIFT-bit shifted va given VSID, EA and segment size.
 */
static inline unsigned long hpt_vpn(unsigned long ea,
				    unsigned long vsid, int ssize)
{
	unsigned long mask;
	int s_shift = segment_shift(ssize);

	mask = (1ul << (s_shift - VPN_SHIFT)) - 1;
	return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
}
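
/*
 * Worked sketch: for a 256M segment, SID_SHIFT is 28, so
 * s_shift - VPN_SHIFT == 16 and the result packs as
 *
 *	vpn = (vsid << 16) | ((ea >> 12) & 0xffff);
 */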

/*
 * This hashes a virtual address
 */
static inline unsigned long hpt_hash(unsigned long vpn,
				     unsigned int shift, int ssize)
{
	unsigned long mask;
	unsigned long hash, vsid;

	/* VPN_SHIFT can be at most 12 */
	if (ssize == MMU_SEGSIZE_256M) {
		mask = (1ul << (SID_SHIFT - VPN_SHIFT)) - 1;
		hash = (vpn >> (SID_SHIFT - VPN_SHIFT)) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	} else {
		mask = (1ul << (SID_SHIFT_1T - VPN_SHIFT)) - 1;
		vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);
		hash = vsid ^ (vsid << 25) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	}
	return hash & 0x7fffffffffUL;
}

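/*
 * Sketch of how the hash is consumed (mirrors the generic hash fault
 * path): the primary PTE group is selected as
 *
 *	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 *
 * and the secondary group uses the inverted hash, ~hash, the same way.
 */
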
#define HPTE_LOCAL_UPDATE	0x1
#define HPTE_NOHPTE_UPDATE	0x2

extern int __hash_page_4K(unsigned long ea, unsigned long access,
			  unsigned long vsid, pte_t *ptep, unsigned long trap,
			  unsigned long flags, int ssize, int subpage_prot);
extern int __hash_page_64K(unsigned long ea, unsigned long access,
			   unsigned long vsid, pte_t *ptep, unsigned long trap,
			   unsigned long flags, int ssize);
struct mm_struct;
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap);
extern int hash_page_mm(struct mm_struct *mm, unsigned long ea,
			unsigned long access, unsigned long trap,
			unsigned long flags);
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
		     unsigned long dsisr);
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
		     pte_t *ptep, unsigned long trap, unsigned long flags,
		     int ssize, unsigned int shift, unsigned int mmu_psize);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int __hash_page_thp(unsigned long ea, unsigned long access,
			   unsigned long vsid, pmd_t *pmdp, unsigned long trap,
			   unsigned long flags, int ssize, unsigned int psize);
#else
static inline int __hash_page_thp(unsigned long ea, unsigned long access,
				  unsigned long vsid, pmd_t *pmdp,
				  unsigned long trap, unsigned long flags,
				  int ssize, unsigned int psize)
{
	BUG();
	return -1;
}
#endif
extern void hash_failure_debug(unsigned long ea, unsigned long access,
			       unsigned long vsid, unsigned long trap,
			       int ssize, int psize, int lpsize,
			       unsigned long pte);
extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
			     unsigned long pstart, unsigned long prot,
			     int psize, int ssize);
int htab_remove_mapping(unsigned long vstart, unsigned long vend,
			int psize, int ssize);
extern void pseries_add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages);
extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);

#ifdef CONFIG_PPC_PSERIES
void hpte_init_pseries(void);
#else
static inline void hpte_init_pseries(void) { }
#endif

extern void hpte_init_native(void);

extern void slb_initialize(void);
extern void slb_flush_and_rebolt(void);

extern void slb_vmalloc_update(void);
extern void slb_set_size(u16 size);
#endif /* __ASSEMBLY__ */

/*
 * VSID allocation (256MB segment)
 *
 * We first generate a 37-bit "proto-VSID". Proto-VSIDs are generated
 * from the mmu context id and the effective segment id of the address.
 *
 * For user processes the max context id is limited to MAX_USER_CONTEXT.
 *
 * For kernel space, we use context ids 1-4 to map addresses as below:
 * NOTE: each context only supports 64TB now.
 * 0x00001 -  [ 0xc000000000000000 - 0xc0003fffffffffff ]
 * 0x00002 -  [ 0xd000000000000000 - 0xd0003fffffffffff ]
 * 0x00003 -  [ 0xe000000000000000 - 0xe0003fffffffffff ]
 * 0x00004 -  [ 0xf000000000000000 - 0xf0003fffffffffff ]
 *
 * The proto-VSIDs are then scrambled into real VSIDs with the
 * multiplicative hash:
 *
 *	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
 *
 * VSID_MULTIPLIER is prime, so in particular it is
 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
 * Because the modulus is 2^n-1 we can compute it efficiently without
 * a divide or extra multiply (see below). The scramble function gives
 * robust scattering in the hash table (at least based on some initial
 * results).
 *
 * We use VSID 0 to indicate an invalid VSID. That means we can't use
 * context id 0, because a context id of 0 and an EA of 0 gives a
 * proto-VSID of 0, which will produce a VSID of 0.
 *
 * We also need to avoid the last segment of the last context, because that
 * would give a proto-VSID of 0x1fffffffff. That will result in a VSID 0
 * because of the modulo operation in the vsid scramble.
 */

/*
 * The max VA bits we support as of now is 68 bits. We want a 19-bit
 * context ID.
 * Restrictions:
 * The GPU cannot access beyond 128TB (a 47-bit effective address), and
 * we also cannot do more than a 20-bit PID.
 * For p4 and p5, which can only do a 65-bit VA, we restrict our CONTEXT_BITS
 * to 16 bits (i.e., we can only have 2^16 pids at the same time).
 */
#define VA_BITS			68
#define CONTEXT_BITS		19
#define ESID_BITS		(VA_BITS - (SID_SHIFT + CONTEXT_BITS))
#define ESID_BITS_1T		(VA_BITS - (SID_SHIFT_1T + CONTEXT_BITS))

#define ESID_BITS_MASK		((1 << ESID_BITS) - 1)
#define ESID_BITS_1T_MASK	((1 << ESID_BITS_1T) - 1)

/*
 * 256MB segment
 * The proto-VSID space has 2^(CONTEXT_BITS + ESID_BITS) - 1 segments
 * available for user + kernel mapping. VSID 0 is reserved as invalid, contexts
 * 1-4 are used for kernel mapping. Each segment contains 2^28 bytes. Each
 * context maps 2^49 bytes (512TB).
 *
 * We also need to avoid the last segment of the last context, because that
 * would give a proto-VSID of 0x1fffffffff. That will result in a VSID 0
 * because of the modulo operation in the vsid scramble.
 */
#define MAX_USER_CONTEXT	((ASM_CONST(1) << CONTEXT_BITS) - 2)
#define MIN_USER_CONTEXT	(5)

/* Would be nice to use KERNEL_REGION_ID here */
#define KERNEL_REGION_CONTEXT_OFFSET	(0xc - 1)

/*
 * For platforms that support only a 65-bit VA we limit the context bits
 */
#define MAX_USER_CONTEXT_65BIT_VA ((ASM_CONST(1) << (65 - (SID_SHIFT + ESID_BITS))) - 2)

/*
 * This should be computed such that protovsid * vsid_multiplier
 * doesn't overflow 64 bits. The vsid_multiplier should also be
 * co-prime to vsid_modulus. We also need to make sure that the number
 * of bits in the multiplied result (dividend) is less than twice the number
 * of protovsid bits for our modulus optimization to work.
 *
 * The below table shows the current values used.
 * |-------+------------+----------------------+------------+--------------------|
 * |       | Prime Bits | proto VSID_BITS_65VA | Total Bits | 2 * proto VSID_BITS |
 * |-------+------------+----------------------+------------+--------------------|
 * | 1T    | 24         | 25                   | 49         | 50                 |
 * |-------+------------+----------------------+------------+--------------------|
 * | 256MB | 24         | 37                   | 61         | 74                 |
 * |-------+------------+----------------------+------------+--------------------|
 *
 * |-------+------------+----------------------+------------+--------------------|
 * |       | Prime Bits | proto VSID_BITS_68VA | Total Bits | 2 * proto VSID_BITS |
 * |-------+------------+----------------------+------------+--------------------|
 * | 1T    | 24         | 28                   | 52         | 56                 |
 * |-------+------------+----------------------+------------+--------------------|
 * | 256MB | 24         | 40                   | 64         | 80                 |
 * |-------+------------+----------------------+------------+--------------------|
 *
 */
#define VSID_MULTIPLIER_256M	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_256M		(VA_BITS - SID_SHIFT)
#define VSID_BITS_65_256M	(65 - SID_SHIFT)
/*
 * Modular multiplicative inverse of VSID_MULTIPLIER under modulo VSID_MODULUS
 */
#define VSID_MULINV_256M	ASM_CONST(665548017062)

#define VSID_MULTIPLIER_1T	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_1T		(VA_BITS - SID_SHIFT_1T)
#define VSID_BITS_65_1T		(65 - SID_SHIFT_1T)
#define VSID_MULINV_1T		ASM_CONST(209034062)

/* 1TB VSID reserved for VRMA */
#define VRMA_VSID	0x1ffffffUL
#define USER_VSID_RANGE	(1UL << (ESID_BITS + SID_SHIFT))

/* 4 bits per slice and we have one slice per 1TB */
#define SLICE_ARRAY_SIZE	(H_PGTABLE_RANGE >> 41)
#define TASK_SLICE_ARRAY_SZ(x)	((x)->context.addr_limit >> 41)

#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * For the sub-page protection option, we extend the PGD with one of
 * these. Basically we have a 3-level tree, with the top level being
 * the protptrs array. To optimize speed and memory consumption when
 * only addresses < 4GB are being protected, pointers to the first
 * four pages of sub-page protection words are stored in the low_prot
 * array.
 * Each page of sub-page protection words protects 1GB (4 bytes
 * protects 64k). For the 3-level tree, each page of pointers then
 * protects 8TB.
 */
struct subpage_prot_table {
	unsigned long maxaddr;	/* only addresses < this are protected */
	unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)];
	unsigned int *low_prot[4];
};

#define SBP_L1_BITS		(PAGE_SHIFT - 2)
#define SBP_L2_BITS		(PAGE_SHIFT - 3)
#define SBP_L1_COUNT		(1 << SBP_L1_BITS)
#define SBP_L2_COUNT		(1 << SBP_L2_BITS)
#define SBP_L2_SHIFT		(PAGE_SHIFT + SBP_L1_BITS)
#define SBP_L3_SHIFT		(SBP_L2_SHIFT + SBP_L2_BITS)

extern void subpage_prot_free(struct mm_struct *mm);
extern void subpage_prot_init_new_context(struct mm_struct *mm);
#else
static inline void subpage_prot_free(struct mm_struct *mm) {}
static inline void subpage_prot_init_new_context(struct mm_struct *mm) { }
#endif /* CONFIG_PPC_SUBPAGE_PROT */

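/*
 * Lookup sketch (condensed from the hash fault path; assumes the table
 * has been populated): for an address below spt->maxaddr,
 *
 *	sbpm = (ea < 0x100000000UL) ? spt->low_prot
 *				    : spt->protptrs[ea >> SBP_L3_SHIFT];
 *	sbpp = sbpm[(ea >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
 *	spp  = sbpp[(ea >> PAGE_SHIFT) & (SBP_L1_COUNT - 1)];
 *
 * with NULL checks at each level in the real code.
 */
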
#if 0
/*
 * The code below is equivalent to this function for arguments
 * < 2^VSID_BITS, which is all this should ever be called
 * with. However gcc is not clever enough to compute the
 * modulus (2^n-1) without a second multiply.
 */
#define vsid_scramble(protovsid, size) \
	((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))

/* simplified form avoiding mod operation */
#define vsid_scramble(protovsid, size) \
	({								 \
		unsigned long x;					 \
		x = (protovsid) * VSID_MULTIPLIER_##size;		 \
		x = (x >> VSID_BITS_##size) + (x & VSID_MODULUS_##size); \
		(x + ((x+1) >> VSID_BITS_##size)) & VSID_MODULUS_##size; \
	})

#else /* 1 */
static inline unsigned long vsid_scramble(unsigned long protovsid,
					  unsigned long vsid_multiplier, int vsid_bits)
{
	unsigned long vsid;
	unsigned long vsid_modulus = ((1UL << vsid_bits) - 1);
	/*
	 * We have the same multiplier for both 256M and 1T segments now
	 */
	vsid = protovsid * vsid_multiplier;
	vsid = (vsid >> vsid_bits) + (vsid & vsid_modulus);
	return (vsid + ((vsid + 1) >> vsid_bits)) & vsid_modulus;
}

#endif /* 1 */

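/*
 * Why the folding above computes x mod M for M = 2^n - 1 (a sketch):
 * x = (x >> n) * 2^n + (x & M) = (x >> n) * (M + 1) + (x & M), so
 * x mod M == ((x >> n) + (x & M)) mod M. After one folding step the
 * value is at most 2M, and the final "+ ((vsid + 1) >> n)" folds the
 * remaining [M, 2M] cases back into range without a divide.
 */
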
/* Returns the segment size indicator for a user address */
static inline int user_segment_size(unsigned long addr)
{
	/* Use 1T segments if possible for addresses >= 1T */
	if (addr >= (1UL << SID_SHIFT_1T))
		return mmu_highuser_ssize;
	return MMU_SEGSIZE_256M;
}

static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
				     int ssize)
{
	unsigned long va_bits = VA_BITS;
	unsigned long vsid_bits;
	unsigned long protovsid;

	/*
	 * Bad address. We return VSID 0 for that
	 */
	if ((ea & ~REGION_MASK) >= H_PGTABLE_RANGE)
		return 0;

	if (!mmu_has_feature(MMU_FTR_68_BIT_VA))
		va_bits = 65;

	if (ssize == MMU_SEGSIZE_256M) {
		vsid_bits = va_bits - SID_SHIFT;
		protovsid = (context << ESID_BITS) |
			((ea >> SID_SHIFT) & ESID_BITS_MASK);
		return vsid_scramble(protovsid, VSID_MULTIPLIER_256M, vsid_bits);
	}
	/* 1T segment */
	vsid_bits = va_bits - SID_SHIFT_1T;
	protovsid = (context << ESID_BITS_1T) |
		((ea >> SID_SHIFT_1T) & ESID_BITS_1T_MASK);
	return vsid_scramble(protovsid, VSID_MULTIPLIER_1T, vsid_bits);
}

/*
 * This is only valid for addresses >= PAGE_OFFSET
 */
static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
{
	unsigned long context;

	if (!is_kernel_addr(ea))
		return 0;

	/*
	 * For kernel space, we use context ids 1-4 to map the address space as
	 * below:
	 *
	 * 0x00001 -  [ 0xc000000000000000 - 0xc0003fffffffffff ]
	 * 0x00002 -  [ 0xd000000000000000 - 0xd0003fffffffffff ]
	 * 0x00003 -  [ 0xe000000000000000 - 0xe0003fffffffffff ]
	 * 0x00004 -  [ 0xf000000000000000 - 0xf0003fffffffffff ]
	 *
	 * So we can compute the context from the region (top nibble) by
	 * subtracting 11, or 0xc - 1.
	 */
	context = (ea >> 60) - KERNEL_REGION_CONTEXT_OFFSET;

	return get_vsid(context, ea, ssize);
}
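
/*
 * Worked example: for ea = 0xd000000000001000 the top nibble is 0xd, so
 * context = 0xd - (0xc - 1) = 2, matching the 0xd000... row in the table
 * above; the VSID is then get_vsid(2, ea, ssize).
 */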

unsigned htab_shift_for_mem_size(unsigned long mem_size);

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_ */