Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * native hashtable management. | |
3 | * | |
4 | * SMP scalability work: | |
5 | * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM | |
6 | * | |
7 | * This program is free software; you can redistribute it and/or | |
8 | * modify it under the terms of the GNU General Public License | |
9 | * as published by the Free Software Foundation; either version | |
10 | * 2 of the License, or (at your option) any later version. | |
11 | */ | |
3c726f8d BH |
12 | |
13 | #undef DEBUG_LOW | |
14 | ||
1da177e4 LT |
15 | #include <linux/spinlock.h> |
16 | #include <linux/bitops.h> | |
beacc6da | 17 | #include <linux/of.h> |
4e287e65 | 18 | #include <linux/processor.h> |
1da177e4 LT |
19 | #include <linux/threads.h> |
20 | #include <linux/smp.h> | |
21 | ||
1da177e4 LT |
22 | #include <asm/machdep.h> |
23 | #include <asm/mmu.h> | |
24 | #include <asm/mmu_context.h> | |
25 | #include <asm/pgtable.h> | |
0428491c | 26 | #include <asm/trace.h> |
1da177e4 LT |
27 | #include <asm/tlb.h> |
28 | #include <asm/cputable.h> | |
3c726f8d | 29 | #include <asm/udbg.h> |
71bf08b6 | 30 | #include <asm/kexec.h> |
60dbf438 | 31 | #include <asm/ppc-opcode.h> |
2c86cd18 | 32 | #include <asm/feature-fixups.h> |
3c726f8d | 33 | |
ec249dd8 | 34 | #include <misc/cxl-base.h> |
4c6d9acc | 35 | |
3c726f8d BH |
36 | #ifdef DEBUG_LOW |
37 | #define DBG_LOW(fmt...) udbg_printf(fmt) | |
38 | #else | |
39 | #define DBG_LOW(fmt...) | |
40 | #endif | |
1da177e4 | 41 | |
12f04f2b | 42 | #ifdef __BIG_ENDIAN__ |
1da177e4 | 43 | #define HPTE_LOCK_BIT 3 |
12f04f2b AB |
44 | #else |
45 | #define HPTE_LOCK_BIT (56+3) | |
46 | #endif | |
1da177e4 | 47 | |
d667edc0 | 48 | static DEFINE_RAW_SPINLOCK(native_tlbie_lock); |
1da177e4 | 49 | |
d4748276 NP |
50 | static inline void tlbiel_hash_set_isa206(unsigned int set, unsigned int is) |
51 | { | |
52 | unsigned long rb; | |
53 | ||
54 | rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53)); | |
55 | ||
56 | asm volatile("tlbiel %0" : : "r" (rb)); | |
57 | } | |
58 | ||
59 | /* | |
60 | * tlbiel instruction for hash, set invalidation | |
61 | * i.e., r=1 and is=01 or is=10 or is=11 | |
62 | */ | |
63 | static inline void tlbiel_hash_set_isa300(unsigned int set, unsigned int is, | |
64 | unsigned int pid, | |
65 | unsigned int ric, unsigned int prs) | |
66 | { | |
67 | unsigned long rb; | |
68 | unsigned long rs; | |
69 | unsigned int r = 0; /* hash format */ | |
70 | ||
71 | rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53)); | |
72 | rs = ((unsigned long)pid << PPC_BITLSHIFT(31)); | |
73 | ||
74 | asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4) | |
75 | : : "r"(rb), "r"(rs), "i"(ric), "i"(prs), "r"(r) | |
76 | : "memory"); | |
77 | } | |
78 | ||
79 | ||
80 | static void tlbiel_all_isa206(unsigned int num_sets, unsigned int is) | |
81 | { | |
82 | unsigned int set; | |
83 | ||
84 | asm volatile("ptesync": : :"memory"); | |
85 | ||
86 | for (set = 0; set < num_sets; set++) | |
87 | tlbiel_hash_set_isa206(set, is); | |
88 | ||
89 | asm volatile("ptesync": : :"memory"); | |
90 | } | |
91 | ||
92 | static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is) | |
93 | { | |
94 | unsigned int set; | |
95 | ||
96 | asm volatile("ptesync": : :"memory"); | |
97 | ||
98 | /* | |
99 | * Flush the first set of the TLB, and any caching of partition table | |
100 | * entries. Then flush the remaining sets of the TLB. Hash mode uses | |
101 | * partition scoped TLB translations. | |
102 | */ | |
103 | tlbiel_hash_set_isa300(0, is, 0, 2, 0); | |
104 | for (set = 1; set < num_sets; set++) | |
105 | tlbiel_hash_set_isa300(set, is, 0, 0, 0); | |
106 | ||
107 | /* | |
108 | * Now invalidate the process table cache. | |
109 | * | |
110 | * From ISA v3.0B p. 1078: | |
111 | * The following forms are invalid. | |
112 | * * PRS=1, R=0, and RIC!=2 (The only process-scoped | |
113 | * HPT caching is of the Process Table.) | |
114 | */ | |
115 | tlbiel_hash_set_isa300(0, is, 0, 2, 1); | |
116 | ||
117 | asm volatile("ptesync": : :"memory"); | |
bc276ecb | 118 | |
fe7946ce | 119 | asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT "; isync" : : :"memory"); |
d4748276 NP |
120 | } |
121 | ||
122 | void hash__tlbiel_all(unsigned int action) | |
123 | { | |
124 | unsigned int is; | |
125 | ||
126 | switch (action) { | |
127 | case TLB_INVAL_SCOPE_GLOBAL: | |
128 | is = 3; | |
129 | break; | |
130 | case TLB_INVAL_SCOPE_LPID: | |
131 | is = 2; | |
132 | break; | |
133 | default: | |
134 | BUG(); | |
135 | } | |
136 | ||
137 | if (early_cpu_has_feature(CPU_FTR_ARCH_300)) | |
138 | tlbiel_all_isa300(POWER9_TLB_SETS_HASH, is); | |
139 | else if (early_cpu_has_feature(CPU_FTR_ARCH_207S)) | |
140 | tlbiel_all_isa206(POWER8_TLB_SETS, is); | |
141 | else if (early_cpu_has_feature(CPU_FTR_ARCH_206)) | |
142 | tlbiel_all_isa206(POWER7_TLB_SETS, is); | |
143 | else | |
144 | WARN(1, "%s called on pre-POWER7 CPU\n", __func__); | |
d4748276 NP |
145 | } |
146 | ||
a3961f82 MS |
147 | static inline unsigned long ___tlbie(unsigned long vpn, int psize, |
148 | int apsize, int ssize) | |
3c726f8d | 149 | { |
5524a27d | 150 | unsigned long va; |
3c726f8d | 151 | unsigned int penc; |
de640959 | 152 | unsigned long sllp; |
3c726f8d | 153 | |
5524a27d AK |
154 | /* |
155 | * We need 14 to 65 bits of va for a tlbie of a 4K page.
156 | * With vpn we ignore the lower VPN_SHIFT bits already. | |
157 | * And top two bits are already ignored because we can | |
027dfac6 | 158 | * only accommodate 76 bits in a 64-bit vpn with a VPN_SHIFT
5524a27d AK |
159 | * of 12. |
160 | */ | |
161 | va = vpn << VPN_SHIFT; | |
162 | /* | |
163 | * Clear the top 16 bits of the 64-bit va, non SLS segment.
164 | * Older versions of the architecture (2.02 and earlier) require the
165 | * masking of the top 16 bits. | |
166 | */ | |
accfad7d AK |
167 | if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA)) |
168 | va &= ~(0xffffULL << 48); | |
3c726f8d BH |
169 | |
170 | switch (psize) { | |
171 | case MMU_PAGE_4K: | |
1f6aaacc AK |
172 | /* clear out bits after (52) [0....52.....63] */ |
173 | va &= ~((1ul << (64 - 52)) - 1); | |
1189be65 | 174 | va |= ssize << 8; |
138ee7ee | 175 | sllp = get_sllp_encoding(apsize); |
de640959 | 176 | va |= sllp << 5; |
a32e252f | 177 | asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2) |
969391c5 | 178 | : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206) |
60dbf438 | 179 | : "memory"); |
3c726f8d BH |
180 | break; |
181 | default: | |
5524a27d | 182 | /* We need 14 to 14 + i bits of va */ |
b1022fbd | 183 | penc = mmu_psize_defs[psize].penc[apsize]; |
1f6aaacc | 184 | va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1); |
19242b24 | 185 | va |= penc << 12; |
1189be65 | 186 | va |= ssize << 8; |
29ef7a3e AK |
187 | /* |
188 | * AVAL bits: | |
189 | * We don't need all the bits, but the rest of the bits
190 | * must be ignored by the processor.
191 | * vpn covers up to 65 bits of va (0...65) and we need
192 | * 58..64 bits of va. | |
193 | */ | |
194 | va |= (vpn & 0xfe); /* AVAL */ | |
60dbf438 | 195 | va |= 1; /* L */ |
a32e252f | 196 | asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2) |
969391c5 | 197 | : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206) |
60dbf438 | 198 | : "memory"); |
3c726f8d BH |
199 | break; |
200 | } | |
a3961f82 MS |
201 | return va; |
202 | } | |
203 | ||
a5d4b589 AK |
204 | static inline void fixup_tlbie(unsigned long vpn, int psize, int apsize, int ssize) |
205 | { | |
206 | if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) { | |
207 | /* Need the extra ptesync to ensure we don't reorder tlbie */
208 | asm volatile("ptesync": : :"memory"); | |
209 | ___tlbie(vpn, psize, apsize, ssize); | |
210 | } | |
211 | } | |
212 | ||
a3961f82 MS |
213 | static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize) |
214 | { | |
215 | unsigned long rb; | |
216 | ||
217 | rb = ___tlbie(vpn, psize, apsize, ssize); | |
218 | trace_tlbie(0, 0, rb, 0, 0, 0, 0); | |
3c726f8d BH |
219 | } |
220 | ||
b1022fbd | 221 | static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize) |
3c726f8d | 222 | { |
5524a27d | 223 | unsigned long va; |
3c726f8d | 224 | unsigned int penc; |
de640959 | 225 | unsigned long sllp; |
3c726f8d | 226 | |
5524a27d AK |
227 | /* VPN_SHIFT can be at most 12 */
228 | va = vpn << VPN_SHIFT; | |
229 | /* | |
230 | * Clear the top 16 bits of the 64-bit va, non SLS segment.
231 | * Older versions of the architecture (2.02 and earlier) require the
232 | * masking of the top 16 bits. | |
233 | */ | |
accfad7d AK |
234 | if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA)) |
235 | va &= ~(0xffffULL << 48); | |
3c726f8d BH |
236 | |
237 | switch (psize) { | |
238 | case MMU_PAGE_4K: | |
1f6aaacc AK |
239 | /* clear out bits after (52) [0....52.....63] */
240 | va &= ~((1ul << (64 - 52)) - 1); | |
1189be65 | 241 | va |= ssize << 8; |
138ee7ee | 242 | sllp = get_sllp_encoding(apsize); |
de640959 | 243 | va |= sllp << 5; |
f923efbc BS |
244 | asm volatile(ASM_FTR_IFSET("tlbiel %0", "tlbiel %0,0", %1) |
245 | : : "r" (va), "i" (CPU_FTR_ARCH_206) | |
246 | : "memory"); | |
3c726f8d BH |
247 | break; |
248 | default: | |
5524a27d | 249 | /* We need 14 to 14 + i bits of va */ |
b1022fbd | 250 | penc = mmu_psize_defs[psize].penc[apsize]; |
1f6aaacc | 251 | va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1); |
19242b24 | 252 | va |= penc << 12; |
1189be65 | 253 | va |= ssize << 8; |
29ef7a3e AK |
254 | /* |
255 | * AVAL bits: | |
256 | * We don't need all the bits, but the rest of the bits
257 | * must be ignored by the processor.
258 | * vpn covers up to 65 bits of va (0...65) and we need
259 | * 58..64 bits of va. | |
260 | */ | |
261 | va |= (vpn & 0xfe); | |
60dbf438 | 262 | va |= 1; /* L */ |
f923efbc BS |
263 | asm volatile(ASM_FTR_IFSET("tlbiel %0", "tlbiel %0,1", %1) |
264 | : : "r" (va), "i" (CPU_FTR_ARCH_206) | |
265 | : "memory"); | |
3c726f8d BH |
266 | break; |
267 | } | |
0428491c | 268 | trace_tlbie(0, 1, va, 0, 0, 0, 0); |
3c726f8d BH |
269 | |
270 | } | |
271 | ||
b1022fbd AK |
272 | static inline void tlbie(unsigned long vpn, int psize, int apsize, |
273 | int ssize, int local) | |
3c726f8d | 274 | { |
4c6d9acc | 275 | unsigned int use_local; |
44ae3ab3 | 276 | int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); |
3c726f8d | 277 | |
4c6d9acc IM |
278 | use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) && !cxl_ctx_in_use(); |
279 | ||
3c726f8d BH |
280 | if (use_local) |
281 | use_local = mmu_psize_defs[psize].tlbiel; | |
282 | if (lock_tlbie && !use_local) | |
6b9c9b8a | 283 | raw_spin_lock(&native_tlbie_lock); |
3c726f8d BH |
284 | asm volatile("ptesync": : :"memory"); |
285 | if (use_local) { | |
b1022fbd | 286 | __tlbiel(vpn, psize, apsize, ssize); |
3c726f8d BH |
287 | asm volatile("ptesync": : :"memory"); |
288 | } else { | |
b1022fbd | 289 | __tlbie(vpn, psize, apsize, ssize); |
a5d4b589 | 290 | fixup_tlbie(vpn, psize, apsize, ssize); |
3c726f8d BH |
291 | asm volatile("eieio; tlbsync; ptesync": : :"memory"); |
292 | } | |
293 | if (lock_tlbie && !use_local) | |
6b9c9b8a | 294 | raw_spin_unlock(&native_tlbie_lock); |
3c726f8d BH |
295 | } |
296 | ||
8e561e7e | 297 | static inline void native_lock_hpte(struct hash_pte *hptep) |
1da177e4 | 298 | { |
12f04f2b | 299 | unsigned long *word = (unsigned long *)&hptep->v; |
1da177e4 LT |
300 | |
301 | while (1) { | |
66d99b88 | 302 | if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word)) |
1da177e4 | 303 | break; |
4e287e65 | 304 | spin_begin(); |
1da177e4 | 305 | while(test_bit(HPTE_LOCK_BIT, word)) |
4e287e65 NP |
306 | spin_cpu_relax(); |
307 | spin_end(); | |
1da177e4 LT |
308 | } |
309 | } | |
310 | ||
8e561e7e | 311 | static inline void native_unlock_hpte(struct hash_pte *hptep) |
1da177e4 | 312 | { |
12f04f2b | 313 | unsigned long *word = (unsigned long *)&hptep->v; |
1da177e4 | 314 | |
66d99b88 | 315 | clear_bit_unlock(HPTE_LOCK_BIT, word); |
1da177e4 LT |
316 | } |
317 | ||
5524a27d | 318 | static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn, |
3c726f8d | 319 | unsigned long pa, unsigned long rflags, |
b1022fbd | 320 | unsigned long vflags, int psize, int apsize, int ssize) |
1da177e4 | 321 | { |
8e561e7e | 322 | struct hash_pte *hptep = htab_address + hpte_group; |
96e28449 | 323 | unsigned long hpte_v, hpte_r; |
1da177e4 LT |
324 | int i; |
325 | ||
3c726f8d | 326 | if (!(vflags & HPTE_V_BOLTED)) { |
5524a27d | 327 | DBG_LOW(" insert(group=%lx, vpn=%016lx, pa=%016lx," |
3c726f8d | 328 | " rflags=%lx, vflags=%lx, psize=%d)\n", |
5524a27d | 329 | hpte_group, vpn, pa, rflags, vflags, psize); |
3c726f8d BH |
330 | } |
331 | ||
1da177e4 | 332 | for (i = 0; i < HPTES_PER_GROUP; i++) { |
12f04f2b | 333 | if (! (be64_to_cpu(hptep->v) & HPTE_V_VALID)) { |
1da177e4 LT |
334 | /* retry with lock held */ |
335 | native_lock_hpte(hptep); | |
12f04f2b | 336 | if (! (be64_to_cpu(hptep->v) & HPTE_V_VALID)) |
1da177e4 LT |
337 | break; |
338 | native_unlock_hpte(hptep); | |
339 | } | |
340 | ||
341 | hptep++; | |
342 | } | |
343 | ||
344 | if (i == HPTES_PER_GROUP) | |
345 | return -1; | |
346 | ||
b1022fbd | 347 | hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID; |
6b243fcf | 348 | hpte_r = hpte_encode_r(pa, psize, apsize) | rflags; |
3c726f8d BH |
349 | |
350 | if (!(vflags & HPTE_V_BOLTED)) { | |
351 | DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n", | |
352 | i, hpte_v, hpte_r); | |
353 | } | |
1da177e4 | 354 | |
6b243fcf PM |
355 | if (cpu_has_feature(CPU_FTR_ARCH_300)) { |
356 | hpte_r = hpte_old_to_new_r(hpte_v, hpte_r); | |
357 | hpte_v = hpte_old_to_new_v(hpte_v); | |
358 | } | |
359 | ||
12f04f2b | 360 | hptep->r = cpu_to_be64(hpte_r); |
1da177e4 | 361 | /* Guarantee the second dword is visible before the valid bit */ |
74a0ba61 | 362 | eieio(); |
1da177e4 LT |
363 | /* |
364 | * Now set the first dword including the valid bit | |
365 | * NOTE: this also unlocks the hpte | |
366 | */ | |
12f04f2b | 367 | hptep->v = cpu_to_be64(hpte_v); |
1da177e4 LT |
368 | |
369 | __asm__ __volatile__ ("ptesync" : : : "memory"); | |
370 | ||
96e28449 | 371 | return i | (!!(vflags & HPTE_V_SECONDARY) << 3); |
1da177e4 LT |
372 | } |
373 | ||
374 | static long native_hpte_remove(unsigned long hpte_group) | |
375 | { | |
8e561e7e | 376 | struct hash_pte *hptep; |
1da177e4 LT |
377 | int i; |
378 | int slot_offset; | |
96e28449 | 379 | unsigned long hpte_v; |
1da177e4 | 380 | |
3c726f8d BH |
381 | DBG_LOW(" remove(group=%lx)\n", hpte_group); |
382 | ||
1da177e4 LT |
383 | /* pick a random entry to start at */ |
384 | slot_offset = mftb() & 0x7; | |
385 | ||
386 | for (i = 0; i < HPTES_PER_GROUP; i++) { | |
387 | hptep = htab_address + hpte_group + slot_offset; | |
12f04f2b | 388 | hpte_v = be64_to_cpu(hptep->v); |
1da177e4 | 389 | |
96e28449 | 390 | if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) { |
1da177e4 LT |
391 | /* retry with lock held */ |
392 | native_lock_hpte(hptep); | |
12f04f2b | 393 | hpte_v = be64_to_cpu(hptep->v); |
96e28449 DG |
394 | if ((hpte_v & HPTE_V_VALID) |
395 | && !(hpte_v & HPTE_V_BOLTED)) | |
1da177e4 LT |
396 | break; |
397 | native_unlock_hpte(hptep); | |
398 | } | |
399 | ||
400 | slot_offset++; | |
401 | slot_offset &= 0x7; | |
402 | } | |
403 | ||
404 | if (i == HPTES_PER_GROUP) | |
405 | return -1; | |
406 | ||
407 | /* Invalidate the hpte. NOTE: this also unlocks it */ | |
96e28449 | 408 | hptep->v = 0; |
1da177e4 LT |
409 | |
410 | return i; | |
411 | } | |
412 | ||
3c726f8d | 413 | static long native_hpte_updatepp(unsigned long slot, unsigned long newpp, |
db3d8534 | 414 | unsigned long vpn, int bpsize, |
aefa5688 | 415 | int apsize, int ssize, unsigned long flags) |
1da177e4 | 416 | { |
8e561e7e | 417 | struct hash_pte *hptep = htab_address + slot; |
3c726f8d | 418 | unsigned long hpte_v, want_v; |
aefa5688 | 419 | int ret = 0, local = 0; |
3c726f8d | 420 | |
db3d8534 | 421 | want_v = hpte_encode_avpn(vpn, bpsize, ssize); |
3c726f8d | 422 | |
5524a27d AK |
423 | DBG_LOW(" update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)", |
424 | vpn, want_v & HPTE_V_AVPN, slot, newpp); | |
3c726f8d | 425 | |
a833280b | 426 | hpte_v = hpte_get_old_v(hptep); |
0608d692 AK |
427 | /* |
428 | * We always need to invalidate the TLB because hpte_remove doesn't do
429 | * a tlb invalidate. If a hash bucket gets full, we "evict" a more or
430 | * less random entry from it. When we do that we don't invalidate the TLB
431 | * (hpte_remove) because we assume the old translation is still | |
432 | * technically "valid". | |
433 | */ | |
db3d8534 | 434 | if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) { |
3c726f8d | 435 | DBG_LOW(" -> miss\n"); |
3c726f8d BH |
436 | ret = -1; |
437 | } else { | |
0ec2698f AK |
438 | native_lock_hpte(hptep); |
439 | /* recheck with locks held */ | |
a833280b | 440 | hpte_v = hpte_get_old_v(hptep); |
0ec2698f AK |
441 | if (unlikely(!HPTE_V_COMPARE(hpte_v, want_v) || |
442 | !(hpte_v & HPTE_V_VALID))) { | |
443 | ret = -1; | |
444 | } else { | |
445 | DBG_LOW(" -> hit\n"); | |
446 | /* Update the HPTE */ | |
447 | hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) & | |
8550e2fa AK |
448 | ~(HPTE_R_PPP | HPTE_R_N)) | |
449 | (newpp & (HPTE_R_PPP | HPTE_R_N | | |
0ec2698f AK |
450 | HPTE_R_C))); |
451 | } | |
452 | native_unlock_hpte(hptep); | |
3c726f8d | 453 | } |
aefa5688 AK |
454 | |
455 | if (flags & HPTE_LOCAL_UPDATE) | |
456 | local = 1; | |
457 | /* | |
458 | * Ensure it is out of the tlb too if it is not a nohpte fault | |
459 | */ | |
460 | if (!(flags & HPTE_NOHPTE_UPDATE)) | |
461 | tlbie(vpn, bpsize, apsize, ssize, local); | |
462 | ||
3c726f8d | 463 | return ret; |
1da177e4 LT |
464 | } |
465 | ||
5524a27d | 466 | static long native_hpte_find(unsigned long vpn, int psize, int ssize) |
1da177e4 | 467 | { |
8e561e7e | 468 | struct hash_pte *hptep; |
1da177e4 | 469 | unsigned long hash; |
1189be65 | 470 | unsigned long i; |
1da177e4 | 471 | long slot; |
3c726f8d | 472 | unsigned long want_v, hpte_v; |
1da177e4 | 473 | |
5524a27d | 474 | hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize); |
74f227b2 | 475 | want_v = hpte_encode_avpn(vpn, psize, ssize); |
1da177e4 | 476 | |
1189be65 PM |
477 | /* Bolted mappings are only ever in the primary group */ |
478 | slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; | |
479 | for (i = 0; i < HPTES_PER_GROUP; i++) { | |
1da177e4 | 480 | |
a833280b AK |
481 | hptep = htab_address + slot; |
482 | hpte_v = hpte_get_old_v(hptep); | |
1189be65 PM |
483 | if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) |
484 | /* HPTE matches */ | |
485 | return slot; | |
486 | ++slot; | |
1da177e4 LT |
487 | } |
488 | ||
489 | return -1; | |
490 | } | |
491 | ||
1da177e4 LT |
492 | /* |
493 | * Update the page protection bits. Intended to be used to create | |
494 | * guard pages for kernel data structures on pages which are bolted | |
495 | * in the HPT. Assumes pages being operated on will not be stolen. | |
1da177e4 LT |
496 | * |
497 | * No need to lock here because we should be the only user. | |
498 | */ | |
3c726f8d | 499 | static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea, |
1189be65 | 500 | int psize, int ssize) |
1da177e4 | 501 | { |
5524a27d AK |
502 | unsigned long vpn; |
503 | unsigned long vsid; | |
1da177e4 | 504 | long slot; |
8e561e7e | 505 | struct hash_pte *hptep; |
1da177e4 | 506 | |
1189be65 | 507 | vsid = get_kernel_vsid(ea, ssize); |
5524a27d | 508 | vpn = hpt_vpn(ea, vsid, ssize); |
1da177e4 | 509 | |
5524a27d | 510 | slot = native_hpte_find(vpn, psize, ssize); |
1da177e4 LT |
511 | if (slot == -1) |
512 | panic("could not find page to bolt\n"); | |
513 | hptep = htab_address + slot; | |
514 | ||
3c726f8d | 515 | /* Update the HPTE */ |
12f04f2b | 516 | hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) & |
8550e2fa AK |
517 | ~(HPTE_R_PPP | HPTE_R_N)) | |
518 | (newpp & (HPTE_R_PPP | HPTE_R_N))); | |
db3d8534 AK |
519 | /* |
520 | * Ensure it is out of the tlb too. A bolted entry's base and
521 | * actual page size will be the same.
522 | */ | |
523 | tlbie(vpn, psize, psize, ssize, 0); | |
1da177e4 LT |
524 | } |
525 | ||
1b644f57 AB |
526 | /* |
527 | * Remove a bolted kernel entry. Memory hotplug uses this. | |
528 | * | |
529 | * No need to lock here because we should be the only user. | |
530 | */ | |
531 | static int native_hpte_removebolted(unsigned long ea, int psize, int ssize) | |
532 | { | |
533 | unsigned long vpn; | |
534 | unsigned long vsid; | |
535 | long slot; | |
536 | struct hash_pte *hptep; | |
537 | ||
538 | vsid = get_kernel_vsid(ea, ssize); | |
539 | vpn = hpt_vpn(ea, vsid, ssize); | |
540 | ||
541 | slot = native_hpte_find(vpn, psize, ssize); | |
542 | if (slot == -1) | |
543 | return -ENOENT; | |
544 | ||
545 | hptep = htab_address + slot; | |
546 | ||
547 | VM_WARN_ON(!(be64_to_cpu(hptep->v) & HPTE_V_BOLTED)); | |
548 | ||
549 | /* Invalidate the hpte */ | |
550 | hptep->v = 0; | |
551 | ||
552 | /* Invalidate the TLB */ | |
553 | tlbie(vpn, psize, psize, ssize, 0); | |
554 | return 0; | |
555 | } | |
556 | ||
557 | ||
5524a27d | 558 | static void native_hpte_invalidate(unsigned long slot, unsigned long vpn, |
db3d8534 | 559 | int bpsize, int apsize, int ssize, int local) |
1da177e4 | 560 | { |
8e561e7e | 561 | struct hash_pte *hptep = htab_address + slot; |
96e28449 | 562 | unsigned long hpte_v; |
3c726f8d | 563 | unsigned long want_v; |
1da177e4 | 564 | unsigned long flags; |
1da177e4 LT |
565 | |
566 | local_irq_save(flags); | |
1da177e4 | 567 | |
5524a27d | 568 | DBG_LOW(" invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot); |
3c726f8d | 569 | |
db3d8534 | 570 | want_v = hpte_encode_avpn(vpn, bpsize, ssize); |
a833280b | 571 | hpte_v = hpte_get_old_v(hptep); |
1da177e4 | 572 | |
27d8959d AK |
573 | if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) { |
574 | native_lock_hpte(hptep); | |
575 | /* recheck with locks held */ | |
576 | hpte_v = hpte_get_old_v(hptep); | |
577 | ||
578 | if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) | |
579 | /* Invalidate the hpte. NOTE: this also unlocks it */ | |
580 | hptep->v = 0; | |
581 | else | |
582 | native_unlock_hpte(hptep); | |
583 | } | |
0608d692 AK |
584 | /* |
585 | * We always need to invalidate the TLB because hpte_remove doesn't do
586 | * a tlb invalidate. If a hash bucket gets full, we "evict" a more or
587 | * less random entry from it. When we do that we don't invalidate the TLB
588 | * (hpte_remove) because we assume the old translation is still | |
589 | * technically "valid". | |
590 | */ | |
db3d8534 AK |
591 | tlbie(vpn, bpsize, apsize, ssize, local); |
592 | ||
1da177e4 LT |
593 | local_irq_restore(flags); |
594 | } | |
595 | ||
e34aa03c | 596 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
fa1f8ae8 AK |
597 | static void native_hugepage_invalidate(unsigned long vsid, |
598 | unsigned long addr, | |
1a527286 | 599 | unsigned char *hpte_slot_array, |
d557b098 | 600 | int psize, int ssize, int local) |
1a527286 | 601 | { |
969b7b20 | 602 | int i; |
1a527286 AK |
603 | struct hash_pte *hptep; |
604 | int actual_psize = MMU_PAGE_16M; | |
605 | unsigned int max_hpte_count, valid; | |
606 | unsigned long flags, s_addr = addr; | |
607 | unsigned long hpte_v, want_v, shift; | |
fa1f8ae8 | 608 | unsigned long hidx, vpn = 0, hash, slot; |
1a527286 AK |
609 | |
610 | shift = mmu_psize_defs[psize].shift; | |
611 | max_hpte_count = 1U << (PMD_SHIFT - shift); | |
612 | ||
613 | local_irq_save(flags); | |
614 | for (i = 0; i < max_hpte_count; i++) { | |
615 | valid = hpte_valid(hpte_slot_array, i); | |
616 | if (!valid) | |
617 | continue; | |
618 | hidx = hpte_hash_index(hpte_slot_array, i); | |
619 | ||
620 | /* get the vpn */ | |
621 | addr = s_addr + (i * (1ul << shift)); | |
1a527286 AK |
622 | vpn = hpt_vpn(addr, vsid, ssize); |
623 | hash = hpt_hash(vpn, shift, ssize); | |
624 | if (hidx & _PTEIDX_SECONDARY) | |
625 | hash = ~hash; | |
626 | ||
627 | slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; | |
628 | slot += hidx & _PTEIDX_GROUP_IX; | |
629 | ||
630 | hptep = htab_address + slot; | |
631 | want_v = hpte_encode_avpn(vpn, psize, ssize); | |
a833280b | 632 | hpte_v = hpte_get_old_v(hptep); |
1a527286 AK |
633 | |
634 | /* Even if we miss, we need to invalidate the TLB */ | |
27d8959d AK |
635 | if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) { |
636 | /* recheck with locks held */ | |
637 | native_lock_hpte(hptep); | |
638 | hpte_v = hpte_get_old_v(hptep); | |
639 | ||
640 | if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) { | |
641 | /* | |
642 | * Invalidate the hpte. NOTE: this also unlocks it | |
643 | */ | |
644 | ||
645 | hptep->v = 0; | |
646 | } else | |
647 | native_unlock_hpte(hptep); | |
648 | } | |
969b7b20 AK |
649 | /* |
650 | * We need to do a tlb invalidate for each address; the tlbie
651 | * instruction compares entry_VA in the tlb with the VA specified
652 | * here.
653 | */ | |
d557b098 | 654 | tlbie(vpn, psize, actual_psize, ssize, local); |
1a527286 | 655 | } |
1a527286 AK |
656 | local_irq_restore(flags); |
657 | } | |
e34aa03c AK |
658 | #else |
659 | static void native_hugepage_invalidate(unsigned long vsid, | |
660 | unsigned long addr, | |
661 | unsigned char *hpte_slot_array, | |
662 | int psize, int ssize, int local) | |
663 | { | |
664 | WARN(1, "%s called without THP support\n", __func__); | |
665 | } | |
666 | #endif | |
1a527286 | 667 | |
8e561e7e | 668 | static void hpte_decode(struct hash_pte *hpte, unsigned long slot, |
b1022fbd | 669 | int *psize, int *apsize, int *ssize, unsigned long *vpn) |
71bf08b6 | 670 | { |
dcda287a | 671 | unsigned long avpn, pteg, vpi; |
12f04f2b AB |
672 | unsigned long hpte_v = be64_to_cpu(hpte->v); |
673 | unsigned long hpte_r = be64_to_cpu(hpte->r); | |
dcda287a | 674 | unsigned long vsid, seg_off; |
7e74c392 AK |
675 | int size, a_size, shift; |
676 | /* Look at the 8 bit LP value */ | |
12f04f2b | 677 | unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1); |
71bf08b6 | 678 | |
6b243fcf PM |
679 | if (cpu_has_feature(CPU_FTR_ARCH_300)) { |
680 | hpte_v = hpte_new_to_old_v(hpte_v, hpte_r); | |
681 | hpte_r = hpte_new_to_old_r(hpte_r); | |
682 | } | |
b1022fbd AK |
683 | if (!(hpte_v & HPTE_V_LARGE)) { |
684 | size = MMU_PAGE_4K; | |
685 | a_size = MMU_PAGE_4K; | |
686 | } else { | |
0eeede0c PM |
687 | size = hpte_page_sizes[lp] & 0xf; |
688 | a_size = hpte_page_sizes[lp] >> 4; | |
71bf08b6 | 689 | } |
2454c7e9 | 690 | /* This works for all page sizes, and for 256M and 1T segments */ |
6b243fcf | 691 | *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT; |
71bf08b6 | 692 | shift = mmu_psize_defs[size].shift; |
71bf08b6 | 693 | |
dcda287a AK |
694 | avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm); |
695 | pteg = slot / HPTES_PER_GROUP; | |
696 | if (hpte_v & HPTE_V_SECONDARY) | |
697 | pteg = ~pteg; | |
698 | ||
699 | switch (*ssize) { | |
700 | case MMU_SEGSIZE_256M: | |
701 | /* We only have 28 - 23 bits of seg_off in avpn */ | |
702 | seg_off = (avpn & 0x1f) << 23; | |
703 | vsid = avpn >> 5; | |
704 | /* We can find more bits from the pteg value */ | |
705 | if (shift < 23) { | |
706 | vpi = (vsid ^ pteg) & htab_hash_mask; | |
707 | seg_off |= vpi << shift; | |
708 | } | |
5524a27d | 709 | *vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT; |
83383b73 | 710 | break; |
dcda287a AK |
711 | case MMU_SEGSIZE_1T: |
712 | /* We only have 40 - 23 bits of seg_off in avpn */ | |
713 | seg_off = (avpn & 0x1ffff) << 23; | |
714 | vsid = avpn >> 17; | |
715 | if (shift < 23) { | |
2454c7e9 | 716 | vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask; |
dcda287a | 717 | seg_off |= vpi << shift; |
71bf08b6 | 718 | } |
5524a27d | 719 | *vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT; |
83383b73 | 720 | break; |
dcda287a | 721 | default: |
5524a27d | 722 | *vpn = size = 0; |
3c726f8d | 723 | } |
b1022fbd AK |
724 | *psize = size; |
725 | *apsize = a_size; | |
3c726f8d BH |
726 | } |
727 | ||
f4c82d51 S |
728 | /* |
729 | * clear all mappings on kexec. All cpus are in real mode (or they will | |
730 | * be when they isi), and we are the only one left. We rely on our kernel | |
731 | * mapping being 0xC0's and the hardware ignoring those two real bits. | |
732 | * | |
fdf880a6 CB |
733 | * This must be called with interrupts disabled. |
734 | * | |
735 | * Taking the native_tlbie_lock is unsafe here due to the possibility of | |
736 | * lockdep being on. On pre-POWER5 hardware, not taking the lock could
737 | * cause deadlock; on POWER5 and newer, not taking the lock is fine. This only
738 | * gets called during boot before secondary CPUs have come up and during | |
739 | * crashdump and all bets are off anyway. | |
740 | * | |
f4c82d51 | 741 | * TODO: add batching support when enabled. remember, no dynamic memory here, |
027dfac6 | 742 | * although there is the control page available... |
f4c82d51 S |
743 | */ |
744 | static void native_hpte_clear(void) | |
745 | { | |
5524a27d | 746 | unsigned long vpn = 0; |
fdf880a6 | 747 | unsigned long slot, slots; |
8e561e7e | 748 | struct hash_pte *hptep = htab_address; |
5524a27d | 749 | unsigned long hpte_v; |
f4c82d51 | 750 | unsigned long pteg_count; |
b1022fbd | 751 | int psize, apsize, ssize; |
f4c82d51 S |
752 | |
753 | pteg_count = htab_hash_mask + 1; | |
754 | ||
f4c82d51 S |
755 | slots = pteg_count * HPTES_PER_GROUP; |
756 | ||
757 | for (slot = 0; slot < slots; slot++, hptep++) { | |
758 | /* | |
759 | * we could lock the pte here, but we are the only cpu | |
760 | * running, right? And for crash dump, we probably
761 | * don't want to wait for a possibly bad cpu.
762 | */ | |
12f04f2b | 763 | hpte_v = be64_to_cpu(hptep->v); |
f4c82d51 | 764 | |
47f78a49 | 765 | /* |
fdf880a6 CB |
766 | * Call __tlbie() here rather than tlbie() since we can't take the |
767 | * native_tlbie_lock. | |
47f78a49 | 768 | */ |
96e28449 | 769 | if (hpte_v & HPTE_V_VALID) { |
b1022fbd | 770 | hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn); |
96e28449 | 771 | hptep->v = 0; |
a3961f82 | 772 | ___tlbie(vpn, psize, apsize, ssize); |
f4c82d51 S |
773 | } |
774 | } | |
775 | ||
47f78a49 | 776 | asm volatile("eieio; tlbsync; ptesync":::"memory"); |
f4c82d51 S |
777 | } |
778 | ||
3c726f8d BH |
779 | /* |
780 | * Batched hash table flush, we batch the tlbie's to avoid taking/releasing | |
781 | * the lock all the time | |
782 | */ | |
61b1a942 | 783 | static void native_flush_hash_range(unsigned long number, int local) |
1da177e4 | 784 | { |
a5d4b589 | 785 | unsigned long vpn = 0; |
5524a27d | 786 | unsigned long hash, index, hidx, shift, slot; |
8e561e7e | 787 | struct hash_pte *hptep; |
96e28449 | 788 | unsigned long hpte_v; |
3c726f8d BH |
789 | unsigned long want_v; |
790 | unsigned long flags; | |
791 | real_pte_t pte; | |
69111bac | 792 | struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch); |
3c726f8d | 793 | unsigned long psize = batch->psize; |
1189be65 | 794 | int ssize = batch->ssize; |
3c726f8d | 795 | int i; |
88b1bf72 FB |
796 | unsigned int use_local; |
797 | ||
798 | use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) && | |
799 | mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use(); | |
1da177e4 LT |
800 | |
801 | local_irq_save(flags); | |
802 | ||
1da177e4 | 803 | for (i = 0; i < number; i++) { |
5524a27d | 804 | vpn = batch->vpn[i]; |
3c726f8d BH |
805 | pte = batch->pte[i]; |
806 | ||
5524a27d AK |
807 | pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) { |
808 | hash = hpt_hash(vpn, shift, ssize); | |
3c726f8d BH |
809 | hidx = __rpte_to_hidx(pte, index); |
810 | if (hidx & _PTEIDX_SECONDARY) | |
811 | hash = ~hash; | |
812 | slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; | |
813 | slot += hidx & _PTEIDX_GROUP_IX; | |
814 | hptep = htab_address + slot; | |
74f227b2 | 815 | want_v = hpte_encode_avpn(vpn, psize, ssize); |
27d8959d AK |
816 | hpte_v = hpte_get_old_v(hptep); |
817 | ||
818 | if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) | |
819 | continue; | |
820 | /* lock and try again */ | |
3c726f8d | 821 | native_lock_hpte(hptep); |
a833280b | 822 | hpte_v = hpte_get_old_v(hptep); |
27d8959d AK |
823 | |
824 | if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) | |
3c726f8d BH |
825 | native_unlock_hpte(hptep); |
826 | else | |
827 | hptep->v = 0; | |
27d8959d | 828 | |
3c726f8d | 829 | } pte_iterate_hashed_end(); |
1da177e4 LT |
830 | } |
831 | ||
88b1bf72 | 832 | if (use_local) { |
1da177e4 | 833 | asm volatile("ptesync":::"memory"); |
3c726f8d | 834 | for (i = 0; i < number; i++) { |
5524a27d | 835 | vpn = batch->vpn[i]; |
3c726f8d BH |
836 | pte = batch->pte[i]; |
837 | ||
5524a27d AK |
838 | pte_iterate_hashed_subpages(pte, psize, |
839 | vpn, index, shift) { | |
b1022fbd | 840 | __tlbiel(vpn, psize, psize, ssize); |
3c726f8d BH |
841 | } pte_iterate_hashed_end(); |
842 | } | |
1da177e4 LT |
843 | asm volatile("ptesync":::"memory"); |
844 | } else { | |
44ae3ab3 | 845 | int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); |
1da177e4 LT |
846 | |
847 | if (lock_tlbie) | |
6b9c9b8a | 848 | raw_spin_lock(&native_tlbie_lock); |
1da177e4 LT |
849 | |
850 | asm volatile("ptesync":::"memory"); | |
3c726f8d | 851 | for (i = 0; i < number; i++) { |
5524a27d | 852 | vpn = batch->vpn[i]; |
3c726f8d BH |
853 | pte = batch->pte[i]; |
854 | ||
5524a27d AK |
855 | pte_iterate_hashed_subpages(pte, psize, |
856 | vpn, index, shift) { | |
b1022fbd | 857 | __tlbie(vpn, psize, psize, ssize); |
3c726f8d BH |
858 | } pte_iterate_hashed_end(); |
859 | } | |
a5d4b589 AK |
860 | /* |
861 | * Just do one more with the last used values. | |
862 | */ | |
863 | fixup_tlbie(vpn, psize, psize, ssize); | |
1da177e4 LT |
864 | asm volatile("eieio; tlbsync; ptesync":::"memory"); |
865 | ||
866 | if (lock_tlbie) | |
6b9c9b8a | 867 | raw_spin_unlock(&native_tlbie_lock); |
1da177e4 LT |
868 | } |
869 | ||
870 | local_irq_restore(flags); | |
871 | } | |
872 | ||
7d0daae4 | 873 | void __init hpte_init_native(void) |
1da177e4 | 874 | { |
7025776e BH |
875 | mmu_hash_ops.hpte_invalidate = native_hpte_invalidate; |
876 | mmu_hash_ops.hpte_updatepp = native_hpte_updatepp; | |
877 | mmu_hash_ops.hpte_updateboltedpp = native_hpte_updateboltedpp; | |
1b644f57 | 878 | mmu_hash_ops.hpte_removebolted = native_hpte_removebolted; |
7025776e BH |
879 | mmu_hash_ops.hpte_insert = native_hpte_insert; |
880 | mmu_hash_ops.hpte_remove = native_hpte_remove; | |
881 | mmu_hash_ops.hpte_clear_all = native_hpte_clear; | |
882 | mmu_hash_ops.flush_hash_range = native_flush_hash_range; | |
883 | mmu_hash_ops.hugepage_invalidate = native_hugepage_invalidate; | |
1da177e4 | 884 | } |
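
The per-HPTE bit lock used throughout this file (native_lock_hpte()/native_unlock_hpte(), and the scan-then-lock-then-recheck loops in native_hpte_insert(), native_hpte_updatepp() and native_hpte_invalidate()) can be modelled in plain user-space C. The sketch below is illustrative only: GCC atomic builtins stand in for test_and_set_bit_lock()/clear_bit_unlock(), and names such as fake_hpte, LOCK_BIT and VALID_BIT are invented for the example rather than taken from the kernel.

```c
#include <stdint.h>
#include <stdio.h>

#define LOCK_BIT   3	/* stand-in for HPTE_LOCK_BIT (big-endian value) */
#define VALID_BIT  0	/* stand-in for HPTE_V_VALID */
#define GROUP_SIZE 8	/* HPTES_PER_GROUP */

struct fake_hpte { uint64_t v; };

/* spin until the lock bit is observed clear, then atomically set it */
static void lock_hpte(struct fake_hpte *h)
{
	for (;;) {
		uint64_t old = __atomic_fetch_or(&h->v, 1ULL << LOCK_BIT,
						 __ATOMIC_ACQUIRE);
		if (!(old & (1ULL << LOCK_BIT)))
			return;			/* lock acquired */
		while (__atomic_load_n(&h->v, __ATOMIC_RELAXED) &
		       (1ULL << LOCK_BIT))
			;			/* cheap spin, like spin_cpu_relax() */
	}
}

static void unlock_hpte(struct fake_hpte *h)
{
	__atomic_fetch_and(&h->v, ~(1ULL << LOCK_BIT), __ATOMIC_RELEASE);
}

int main(void)
{
	struct fake_hpte group[GROUP_SIZE] = {{ 0 }};
	int i;

	/* scan unlocked first; lock and re-check before claiming a slot,
	 * mirroring the shape of the loop in native_hpte_insert() */
	for (i = 0; i < GROUP_SIZE; i++) {
		if (!(group[i].v & (1ULL << VALID_BIT))) {
			lock_hpte(&group[i]);
			if (!(group[i].v & (1ULL << VALID_BIT)))
				break;		/* still free: claim it */
			unlock_hpte(&group[i]);
		}
	}

	if (i == GROUP_SIZE) {
		printf("group full\n");
		return 1;
	}

	group[i].v |= 1ULL << VALID_BIT;	/* mark the entry valid ... */
	unlock_hpte(&group[i]);			/* ... and drop the lock */
	printf("claimed slot %d\n", i);
	return 0;
}
```

The point the sketch makes is the same one the kernel code relies on: slots are scanned without the lock first, and the lock is only taken to re-check and claim a slot, so the common path stays lock-free. In the real code the final store of hptep->v both publishes the entry and drops the lock in one write; the sketch performs those two steps separately for clarity.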