/*
 * PowerPC64 SLB support.
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 * Based on earlier code written by:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 * Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/asm-prototypes.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/ppc-opcode.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/mm_types.h>

#include <asm/udbg.h>
#include <asm/code-patching.h>

enum slb_index {
        LINEAR_INDEX    = 0, /* Kernel linear map (0xc000000000000000) */
        KSTACK_INDEX    = 1, /* Kernel stack map */
};

static long slb_allocate_user(struct mm_struct *mm, unsigned long ea);

#define slb_esid_mask(ssize)    \
        (((ssize) == MMU_SEGSIZE_256M) ? ESID_MASK : ESID_MASK_1T)

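/*
 * Helpers to build the ESID and VSID halves of an SLB entry in the form
 * consumed by slbmte (and by the shadow buffer below).
 */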
static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
                                         enum slb_index index)
{
        return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | index;
}

static inline unsigned long __mk_vsid_data(unsigned long vsid, int ssize,
                                           unsigned long flags)
{
        return (vsid << slb_vsid_shift(ssize)) | flags |
                ((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT);
}

static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
                                         unsigned long flags)
{
        return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags);
}

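/*
 * With CONFIG_DEBUG_VM, probe the SLB with slbfee. and warn if the presence
 * of an entry for @ea does not match @present.
 */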
static void assert_slb_presence(bool present, unsigned long ea)
{
#ifdef CONFIG_DEBUG_VM
        unsigned long tmp;

        WARN_ON_ONCE(mfmsr() & MSR_EE);

        if (!cpu_has_feature(CPU_FTR_ARCH_206))
                return;

        /*
         * slbfee. requires bit 24 (PPC bit 39) be clear in RB. Hardware
         * ignores all other bits from 0-27, so just clear them all.
         */
        ea &= ~((1UL << 28) - 1);
        asm volatile(__PPC_SLBFEE_DOT(%0, %1) : "=r"(tmp) : "r"(ea) : "cr0");

        WARN_ON(present == (tmp == 0));
#endif
}

static inline void slb_shadow_update(unsigned long ea, int ssize,
                                     unsigned long flags,
                                     enum slb_index index)
{
        struct slb_shadow *p = get_slb_shadow();

        /*
         * Clear the ESID first so the entry is not valid while we are
         * updating it.  No write barriers are needed here, provided
         * we only update the current CPU's SLB shadow buffer.
         */
        WRITE_ONCE(p->save_area[index].esid, 0);
        WRITE_ONCE(p->save_area[index].vsid, cpu_to_be64(mk_vsid_data(ea, ssize, flags)));
        WRITE_ONCE(p->save_area[index].esid, cpu_to_be64(mk_esid_data(ea, ssize, index)));
}

static inline void slb_shadow_clear(enum slb_index index)
{
        WRITE_ONCE(get_slb_shadow()->save_area[index].esid, cpu_to_be64(index));
}

static inline void create_shadowed_slbe(unsigned long ea, int ssize,
                                        unsigned long flags,
                                        enum slb_index index)
{
        /*
         * Updating the shadow buffer before writing the SLB ensures
         * we don't get a stale entry here if we get preempted by PHYP
         * between these two statements.
         */
        slb_shadow_update(ea, ssize, flags, index);

        assert_slb_presence(false, ea);
        asm volatile("slbmte %0,%1" :
                     : "r" (mk_vsid_data(ea, ssize, flags)),
                       "r" (mk_esid_data(ea, ssize, index))
                     : "memory" );
}

/*
 * Insert bolted entries into SLB (which may not be empty, so don't clear
 * slb_cache_ptr).
 */
void __slb_restore_bolted_realmode(void)
{
        struct slb_shadow *p = get_slb_shadow();
        enum slb_index index;

        /* No isync needed because realmode. */
        for (index = 0; index < SLB_NUM_BOLTED; index++) {
                asm volatile("slbmte %0,%1" :
                             : "r" (be64_to_cpu(p->save_area[index].vsid)),
                               "r" (be64_to_cpu(p->save_area[index].esid)));
        }

        assert_slb_presence(true, local_paca->kstack);
}

/*
 * Insert the bolted entries into an empty SLB.
 */
void slb_restore_bolted_realmode(void)
{
        __slb_restore_bolted_realmode();
        get_paca()->slb_cache_ptr = 0;

        get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;
        get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap;
}

/*
 * This flushes all SLB entries including 0, so it must be realmode.
 */
void slb_flush_all_realmode(void)
{
        asm volatile("slbmte %0,%0; slbia" : : "r" (0));
}

/*
 * This flushes non-bolted entries, it can be run in virtual mode. Must
 * be called with interrupts disabled.
 */
void slb_flush_and_restore_bolted(void)
{
        struct slb_shadow *p = get_slb_shadow();

        BUILD_BUG_ON(SLB_NUM_BOLTED != 2);

        WARN_ON(!irqs_disabled());

        /*
         * We can't take a PMU exception in the following code, so hard
         * disable interrupts.
         */
        hard_irq_disable();

        asm volatile("isync\n"
                     "slbia\n"
                     "slbmte %0, %1\n"
                     "isync\n"
                     :: "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].vsid)),
                        "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].esid))
                     : "memory");
        assert_slb_presence(true, get_paca()->kstack);

        get_paca()->slb_cache_ptr = 0;

        get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;
        get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap;
}

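/*
 * Snapshot the hardware SLB (via slbmfee/slbmfev) into @slb_ptr and save
 * slb_cache_ptr, so the state can be reported later by slb_dump_contents().
 */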
void slb_save_contents(struct slb_entry *slb_ptr)
{
        int i;
        unsigned long e, v;

        /* Save slb_cache_ptr value. */
        get_paca()->slb_save_cache_ptr = get_paca()->slb_cache_ptr;

        if (!slb_ptr)
                return;

        for (i = 0; i < mmu_slb_size; i++) {
                asm volatile("slbmfee %0,%1" : "=r" (e) : "r" (i));
                asm volatile("slbmfev %0,%1" : "=r" (v) : "r" (i));
                slb_ptr->esid = e;
                slb_ptr->vsid = v;
                slb_ptr++;
        }
}

void slb_dump_contents(struct slb_entry *slb_ptr)
{
        int i, n;
        unsigned long e, v;
        unsigned long llp;

        if (!slb_ptr)
                return;

        pr_err("SLB contents of cpu 0x%x\n", smp_processor_id());
        pr_err("Last SLB entry inserted at slot %d\n", get_paca()->stab_rr);

        for (i = 0; i < mmu_slb_size; i++) {
                e = slb_ptr->esid;
                v = slb_ptr->vsid;
                slb_ptr++;

                if (!e && !v)
                        continue;

                pr_err("%02d %016lx %016lx\n", i, e, v);

                if (!(e & SLB_ESID_V)) {
                        pr_err("\n");
                        continue;
                }
                llp = v & SLB_VSID_LLP;
                if (v & SLB_VSID_B_1T) {
                        pr_err("  1T  ESID=%9lx  VSID=%13lx LLP:%3lx\n",
                               GET_ESID_1T(e),
                               (v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T, llp);
                } else {
                        pr_err(" 256M ESID=%9lx  VSID=%13lx LLP:%3lx\n",
                               GET_ESID(e),
                               (v & ~SLB_VSID_B) >> SLB_VSID_SHIFT, llp);
                }
        }
        pr_err("----------------------------------\n");

        /* Dump slb cache entries as well. */
        pr_err("SLB cache ptr value = %d\n", get_paca()->slb_save_cache_ptr);
        pr_err("Valid SLB cache entries:\n");
        n = min_t(int, get_paca()->slb_save_cache_ptr, SLB_CACHE_ENTRIES);
        for (i = 0; i < n; i++)
                pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]);
        pr_err("Rest of SLB cache entries:\n");
        for (i = n; i < SLB_CACHE_ENTRIES; i++)
                pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]);
}

void slb_vmalloc_update(void)
{
        /*
         * vmalloc is not bolted, so just have to flush non-bolted.
         */
        slb_flush_and_restore_bolted();
}

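/*
 * The thread_info SLB preload cache is a small ring buffer of recently used
 * user ESIDs (slb_preload_esid[], managed by slb_preload_tail and
 * slb_preload_nr).  Segments recorded here are preloaded into the SLB when
 * switching to the task in switch_slb().
 */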
static bool preload_hit(struct thread_info *ti, unsigned long esid)
{
        unsigned char i;

        for (i = 0; i < ti->slb_preload_nr; i++) {
                unsigned char idx;

                idx = (ti->slb_preload_tail + i) % SLB_PRELOAD_NR;
                if (esid == ti->slb_preload_esid[idx])
                        return true;
        }
        return false;
}

static bool preload_add(struct thread_info *ti, unsigned long ea)
{
        unsigned char idx;
        unsigned long esid;

        if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) {
                /* EAs are stored >> 28 so 256MB segments don't need clearing */
                if (ea & ESID_MASK_1T)
                        ea &= ESID_MASK_1T;
        }

        esid = ea >> SID_SHIFT;

        if (preload_hit(ti, esid))
                return false;

        idx = (ti->slb_preload_tail + ti->slb_preload_nr) % SLB_PRELOAD_NR;
        ti->slb_preload_esid[idx] = esid;
        if (ti->slb_preload_nr == SLB_PRELOAD_NR)
                ti->slb_preload_tail = (ti->slb_preload_tail + 1) % SLB_PRELOAD_NR;
        else
                ti->slb_preload_nr++;

        return true;
}

static void preload_age(struct thread_info *ti)
{
        if (!ti->slb_preload_nr)
                return;
        ti->slb_preload_nr--;
        ti->slb_preload_tail = (ti->slb_preload_tail + 1) % SLB_PRELOAD_NR;
}

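/*
 * Preload the SLB segments a newly exec'd process is likely to touch first:
 * the traditional 0x10000000 link address and the mmap base used for
 * libraries and mmaps.
 */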
void slb_setup_new_exec(void)
{
        struct thread_info *ti = current_thread_info();
        struct mm_struct *mm = current->mm;
        unsigned long exec = 0x10000000;

        WARN_ON(irqs_disabled());

        /*
         * The preload cache can only be used to determine whether an SLB
         * entry exists as long as it does not overflow.
         */
        if (ti->slb_preload_nr + 2 > SLB_PRELOAD_NR)
                return;

        hard_irq_disable();

        /*
         * We have no good place to clear the slb preload cache on exec,
         * flush_thread is about the earliest arch hook but that happens
         * after we switch to the mm and have already preloaded the SLBEs.
         *
         * For the most part that's probably okay to use entries from the
         * previous exec, they will age out if unused. It may turn out to
         * be an advantage to clear the cache before switching to it,
         * however.
         */

        /*
         * Preload some userspace segments into the SLB.
         * Almost all 32 and 64bit PowerPC executables are linked at
         * 0x10000000 so it makes sense to preload this segment.
         */
        if (!is_kernel_addr(exec)) {
                if (preload_add(ti, exec))
                        slb_allocate_user(mm, exec);
        }

        /* Libraries and mmaps. */
        if (!is_kernel_addr(mm->mmap_base)) {
                if (preload_add(ti, mm->mmap_base))
                        slb_allocate_user(mm, mm->mmap_base);
        }

        /* see switch_slb */
        asm volatile("isync" : : : "memory");

        local_irq_enable();
}

void preload_new_slb_context(unsigned long start, unsigned long sp)
{
        struct thread_info *ti = current_thread_info();
        struct mm_struct *mm = current->mm;
        unsigned long heap = mm->start_brk;

        WARN_ON(irqs_disabled());

        /* see above */
        if (ti->slb_preload_nr + 3 > SLB_PRELOAD_NR)
                return;

        hard_irq_disable();

        /* Userspace entry address. */
        if (!is_kernel_addr(start)) {
                if (preload_add(ti, start))
                        slb_allocate_user(mm, start);
        }

        /* Top of stack, grows down. */
        if (!is_kernel_addr(sp)) {
                if (preload_add(ti, sp))
                        slb_allocate_user(mm, sp);
        }

        /* Bottom of heap, grows up. */
        if (heap && !is_kernel_addr(heap)) {
                if (preload_add(ti, heap))
                        slb_allocate_user(mm, heap);
        }

        /* see switch_slb */
        asm volatile("isync" : : : "memory");

        local_irq_enable();
}

/* Flush all user entries from the segment table of the current processor. */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
{
        struct thread_info *ti = task_thread_info(tsk);
        unsigned char i;

        /*
         * We need interrupts hard-disabled here, not just soft-disabled,
         * so that a PMU interrupt can't occur, which might try to access
         * user memory (to get a stack trace) and possibly cause an SLB miss
         * which would update the slb_cache/slb_cache_ptr fields in the PACA.
         */
        hard_irq_disable();
        asm volatile("isync" : : : "memory");
        if (cpu_has_feature(CPU_FTR_ARCH_300)) {
                /*
                 * SLBIA IH=3 invalidates all Class=1 SLBEs and their
                 * associated lookaside structures, which matches what
                 * switch_slb wants. So ARCH_300 does not use the slb
                 * cache.
                 */
                asm volatile(PPC_SLBIA(3));
        } else {
                unsigned long offset = get_paca()->slb_cache_ptr;

                if (!mmu_has_feature(MMU_FTR_NO_SLBIE_B) &&
                    offset <= SLB_CACHE_ENTRIES) {
                        unsigned long slbie_data = 0;

                        for (i = 0; i < offset; i++) {
                                unsigned long ea;

                                ea = (unsigned long)
                                        get_paca()->slb_cache[i] << SID_SHIFT;
                                /*
                                 * Could assert_slb_presence(true) here, but
                                 * hypervisor or machine check could have come
                                 * in and removed the entry at this point.
                                 */

                                slbie_data = ea;
                                slbie_data |= user_segment_size(slbie_data)
                                                << SLBIE_SSIZE_SHIFT;
                                slbie_data |= SLBIE_C; /* user slbs have C=1 */
                                asm volatile("slbie %0" : : "r" (slbie_data));
                        }

                        /* Workaround POWER5 < DD2.1 issue */
                        if (!cpu_has_feature(CPU_FTR_ARCH_207S) && offset == 1)
                                asm volatile("slbie %0" : : "r" (slbie_data));

                } else {
                        struct slb_shadow *p = get_slb_shadow();
                        unsigned long ksp_esid_data =
                                be64_to_cpu(p->save_area[KSTACK_INDEX].esid);
                        unsigned long ksp_vsid_data =
                                be64_to_cpu(p->save_area[KSTACK_INDEX].vsid);

                        asm volatile(PPC_SLBIA(1) "\n"
                                     "slbmte %0,%1\n"
                                     "isync"
                                     :: "r"(ksp_vsid_data),
                                        "r"(ksp_esid_data));

                        get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;
                }

                get_paca()->slb_cache_ptr = 0;
        }
        get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap;

        copy_mm_to_paca(mm);

        /*
         * We gradually age out SLBs after a number of context switches to
         * reduce reload overhead of unused entries (like we do with FP/VEC
         * reload). Each time we wrap 256 switches, take an entry out of the
         * SLB preload cache.
         */
        tsk->thread.load_slb++;
        if (!tsk->thread.load_slb) {
                unsigned long pc = KSTK_EIP(tsk);

                preload_age(ti);
                preload_add(ti, pc);
        }

        for (i = 0; i < ti->slb_preload_nr; i++) {
                unsigned char idx;
                unsigned long ea;

                idx = (ti->slb_preload_tail + i) % SLB_PRELOAD_NR;
                ea = (unsigned long)ti->slb_preload_esid[idx] << SID_SHIFT;

                slb_allocate_user(mm, ea);
        }

        /*
         * Synchronize slbmte preloads with possible subsequent user memory
         * address accesses by the kernel (user mode won't happen until
         * rfid, which is safe).
         */
        asm volatile("isync" : : : "memory");
}

void slb_set_size(u16 size)
{
        mmu_slb_size = size;
}

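/*
 * Boot-time SLB setup for this CPU: record the segment page size encodings,
 * reset the allocator state in the PACA, flush the SLB and bolt the kernel
 * linear mapping entry (plus, on secondary CPUs, the kernel stack entry).
 */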
void slb_initialize(void)
{
        unsigned long linear_llp, vmalloc_llp, io_llp;
        unsigned long lflags;
        static int slb_encoding_inited;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
        unsigned long vmemmap_llp;
#endif

        /* Prepare our SLB miss handler based on our page size */
        linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
        io_llp = mmu_psize_defs[mmu_io_psize].sllp;
        vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
        get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
        vmemmap_llp = mmu_psize_defs[mmu_vmemmap_psize].sllp;
#endif
        if (!slb_encoding_inited) {
                slb_encoding_inited = 1;
                pr_devel("SLB: linear  LLP = %04lx\n", linear_llp);
                pr_devel("SLB: io      LLP = %04lx\n", io_llp);
#ifdef CONFIG_SPARSEMEM_VMEMMAP
                pr_devel("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
#endif
        }

        get_paca()->stab_rr = SLB_NUM_BOLTED - 1;
        get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;
        get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap;

        lflags = SLB_VSID_KERNEL | linear_llp;

        /* Invalidate the entire SLB (even entry 0) & all the ERATS */
        asm volatile("isync":::"memory");
        asm volatile("slbmte %0,%0"::"r" (0) : "memory");
        asm volatile("isync; slbia; isync":::"memory");
        create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, LINEAR_INDEX);

        /*
         * For the boot cpu, we're running on the stack in init_thread_union,
         * which is in the first segment of the linear mapping, and also
         * get_paca()->kstack hasn't been initialized yet.
         * For secondary cpus, we need to bolt the kernel stack entry now.
         */
        slb_shadow_clear(KSTACK_INDEX);
        if (raw_smp_processor_id() != boot_cpuid &&
            (get_paca()->kstack & slb_esid_mask(mmu_kernel_ssize)) > PAGE_OFFSET)
                create_shadowed_slbe(get_paca()->kstack,
                                     mmu_kernel_ssize, lflags, KSTACK_INDEX);

        asm volatile("isync":::"memory");
}

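/*
 * Remember a newly inserted user ESID in the PACA slb_cache so that
 * switch_slb() can invalidate just those entries with slbie rather than
 * flushing the whole SLB.
 */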
static void slb_cache_update(unsigned long esid_data)
{
        int slb_cache_index;

        if (cpu_has_feature(CPU_FTR_ARCH_300))
                return; /* ISAv3.0B and later does not use slb_cache */

        /*
         * Now update slb cache entries
         */
        slb_cache_index = local_paca->slb_cache_ptr;
        if (slb_cache_index < SLB_CACHE_ENTRIES) {
                /*
                 * We have space in slb cache for optimized switch_slb().
                 * Top 36 bits from esid_data as per ISA
                 */
                local_paca->slb_cache[slb_cache_index++] = esid_data >> 28;
                local_paca->slb_cache_ptr++;
        } else {
                /*
                 * Our cache is full and the current cache content no longer
                 * reflects the active SLB contents. Bump the ptr so that
                 * switch_slb() will ignore the cache.
                 */
                local_paca->slb_cache_ptr = SLB_CACHE_ENTRIES + 1;
        }
}

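/*
 * Pick a slot for a new SLB entry: take a free slot from slb_used_bitmap
 * while one exists, otherwise fall back to round-robin replacement via
 * stab_rr.  Bolted slots are never handed out.
 */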
static enum slb_index alloc_slb_index(bool kernel)
{
        enum slb_index index;

        /*
         * The allocation bitmaps can become out of sync with the SLB
         * when the _switch code does an slbie while bolting a new stack
         * segment, which must not be anywhere else in the SLB. This leaves
         * a kernel allocated entry that is unused in the SLB. With very
         * large systems or small segment sizes, the bitmaps could slowly
         * fill with these entries. They will eventually be cleared out
         * by the round robin allocator in that case, so it's probably not
         * worth accounting for.
         */

        /*
         * SLBs beyond 32 entries are allocated with stab_rr only.
         * POWER7/8/9 have 32 SLB entries, this could be expanded if a
         * future CPU has more.
         */
        if (local_paca->slb_used_bitmap != U32_MAX) {
                index = ffz(local_paca->slb_used_bitmap);
                local_paca->slb_used_bitmap |= 1U << index;
                if (kernel)
                        local_paca->slb_kern_bitmap |= 1U << index;
        } else {
                /* round-robin replacement of slb starting at SLB_NUM_BOLTED. */
                index = local_paca->stab_rr;
                if (index < (mmu_slb_size - 1))
                        index++;
                else
                        index = SLB_NUM_BOLTED;
                local_paca->stab_rr = index;
                if (index < 32) {
                        if (kernel)
                                local_paca->slb_kern_bitmap |= 1U << index;
                        else
                                local_paca->slb_kern_bitmap &= ~(1U << index);
                }
        }
        BUG_ON(index < SLB_NUM_BOLTED);

        return index;
}

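/*
 * Allocate a slot and write a new SLB entry for (@context, @ea) with the
 * given segment size and protection/page-size @flags.  Returns 0 on success,
 * or -EFAULT if no VSID can be derived.
 */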
static long slb_insert_entry(unsigned long ea, unsigned long context,
                             unsigned long flags, int ssize, bool kernel)
{
        unsigned long vsid;
        unsigned long vsid_data, esid_data;
        enum slb_index index;

        vsid = get_vsid(context, ea, ssize);
        if (!vsid)
                return -EFAULT;

        /*
         * There must not be a kernel SLB fault in alloc_slb_index or before
         * slbmte here or the allocation bitmaps could get out of whack with
         * the SLB.
         *
         * User SLB faults or preloads take this path which might get inlined
         * into the caller, so add compiler barriers here to ensure unsafe
         * memory accesses do not come between.
         */
        barrier();

        index = alloc_slb_index(kernel);

        vsid_data = __mk_vsid_data(vsid, ssize, flags);
        esid_data = mk_esid_data(ea, ssize, index);

        /*
         * No need for an isync before or after this slbmte. The exception
         * we enter with and the rfid we exit with are context synchronizing.
         * User preloads should add isync afterwards in case the kernel
         * accesses user memory before it returns to userspace with rfid.
         */
        assert_slb_presence(false, ea);
        asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data));

        barrier();

        if (!kernel)
                slb_cache_update(esid_data);

        return 0;
}

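/*
 * Handle an SLB miss on a kernel address: pick the segment flags from the
 * region being accessed (linear map, vmemmap or vmalloc/ioremap) and insert
 * the entry.  Only bolted memory may be touched here.
 */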
static long slb_allocate_kernel(unsigned long ea, unsigned long id)
{
        unsigned long context;
        unsigned long flags;
        int ssize;

        if (id == KERNEL_REGION_ID) {

                /* We only support up to MAX_PHYSMEM_BITS */
                if ((ea & ~REGION_MASK) > (1UL << MAX_PHYSMEM_BITS))
                        return -EFAULT;

                flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_linear_psize].sllp;

#ifdef CONFIG_SPARSEMEM_VMEMMAP
        } else if (id == VMEMMAP_REGION_ID) {

                if ((ea & ~REGION_MASK) >= (1ULL << MAX_EA_BITS_PER_CONTEXT))
                        return -EFAULT;

                flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmemmap_psize].sllp;
#endif
        } else if (id == VMALLOC_REGION_ID) {

                if ((ea & ~REGION_MASK) >= (1ULL << MAX_EA_BITS_PER_CONTEXT))
                        return -EFAULT;

                if (ea < H_VMALLOC_END)
                        flags = local_paca->vmalloc_sllp;
                else
                        flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_io_psize].sllp;
        } else {
                return -EFAULT;
        }

        ssize = MMU_SEGSIZE_1T;
        if (!mmu_has_feature(MMU_FTR_1T_SEGMENT))
                ssize = MMU_SEGSIZE_256M;

        context = get_kernel_context(ea);
        return slb_insert_entry(ea, context, flags, ssize, true);
}

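/*
 * Handle an SLB miss on a user address: look up the context and slice page
 * size for @ea in @mm and insert the entry.
 */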
static long slb_allocate_user(struct mm_struct *mm, unsigned long ea)
{
        unsigned long context;
        unsigned long flags;
        int bpsize;
        int ssize;

        /*
         * Consider this a bad access if we take an SLB miss
         * on an address above the addr limit.
         */
        if (ea >= mm->context.slb_addr_limit)
                return -EFAULT;

        context = get_user_context(&mm->context, ea);
        if (!context)
                return -EFAULT;

        if (unlikely(ea >= H_PGTABLE_RANGE)) {
                WARN_ON(1);
                return -EFAULT;
        }

        ssize = user_segment_size(ea);

        bpsize = get_slice_psize(mm, ea);
        flags = SLB_VSID_USER | mmu_psize_defs[bpsize].sllp;

        return slb_insert_entry(ea, context, flags, ssize, false);
}

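/*
 * C handler for SLB miss interrupts.  Interrupts are not reconciled here.
 * Returns 0 on success, or a negative error which the low level handler
 * reports via do_bad_slb_fault().
 */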
long do_slb_fault(struct pt_regs *regs, unsigned long ea)
{
        unsigned long id = REGION_ID(ea);

        /* IRQs are not reconciled here, so can't check irqs_disabled */
        VM_WARN_ON(mfmsr() & MSR_EE);

        if (unlikely(!(regs->msr & MSR_RI)))
                return -EINVAL;

        /*
         * SLB kernel faults must be very careful not to touch anything
         * that is not bolted. E.g., PACA and global variables are okay,
         * mm->context stuff is not.
         *
         * SLB user faults can access all of kernel memory, but must be
         * careful not to touch things like IRQ state because it is not
         * "reconciled" here. The difficulty is that we must use
         * fast_exception_return to return from kernel SLB faults without
         * looking at possible non-bolted memory. We could test user vs
         * kernel faults in the interrupt handler asm and do a full fault,
         * reconcile, ret_from_except for user faults which would make them
         * first class kernel code. But for performance it's probably nicer
         * if they go via fast_exception_return too.
         */
        if (id >= KERNEL_REGION_ID) {
                long err;
#ifdef CONFIG_DEBUG_VM
                /* Catch recursive kernel SLB faults. */
                BUG_ON(local_paca->in_kernel_slb_handler);
                local_paca->in_kernel_slb_handler = 1;
#endif
                err = slb_allocate_kernel(ea, id);
#ifdef CONFIG_DEBUG_VM
                local_paca->in_kernel_slb_handler = 0;
#endif
                return err;
        } else {
                struct mm_struct *mm = current->mm;
                long err;

                if (unlikely(!mm))
                        return -EFAULT;

                err = slb_allocate_user(mm, ea);
                if (!err)
                        preload_add(current_thread_info(), ea);

                return err;
        }
}

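/*
 * Called when do_slb_fault() fails: -EFAULT becomes a SIGSEGV (or a kernel
 * page fault report), -EINVAL an unrecoverable exception.
 */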
void do_bad_slb_fault(struct pt_regs *regs, unsigned long ea, long err)
{
        if (err == -EFAULT) {
                if (user_mode(regs))
                        _exception(SIGSEGV, regs, SEGV_BNDERR, ea);
                else
                        bad_page_fault(regs, ea, SIGSEGV);
        } else if (err == -EINVAL) {
                unrecoverable_exception(regs);
        } else {
                BUG();
        }
}