/*
 * PowerPC64 SLB support.
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 * Based on earlier code written by:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */
#include <asm/asm-prototypes.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/ppc-opcode.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/mm_types.h>

#include <asm/udbg.h>
#include <asm/code-patching.h>
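
/*
 * Bolted SLB entries occupy fixed slots below SLB_NUM_BOLTED; they are
 * mirrored in the SLB shadow buffer and are never evicted by the
 * round-robin allocator.
 */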
enum slb_index {
	LINEAR_INDEX	= 0, /* Kernel linear map  (0xc000000000000000) */
	KSTACK_INDEX	= 1, /* Kernel stack map */
};
static long slb_allocate_user(struct mm_struct *mm, unsigned long ea);
#define slb_esid_mask(ssize)	\
	(((ssize) == MMU_SEGSIZE_256M) ? ESID_MASK : ESID_MASK_1T)
static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
					 enum slb_index index)
{
	return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | index;
}

static inline unsigned long __mk_vsid_data(unsigned long vsid, int ssize,
					   unsigned long flags)
{
	return (vsid << slb_vsid_shift(ssize)) | flags |
		((unsigned long)ssize << SLB_VSID_SSIZE_SHIFT);
}

static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
					 unsigned long flags)
{
	return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags);
}
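
/*
 * Debug helper: with CONFIG_DEBUG_VM, probe the SLB with slbfee. and warn
 * if an entry for @ea is present when it should not be (or vice versa).
 * Interrupts must be hard-disabled by the caller.
 */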
static void assert_slb_presence(bool present, unsigned long ea)
{
#ifdef CONFIG_DEBUG_VM
	unsigned long tmp;

	WARN_ON_ONCE(mfmsr() & MSR_EE);

	if (!cpu_has_feature(CPU_FTR_ARCH_206))
		return;

	/*
	 * slbfee. requires bit 24 (PPC bit 39) be clear in RB. Hardware
	 * ignores all other bits from 0-27, so just clear them all.
	 */
	ea &= ~((1UL << 28) - 1);
	asm volatile(__PPC_SLBFEE_DOT(%0, %1) : "=r"(tmp) : "r"(ea) : "cr0");

	WARN_ON(present == (tmp == 0));
#endif
}
static inline void slb_shadow_update(unsigned long ea, int ssize,
				     unsigned long flags,
				     enum slb_index index)
{
	struct slb_shadow *p = get_slb_shadow();

	/*
	 * Clear the ESID first so the entry is not valid while we are
	 * updating it.  No write barriers are needed here, provided
	 * we only update the current CPU's SLB shadow buffer.
	 */
	WRITE_ONCE(p->save_area[index].esid, 0);
	WRITE_ONCE(p->save_area[index].vsid, cpu_to_be64(mk_vsid_data(ea, ssize, flags)));
	WRITE_ONCE(p->save_area[index].esid, cpu_to_be64(mk_esid_data(ea, ssize, index)));
}
static inline void slb_shadow_clear(enum slb_index index)
{
	WRITE_ONCE(get_slb_shadow()->save_area[index].esid, cpu_to_be64(index));
}
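
/*
 * Install a bolted SLB entry, keeping the SLB shadow buffer in sync so the
 * bolted entries can be re-established from it (e.g. by
 * __slb_restore_bolted_realmode() or by the hypervisor).
 */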
static inline void create_shadowed_slbe(unsigned long ea, int ssize,
					unsigned long flags,
					enum slb_index index)
{
	/*
	 * Updating the shadow buffer before writing the SLB ensures
	 * we don't get a stale entry here if we get preempted by PHYP
	 * between these two statements.
	 */
	slb_shadow_update(ea, ssize, flags, index);

	assert_slb_presence(false, ea);
	asm volatile("slbmte  %0,%1" :
		     : "r" (mk_vsid_data(ea, ssize, flags)),
		       "r" (mk_esid_data(ea, ssize, index))
		     : "memory" );
}
/*
 * Insert bolted entries into SLB (which may not be empty, so don't clear
 * slb_cache_ptr).
 */
void __slb_restore_bolted_realmode(void)
{
	struct slb_shadow *p = get_slb_shadow();
	enum slb_index index;

	 /* No isync needed because realmode. */
	for (index = 0; index < SLB_NUM_BOLTED; index++) {
		asm volatile("slbmte  %0,%1" :
		     : "r" (be64_to_cpu(p->save_area[index].vsid)),
		       "r" (be64_to_cpu(p->save_area[index].esid)));
	}

	assert_slb_presence(true, local_paca->kstack);
}
/*
 * Insert the bolted entries into an empty SLB.
 */
void slb_restore_bolted_realmode(void)
{
	__slb_restore_bolted_realmode();
	get_paca()->slb_cache_ptr = 0;

	get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;
	get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap;
}
/*
 * This flushes all SLB entries including 0, so it must be realmode.
 */
void slb_flush_all_realmode(void)
{
	asm volatile("slbmte %0,%0; slbia" : : "r" (0));
}
/*
 * This flushes non-bolted entries, it can be run in virtual mode. Must
 * be called with interrupts disabled.
 */
void slb_flush_and_restore_bolted(void)
{
	struct slb_shadow *p = get_slb_shadow();

	BUILD_BUG_ON(SLB_NUM_BOLTED != 2);

	WARN_ON(!irqs_disabled());

	/*
	 * We can't take a PMU exception in the following code, so hard
	 * disable interrupts.
	 */
	hard_irq_disable();

	asm volatile("isync\n"
		     "slbia\n"
		     "slbmte  %0, %1\n"
		     "isync\n"
		     :: "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].vsid)),
			"r" (be64_to_cpu(p->save_area[KSTACK_INDEX].esid))
		     : "memory");
	assert_slb_presence(true, get_paca()->kstack);

	get_paca()->slb_cache_ptr = 0;

	get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;
	get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap;
}
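
/*
 * Read all SLB entries with slbmfee/slbmfev into @slb_ptr, and stash
 * slb_cache_ptr, so they can be dumped later (e.g. after a machine check).
 */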
void slb_save_contents(struct slb_entry *slb_ptr)
{
	int i;
	unsigned long e, v;

	/* Save slb_cache_ptr value. */
	get_paca()->slb_save_cache_ptr = get_paca()->slb_cache_ptr;

	if (!slb_ptr)
		return;

	for (i = 0; i < mmu_slb_size; i++) {
		asm volatile("slbmfee  %0,%1" : "=r" (e) : "r" (i));
		asm volatile("slbmfev  %0,%1" : "=r" (v) : "r" (i));
		slb_ptr->esid = e;
		slb_ptr->vsid = v;
		slb_ptr++;
	}
}
void slb_dump_contents(struct slb_entry *slb_ptr)
{
	int i, n;
	unsigned long e, v;
	unsigned long llp;

	if (!slb_ptr)
		return;

	pr_err("SLB contents of cpu 0x%x\n", smp_processor_id());
	pr_err("Last SLB entry inserted at slot %d\n", get_paca()->stab_rr);

	for (i = 0; i < mmu_slb_size; i++) {
		e = slb_ptr->esid;
		v = slb_ptr->vsid;
		slb_ptr++;

		if (!e && !v)
			continue;

		pr_err("%02d %016lx %016lx\n", i, e, v);

		if (!(e & SLB_ESID_V)) {
			pr_err("\n");
			continue;
		}
		llp = v & SLB_VSID_LLP;
		if (v & SLB_VSID_B_1T) {
			pr_err("     1T ESID=%9lx  VSID=%13lx LLP:%3lx\n",
			       GET_ESID_1T(e),
			       (v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T, llp);
		} else {
			pr_err("   256M ESID=%9lx  VSID=%13lx LLP:%3lx\n",
			       GET_ESID(e),
			       (v & ~SLB_VSID_B) >> SLB_VSID_SHIFT, llp);
		}
	}
	pr_err("----------------------------------\n");

	/* Dump slb cache entries as well. */
	pr_err("SLB cache ptr value = %d\n", get_paca()->slb_save_cache_ptr);
	pr_err("Valid SLB cache entries:\n");
	n = min_t(int, get_paca()->slb_save_cache_ptr, SLB_CACHE_ENTRIES);
	for (i = 0; i < n; i++)
		pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]);
	pr_err("Rest of SLB cache entries:\n");
	for (i = n; i < SLB_CACHE_ENTRIES; i++)
		pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]);
}
void slb_vmalloc_update(void)
{
	/*
	 * vmalloc is not bolted, so just have to flush non-bolted.
	 */
	slb_flush_and_restore_bolted();
}
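
/*
 * The thread_info SLB preload cache is a small FIFO of recently used user
 * ESIDs (stored >> SID_SHIFT). switch_slb() re-establishes these segments
 * for the incoming task and slowly ages the cache out, roughly one entry
 * per 256 context switches.
 */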
static bool preload_hit(struct thread_info *ti, unsigned long esid)
{
	unsigned char i;

	for (i = 0; i < ti->slb_preload_nr; i++) {
		unsigned char idx;

		idx = (ti->slb_preload_tail + i) % SLB_PRELOAD_NR;
		if (esid == ti->slb_preload_esid[idx])
			return true;
	}
	return false;
}

static bool preload_add(struct thread_info *ti, unsigned long ea)
{
	unsigned char idx;
	unsigned long esid;

	if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) {
		/* EAs are stored >> 28 so 256MB segments don't need clearing */
		if (ea & ESID_MASK_1T)
			ea &= ESID_MASK_1T;
	}

	esid = ea >> SID_SHIFT;

	if (preload_hit(ti, esid))
		return false;

	idx = (ti->slb_preload_tail + ti->slb_preload_nr) % SLB_PRELOAD_NR;
	ti->slb_preload_esid[idx] = esid;
	if (ti->slb_preload_nr == SLB_PRELOAD_NR)
		ti->slb_preload_tail = (ti->slb_preload_tail + 1) % SLB_PRELOAD_NR;
	else
		ti->slb_preload_nr++;

	return true;
}

static void preload_age(struct thread_info *ti)
{
	if (!ti->slb_preload_nr)
		return;
	ti->slb_preload_nr--;
	ti->slb_preload_tail = (ti->slb_preload_tail + 1) % SLB_PRELOAD_NR;
}
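
/*
 * Warm the preload cache for a freshly exec'd image: the usual executable
 * link address (0x10000000) and mmap_base are likely to be touched first.
 */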
void slb_setup_new_exec(void)
{
	struct thread_info *ti = current_thread_info();
	struct mm_struct *mm = current->mm;
	unsigned long exec = 0x10000000;

	WARN_ON(irqs_disabled());

	/*
	 * preload cache can only be used to determine whether an SLB
	 * entry exists if it does not start to overflow.
	 */
	if (ti->slb_preload_nr + 2 > SLB_PRELOAD_NR)
		return;

	hard_irq_disable();

	/*
	 * We have no good place to clear the slb preload cache on exec,
	 * flush_thread is about the earliest arch hook but that happens
	 * after we switch to the mm and have already preloaded the SLBEs.
	 *
	 * For the most part that's probably okay to use entries from the
	 * previous exec, they will age out if unused. It may turn out to
	 * be an advantage to clear the cache before switching to it,
	 * however.
	 */

	/*
	 * preload some userspace segments into the SLB.
	 * Almost all 32 and 64bit PowerPC executables are linked at
	 * 0x10000000 so it makes sense to preload this segment.
	 */
	if (!is_kernel_addr(exec)) {
		if (preload_add(ti, exec))
			slb_allocate_user(mm, exec);
	}

	/* Libraries and mmaps. */
	if (!is_kernel_addr(mm->mmap_base)) {
		if (preload_add(ti, mm->mmap_base))
			slb_allocate_user(mm, mm->mmap_base);
	}

	/* see switch_slb */
	asm volatile("isync" : : : "memory");

	local_irq_enable();
}
void preload_new_slb_context(unsigned long start, unsigned long sp)
{
	struct thread_info *ti = current_thread_info();
	struct mm_struct *mm = current->mm;
	unsigned long heap = mm->start_brk;

	WARN_ON(irqs_disabled());

	/* see above */
	if (ti->slb_preload_nr + 3 > SLB_PRELOAD_NR)
		return;

	hard_irq_disable();

	/* Userspace entry address. */
	if (!is_kernel_addr(start)) {
		if (preload_add(ti, start))
			slb_allocate_user(mm, start);
	}

	/* Top of stack, grows down. */
	if (!is_kernel_addr(sp)) {
		if (preload_add(ti, sp))
			slb_allocate_user(mm, sp);
	}

	/* Bottom of heap, grows up. */
	if (heap && !is_kernel_addr(heap)) {
		if (preload_add(ti, heap))
			slb_allocate_user(mm, heap);
	}

	/* see switch_slb */
	asm volatile("isync" : : : "memory");

	local_irq_enable();
}
/* Flush all user entries from the segment table of the current processor. */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
{
	struct thread_info *ti = task_thread_info(tsk);
	unsigned char i;

	/*
	 * We need interrupts hard-disabled here, not just soft-disabled,
	 * so that a PMU interrupt can't occur, which might try to access
	 * user memory (to get a stack trace) and possibly cause an SLB miss
	 * which would update the slb_cache/slb_cache_ptr fields in the PACA.
	 */
	hard_irq_disable();
	asm volatile("isync" : : : "memory");
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		/*
		 * SLBIA IH=3 invalidates all Class=1 SLBEs and their
		 * associated lookaside structures, which matches what
		 * switch_slb wants. So ARCH_300 does not use the slb
		 * cache.
		 */
		asm volatile(PPC_SLBIA(3));
	} else {
		unsigned long offset = get_paca()->slb_cache_ptr;

		if (!mmu_has_feature(MMU_FTR_NO_SLBIE_B) &&
		    offset <= SLB_CACHE_ENTRIES) {
			unsigned long slbie_data = 0;

			for (i = 0; i < offset; i++) {
				unsigned long ea;

				ea = (unsigned long)
					get_paca()->slb_cache[i] << SID_SHIFT;
				/*
				 * Could assert_slb_presence(true) here, but
				 * hypervisor or machine check could have come
				 * in and removed the entry at this point.
				 */

				slbie_data = ea;
				slbie_data |= user_segment_size(slbie_data)
						<< SLBIE_SSIZE_SHIFT;
				slbie_data |= SLBIE_C; /* user slbs have C=1 */
				asm volatile("slbie %0" : : "r" (slbie_data));
			}

			/* Workaround POWER5 < DD2.1 issue */
			if (!cpu_has_feature(CPU_FTR_ARCH_207S) && offset == 1)
				asm volatile("slbie %0" : : "r" (slbie_data));

		} else {
			struct slb_shadow *p = get_slb_shadow();
			unsigned long ksp_esid_data =
				be64_to_cpu(p->save_area[KSTACK_INDEX].esid);
			unsigned long ksp_vsid_data =
				be64_to_cpu(p->save_area[KSTACK_INDEX].vsid);

			asm volatile(PPC_SLBIA(1) "\n"
				     "slbmte	%0,%1\n"
				     "isync"
				     :: "r"(ksp_vsid_data),
					"r"(ksp_esid_data));
		}

		get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;
	}

	get_paca()->slb_cache_ptr = 0;

	get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap;

	/*
	 * We gradually age out SLBs after a number of context switches to
	 * reduce reload overhead of unused entries (like we do with FP/VEC
	 * reload). Each time we wrap 256 switches, take an entry out of the
	 * SLB preload cache.
	 */
	tsk->thread.load_slb++;
	if (!tsk->thread.load_slb) {
		unsigned long pc = KSTK_EIP(tsk);

		preload_age(ti);
		preload_add(ti, pc);
	}

	for (i = 0; i < ti->slb_preload_nr; i++) {
		unsigned char idx;
		unsigned long ea;

		idx = (ti->slb_preload_tail + i) % SLB_PRELOAD_NR;
		ea = (unsigned long)ti->slb_preload_esid[idx] << SID_SHIFT;

		slb_allocate_user(mm, ea);
	}

	/*
	 * Synchronize slbmte preloads with possible subsequent user memory
	 * address accesses by the kernel (user mode won't happen until
	 * rfid, which is safe).
	 */
	asm volatile("isync" : : : "memory");
}
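
/* Set the number of SLB entries, e.g. as reported by firmware. */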
void slb_set_size(u16 size)
{
	mmu_slb_size = size;
}
void slb_initialize(void)
{
	unsigned long linear_llp, vmalloc_llp, io_llp;
	unsigned long lflags;
	static int slb_encoding_inited;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	unsigned long vmemmap_llp;
#endif

	/* Prepare our SLB miss handler based on our page size */
	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
	io_llp = mmu_psize_defs[mmu_io_psize].sllp;
	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
	get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	vmemmap_llp = mmu_psize_defs[mmu_vmemmap_psize].sllp;
#endif
	if (!slb_encoding_inited) {
		slb_encoding_inited = 1;
		pr_devel("SLB: linear  LLP = %04lx\n", linear_llp);
		pr_devel("SLB: io      LLP = %04lx\n", io_llp);
#ifdef CONFIG_SPARSEMEM_VMEMMAP
		pr_devel("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
#endif
	}

	get_paca()->stab_rr = SLB_NUM_BOLTED - 1;
	get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;
	get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap;

	lflags = SLB_VSID_KERNEL | linear_llp;

	/* Invalidate the entire SLB (even entry 0) & all the ERATS */
	asm volatile("isync":::"memory");
	asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
	asm volatile("isync; slbia; isync":::"memory");
	create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, LINEAR_INDEX);

	/* For the boot cpu, we're running on the stack in init_thread_union,
	 * which is in the first segment of the linear mapping, and also
	 * get_paca()->kstack hasn't been initialized yet.
	 * For secondary cpus, we need to bolt the kernel stack entry now.
	 */
	slb_shadow_clear(KSTACK_INDEX);
	if (raw_smp_processor_id() != boot_cpuid &&
	    (get_paca()->kstack & slb_esid_mask(mmu_kernel_ssize)) > PAGE_OFFSET)
		create_shadowed_slbe(get_paca()->kstack,
				     mmu_kernel_ssize, lflags, KSTACK_INDEX);

	asm volatile("isync":::"memory");
}
static void slb_cache_update(unsigned long esid_data)
{
	int slb_cache_index;

	if (cpu_has_feature(CPU_FTR_ARCH_300))
		return; /* ISAv3.0B and later does not use slb_cache */

	/*
	 * Now update slb cache entries
	 */
	slb_cache_index = local_paca->slb_cache_ptr;
	if (slb_cache_index < SLB_CACHE_ENTRIES) {
		/*
		 * We have space in slb cache for optimized switch_slb().
		 * Top 36 bits from esid_data as per ISA
		 */
		local_paca->slb_cache[slb_cache_index++] = esid_data >> 28;
		local_paca->slb_cache_ptr++;
	} else {
		/*
		 * Our cache is full and the current cache content strictly
		 * doesn't indicate the active SLB contents. Bump the ptr
		 * so that switch_slb() will ignore the cache.
		 */
		local_paca->slb_cache_ptr = SLB_CACHE_ENTRIES + 1;
	}
}
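
/*
 * Choose a slot for a new SLB entry: take a free slot from slb_used_bitmap
 * if there is one, otherwise evict round-robin starting at SLB_NUM_BOLTED.
 */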
static enum slb_index alloc_slb_index(bool kernel)
{
	enum slb_index index;

	/*
	 * The allocation bitmaps can become out of sync with the SLB
	 * when the _switch code does slbie when bolting a new stack
	 * segment and it must not be anywhere else in the SLB. This leaves
	 * a kernel allocated entry that is unused in the SLB. With very
	 * large systems or small segment sizes, the bitmaps could slowly
	 * fill with these entries. They will eventually be cleared out
	 * by the round robin allocator in that case, so it's probably not
	 * worth accounting for.
	 */

	/*
	 * SLBs beyond 32 entries are allocated with stab_rr only.
	 * POWER7/8/9 have 32 SLB entries, this could be expanded if a
	 * future CPU has more.
	 */
	if (local_paca->slb_used_bitmap != U32_MAX) {
		index = ffz(local_paca->slb_used_bitmap);
		local_paca->slb_used_bitmap |= 1U << index;
		if (kernel)
			local_paca->slb_kern_bitmap |= 1U << index;
	} else {
		/* round-robin replacement of slb starting at SLB_NUM_BOLTED. */
		index = local_paca->stab_rr;
		if (index < (mmu_slb_size - 1))
			index++;
		else
			index = SLB_NUM_BOLTED;
		local_paca->stab_rr = index;
		if (index < 32) {
			if (kernel)
				local_paca->slb_kern_bitmap |= 1U << index;
			else
				local_paca->slb_kern_bitmap &= ~(1U << index);
		}
	}
	BUG_ON(index < SLB_NUM_BOLTED);

	return index;
}
static long slb_insert_entry(unsigned long ea, unsigned long context,
			     unsigned long flags, int ssize, bool kernel)
{
	unsigned long vsid;
	unsigned long vsid_data, esid_data;
	enum slb_index index;

	vsid = get_vsid(context, ea, ssize);
	if (!vsid)
		return -EFAULT;

	/*
	 * There must not be a kernel SLB fault in alloc_slb_index or before
	 * slbmte here or the allocation bitmaps could get out of whack with
	 * the SLB.
	 *
	 * User SLB faults or preloads take this path which might get inlined
	 * into the caller, so add compiler barriers here to ensure unsafe
	 * memory accesses do not come between.
	 */
	barrier();

	index = alloc_slb_index(kernel);

	vsid_data = __mk_vsid_data(vsid, ssize, flags);
	esid_data = mk_esid_data(ea, ssize, index);

	/*
	 * No need for an isync before or after this slbmte. The exception
	 * we enter with and the rfid we exit with are context synchronizing.
	 * User preloads should add isync afterwards in case the kernel
	 * accesses user memory before it returns to userspace with rfid.
	 */
	assert_slb_presence(false, ea);
	asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data));

	barrier();

	if (!kernel)
		slb_cache_update(esid_data);

	return 0;
}
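
/*
 * Construct and insert an SLB entry covering a kernel address in the
 * region given by @id (linear map, vmemmap, vmalloc or I/O).
 */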
static long slb_allocate_kernel(unsigned long ea, unsigned long id)
{
	unsigned long context;
	unsigned long flags;
	int ssize;

	if (id == KERNEL_REGION_ID) {

		/* We only support up to MAX_PHYSMEM_BITS */
		if ((ea & EA_MASK) > (1UL << MAX_PHYSMEM_BITS))
			return -EFAULT;

		flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_linear_psize].sllp;

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	} else if (id == VMEMMAP_REGION_ID) {

		if (ea >= H_VMEMMAP_END)
			return -EFAULT;

		flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmemmap_psize].sllp;
#endif
	} else if (id == VMALLOC_REGION_ID) {

		if (ea >= H_VMALLOC_END)
			return -EFAULT;

		flags = local_paca->vmalloc_sllp;

	} else if (id == IO_REGION_ID) {

		if (ea >= H_KERN_IO_END)
			return -EFAULT;

		flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_io_psize].sllp;

	} else {
		return -EFAULT;
	}

	ssize = MMU_SEGSIZE_1T;
	if (!mmu_has_feature(MMU_FTR_1T_SEGMENT))
		ssize = MMU_SEGSIZE_256M;

	context = get_kernel_context(ea);

	return slb_insert_entry(ea, context, flags, ssize, true);
}
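
/*
 * Construct and insert an SLB entry for a user address, using the segment
 * size and base page size of the slice containing @ea.
 */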
static long slb_allocate_user(struct mm_struct *mm, unsigned long ea)
{
	unsigned long context;
	unsigned long flags;
	int bpsize;
	int ssize;

	/*
	 * consider this as bad access if we take an SLB miss
	 * on an address above addr limit.
	 */
	if (ea >= mm_ctx_slb_addr_limit(&mm->context))
		return -EFAULT;

	context = get_user_context(&mm->context, ea);
	if (!context)
		return -EFAULT;

	if (unlikely(ea >= H_PGTABLE_RANGE)) {
		WARN_ON(1);
		return -EFAULT;
	}

	ssize = user_segment_size(ea);

	bpsize = get_slice_psize(mm, ea);
	flags = SLB_VSID_USER | mmu_psize_defs[bpsize].sllp;

	return slb_insert_entry(ea, context, flags, ssize, false);
}
long do_slb_fault(struct pt_regs *regs, unsigned long ea)
{
	unsigned long id = get_region_id(ea);

	/* IRQs are not reconciled here, so can't check irqs_disabled */
	VM_WARN_ON(mfmsr() & MSR_EE);

	if (unlikely(!(regs->msr & MSR_RI)))
		return -EINVAL;

	/*
	 * SLB kernel faults must be very careful not to touch anything
	 * that is not bolted. E.g., PACA and global variables are okay,
	 * mm->context stuff is not.
	 *
	 * SLB user faults can access all of kernel memory, but must be
	 * careful not to touch things like IRQ state because it is not
	 * "reconciled" here. The difficulty is that we must use
	 * fast_exception_return to return from kernel SLB faults without
	 * looking at possible non-bolted memory. We could test user vs
	 * kernel faults in the interrupt handler asm and do a full fault,
	 * reconcile, ret_from_except for user faults which would make them
	 * first class kernel code. But for performance it's probably nicer
	 * if they go via fast_exception_return too.
	 */
	if (id >= KERNEL_REGION_ID) {
		long err;
#ifdef CONFIG_DEBUG_VM
		/* Catch recursive kernel SLB faults. */
		BUG_ON(local_paca->in_kernel_slb_handler);
		local_paca->in_kernel_slb_handler = 1;
#endif
		err = slb_allocate_kernel(ea, id);
#ifdef CONFIG_DEBUG_VM
		local_paca->in_kernel_slb_handler = 0;
#endif
		return err;
	} else {
		struct mm_struct *mm = current->mm;
		long err;

		if (unlikely(!mm))
			return -EFAULT;

		err = slb_allocate_user(mm, ea);
		if (!err)
			preload_add(current_thread_info(), ea);

		return err;
	}
}
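
/*
 * Handle a failed SLB allocation: -EFAULT raises SIGSEGV (or reports a
 * kernel bad page fault), -EINVAL is unrecoverable, anything else is a bug.
 */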
void do_bad_slb_fault(struct pt_regs *regs, unsigned long ea, long err)
{
	if (err == -EFAULT) {
		if (user_mode(regs))
			_exception(SIGSEGV, regs, SEGV_BNDERR, ea);
		else
			bad_page_fault(regs, ea, SIGSEGV);
	} else if (err == -EINVAL) {
		unrecoverable_exception(regs);
	} else {
		BUG();
	}
}