// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * TLB flush routines for radix kernels.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>
#include <linux/mmu_context.h>
#include <linux/sched/mm.h>

#include <asm/ppc-opcode.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/trace.h>
#include <asm/cputhreads.h>

#define RIC_FLUSH_TLB 0
#define RIC_FLUSH_PWC 1
#define RIC_FLUSH_ALL 2

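/*
 * RIC (Radix Invalidation Control) selects what tlbie[l] invalidates:
 * 0 flushes TLB entries only, 1 flushes only the Page Walk Cache, and
 * 2 flushes TLB entries, the PWC and any applicable cached table
 * entries for the target PID/LPID.
 */
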
/*
 * tlbiel instruction for radix, set invalidation
 * i.e., r=1 and is=01 or is=10 or is=11
 */
static __always_inline void tlbiel_radix_set_isa300(unsigned int set, unsigned int is,
					unsigned int pid,
					unsigned int ric, unsigned int prs)
{
	unsigned long rb;
	unsigned long rs;

	rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53));
	rs = ((unsigned long)pid << PPC_BITLSHIFT(31));

	asm volatile(PPC_TLBIEL(%0, %1, %2, %3, 1)
		     : : "r"(rb), "r"(rs), "i"(ric), "i"(prs)
		     : "memory");
}

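/*
 * A note on the encodings above: PPC_BITLSHIFT(b) is 63 - b, converting
 * an IBM (MSB = bit 0) bit number into a left-shift amount. So, for
 * example, rs = ((unsigned long)pid << PPC_BITLSHIFT(31)) places the
 * PID in RS bits 0:31 (IBM numbering), i.e. pid = 5 yields rs = 5UL << 32.
 */
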
static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is)
{
	unsigned int set;

	asm volatile("ptesync": : :"memory");

	/*
	 * Flush the first set of the TLB, and the entire Page Walk Cache
	 * and partition table entries. Then flush the remaining sets of the
	 * TLB.
	 */

	if (early_cpu_has_feature(CPU_FTR_HVMODE)) {
		/* MSR[HV] should flush partition scope translations first. */
		tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 0);
		for (set = 1; set < num_sets; set++)
			tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 0);
	}

	/* Flush process scoped entries. */
	tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 1);
	for (set = 1; set < num_sets; set++)
		tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 1);

	asm volatile("ptesync": : :"memory");
}

void radix__tlbiel_all(unsigned int action)
{
	unsigned int is;

	switch (action) {
	case TLB_INVAL_SCOPE_GLOBAL:
		is = 3;
		break;
	case TLB_INVAL_SCOPE_LPID:
		is = 2;
		break;
	default:
		BUG();
	}

	if (early_cpu_has_feature(CPU_FTR_ARCH_300))
		tlbiel_all_isa300(POWER9_TLB_SETS_RADIX, is);
	else
		WARN(1, "%s called on pre-POWER9 CPU\n", __func__);

	asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT "; isync" : : :"memory");
}

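/*
 * The IS values used above select the invalidation scope: IS = 2
 * (TLB_INVAL_SCOPE_LPID) invalidates entries matching the current
 * LPID, while IS = 3 (TLB_INVAL_SCOPE_GLOBAL) invalidates all entries
 * in each congruence class regardless of matching.
 */
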
static __always_inline void __tlbiel_pid(unsigned long pid, int set,
				unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = PPC_BIT(53); /* IS = 1 */
	rb |= set << PPC_BITLSHIFT(51);
	rs = ((unsigned long)pid) << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 1, rb, rs, ric, prs, r);
}

static __always_inline void __tlbie_pid(unsigned long pid, unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = PPC_BIT(53); /* IS = 1 */
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 0, rb, rs, ric, prs, r);
}

static __always_inline void __tlbie_lpid(unsigned long lpid, unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = PPC_BIT(52); /* IS = 2 */
	rs = lpid;
	prs = 0; /* partition scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
}

static __always_inline void __tlbie_lpid_guest(unsigned long lpid, unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = PPC_BIT(52); /* IS = 2 */
	rs = lpid;
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
}

static __always_inline void __tlbiel_va(unsigned long va, unsigned long pid,
					unsigned long ap, unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 1, rb, rs, ric, prs, r);
}

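/*
 * For the VA-based variants here and below, RB carries the effective
 * page number (the low 12 bits of the address are masked off via
 * PPC_BITMASK(52, 63)) plus the AP field, which encodes the actual
 * page size being invalidated as returned by mmu_get_ap().
 */
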
static __always_inline void __tlbie_va(unsigned long va, unsigned long pid,
				       unsigned long ap, unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 0, rb, rs, ric, prs, r);
}

static __always_inline void __tlbie_lpid_va(unsigned long va, unsigned long lpid,
					    unsigned long ap, unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = lpid;
	prs = 0; /* partition scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
}

static inline void fixup_tlbie_va(unsigned long va, unsigned long pid,
				  unsigned long ap)
{
	if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
		asm volatile("ptesync": : :"memory");
		__tlbie_va(va, 0, ap, RIC_FLUSH_TLB);
	}

	if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
		asm volatile("ptesync": : :"memory");
		__tlbie_va(va, pid, ap, RIC_FLUSH_TLB);
	}
}

static inline void fixup_tlbie_va_range(unsigned long va, unsigned long pid,
					unsigned long ap)
{
	if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
		asm volatile("ptesync": : :"memory");
		__tlbie_pid(0, RIC_FLUSH_TLB);
	}

	if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
		asm volatile("ptesync": : :"memory");
		__tlbie_va(va, pid, ap, RIC_FLUSH_TLB);
	}
}

static inline void fixup_tlbie_pid(unsigned long pid)
{
	/*
	 * We can use any address for the invalidation, pick one which is
	 * probably unused as an optimisation.
	 */
	unsigned long va = ((1UL << 52) - 1);

	if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
		asm volatile("ptesync": : :"memory");
		__tlbie_pid(0, RIC_FLUSH_TLB);
	}

	if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
		asm volatile("ptesync": : :"memory");
		__tlbie_va(va, pid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB);
	}
}

static inline void fixup_tlbie_lpid_va(unsigned long va, unsigned long lpid,
				       unsigned long ap)
{
	if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
		asm volatile("ptesync": : :"memory");
		__tlbie_lpid_va(va, 0, ap, RIC_FLUSH_TLB);
	}

	if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
		asm volatile("ptesync": : :"memory");
		__tlbie_lpid_va(va, lpid, ap, RIC_FLUSH_TLB);
	}
}

static inline void fixup_tlbie_lpid(unsigned long lpid)
{
	/*
	 * We can use any address for the invalidation, pick one which is
	 * probably unused as an optimisation.
	 */
	unsigned long va = ((1UL << 52) - 1);

	if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
		asm volatile("ptesync": : :"memory");
		__tlbie_lpid(0, RIC_FLUSH_TLB);
	}

	if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
		asm volatile("ptesync": : :"memory");
		__tlbie_lpid_va(va, lpid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB);
	}
}

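/*
 * The fixup_tlbie_*() helpers above all follow one pattern for the
 * POWER9 tlbie errata: order the original invalidation with a ptesync,
 * then issue one more benign tlbie. For CPU_FTR_P9_TLBIE_ERAT_BUG the
 * extra flush targets PID/LPID 0; for CPU_FTR_P9_TLBIE_STQ_BUG it
 * repeats the original invalidation (using an address that is probably
 * unused for the PID/LPID-wide cases).
 */
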
/*
 * We use 128 sets in radix mode and 256 sets in HPT mode.
 */
static __always_inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
{
	int set;

	asm volatile("ptesync": : :"memory");

	/*
	 * Flush the first set of the TLB, and if we're doing a RIC_FLUSH_ALL,
	 * also flush the entire Page Walk Cache.
	 */
	__tlbiel_pid(pid, 0, ric);

	/* For PWC, only one flush is needed */
	if (ric == RIC_FLUSH_PWC) {
		asm volatile("ptesync": : :"memory");
		return;
	}

	/* For the remaining sets, just flush the TLB */
	for (set = 1; set < POWER9_TLB_SETS_RADIX; set++)
		__tlbiel_pid(pid, set, RIC_FLUSH_TLB);

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_RADIX_INVALIDATE_ERAT_USER "; isync" : : :"memory");
}

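/*
 * For example, _tlbiel_pid(pid, RIC_FLUSH_ALL) on POWER9 issues one
 * tlbiel with RIC=2 against set 0 (which also covers the PWC, hence
 * the single flush above) followed by 127 RIC=0 flushes, one per
 * remaining TLB set, and finally a user ERAT flush.
 */
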
static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
{
	asm volatile("ptesync": : :"memory");

	/*
	 * Workaround the fact that the "ric" argument to __tlbie_pid
	 * must be a compile-time constant to match the "i" constraint
	 * in the asm statement.
	 */
	switch (ric) {
	case RIC_FLUSH_TLB:
		__tlbie_pid(pid, RIC_FLUSH_TLB);
		fixup_tlbie_pid(pid);
		break;
	case RIC_FLUSH_PWC:
		__tlbie_pid(pid, RIC_FLUSH_PWC);
		break;
	case RIC_FLUSH_ALL:
	default:
		__tlbie_pid(pid, RIC_FLUSH_ALL);
		fixup_tlbie_pid(pid);
	}
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

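/*
 * Sequencing for the global variants, here and below: the leading
 * ptesync makes prior PTE stores visible to the table walkers before
 * the tlbie is broadcast, and the trailing eieio; tlbsync; ptesync
 * sequence waits for the invalidation to complete on all processors.
 */
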
struct tlbiel_pid {
	unsigned long pid;
	unsigned long ric;
};

static void do_tlbiel_pid(void *info)
{
	struct tlbiel_pid *t = info;

	if (t->ric == RIC_FLUSH_TLB)
		_tlbiel_pid(t->pid, RIC_FLUSH_TLB);
	else if (t->ric == RIC_FLUSH_PWC)
		_tlbiel_pid(t->pid, RIC_FLUSH_PWC);
	else
		_tlbiel_pid(t->pid, RIC_FLUSH_ALL);
}

static inline void _tlbiel_pid_multicast(struct mm_struct *mm,
				unsigned long pid, unsigned long ric)
{
	struct cpumask *cpus = mm_cpumask(mm);
	struct tlbiel_pid t = { .pid = pid, .ric = ric };

	on_each_cpu_mask(cpus, do_tlbiel_pid, &t, 1);
	/*
	 * Always want the CPU translations to be invalidated with tlbiel in
	 * these paths, so while coprocessors must use tlbie, we cannot
	 * optimise away the tlbiel component.
	 */
	if (atomic_read(&mm->context.copros) > 0)
		_tlbie_pid(pid, RIC_FLUSH_ALL);
}

static inline void _tlbie_lpid(unsigned long lpid, unsigned long ric)
{
	asm volatile("ptesync": : :"memory");

	/*
	 * Workaround the fact that the "ric" argument to __tlbie_lpid
	 * must be a compile-time constant to match the "i" constraint
	 * in the asm statement.
	 */
	switch (ric) {
	case RIC_FLUSH_TLB:
		__tlbie_lpid(lpid, RIC_FLUSH_TLB);
		fixup_tlbie_lpid(lpid);
		break;
	case RIC_FLUSH_PWC:
		__tlbie_lpid(lpid, RIC_FLUSH_PWC);
		break;
	case RIC_FLUSH_ALL:
	default:
		__tlbie_lpid(lpid, RIC_FLUSH_ALL);
		fixup_tlbie_lpid(lpid);
	}
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

static __always_inline void _tlbie_lpid_guest(unsigned long lpid, unsigned long ric)
{
	/*
	 * Workaround the fact that the "ric" argument to __tlbie_lpid_guest
	 * must be a compile-time constant to match the "i" constraint
	 * in the asm statement.
	 */
	switch (ric) {
	case RIC_FLUSH_TLB:
		__tlbie_lpid_guest(lpid, RIC_FLUSH_TLB);
		break;
	case RIC_FLUSH_PWC:
		__tlbie_lpid_guest(lpid, RIC_FLUSH_PWC);
		break;
	case RIC_FLUSH_ALL:
	default:
		__tlbie_lpid_guest(lpid, RIC_FLUSH_ALL);
	}
	fixup_tlbie_lpid(lpid);
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

static inline void __tlbiel_va_range(unsigned long start, unsigned long end,
				     unsigned long pid, unsigned long page_size,
				     unsigned long psize)
{
	unsigned long addr;
	unsigned long ap = mmu_get_ap(psize);

	for (addr = start; addr < end; addr += page_size)
		__tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
}

static __always_inline void _tlbiel_va(unsigned long va, unsigned long pid,
				       unsigned long psize, unsigned long ric)
{
	unsigned long ap = mmu_get_ap(psize);

	asm volatile("ptesync": : :"memory");
	__tlbiel_va(va, pid, ap, ric);
	asm volatile("ptesync": : :"memory");
}

static inline void _tlbiel_va_range(unsigned long start, unsigned long end,
				    unsigned long pid, unsigned long page_size,
				    unsigned long psize, bool also_pwc)
{
	asm volatile("ptesync": : :"memory");
	if (also_pwc)
		__tlbiel_pid(pid, 0, RIC_FLUSH_PWC);
	__tlbiel_va_range(start, end, pid, page_size, psize);
	asm volatile("ptesync": : :"memory");
}

static inline void __tlbie_va_range(unsigned long start, unsigned long end,
				    unsigned long pid, unsigned long page_size,
				    unsigned long psize)
{
	unsigned long addr;
	unsigned long ap = mmu_get_ap(psize);

	for (addr = start; addr < end; addr += page_size)
		__tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);

	fixup_tlbie_va_range(addr - page_size, pid, ap);
}

static __always_inline void _tlbie_va(unsigned long va, unsigned long pid,
				      unsigned long psize, unsigned long ric)
{
	unsigned long ap = mmu_get_ap(psize);

	asm volatile("ptesync": : :"memory");
	__tlbie_va(va, pid, ap, ric);
	fixup_tlbie_va(va, pid, ap);
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

struct tlbiel_va {
	unsigned long pid;
	unsigned long va;
	unsigned long psize;
	unsigned long ric;
};

static void do_tlbiel_va(void *info)
{
	struct tlbiel_va *t = info;

	if (t->ric == RIC_FLUSH_TLB)
		_tlbiel_va(t->va, t->pid, t->psize, RIC_FLUSH_TLB);
	else if (t->ric == RIC_FLUSH_PWC)
		_tlbiel_va(t->va, t->pid, t->psize, RIC_FLUSH_PWC);
	else
		_tlbiel_va(t->va, t->pid, t->psize, RIC_FLUSH_ALL);
}

static inline void _tlbiel_va_multicast(struct mm_struct *mm,
				unsigned long va, unsigned long pid,
				unsigned long psize, unsigned long ric)
{
	struct cpumask *cpus = mm_cpumask(mm);
	struct tlbiel_va t = { .va = va, .pid = pid, .psize = psize, .ric = ric };

	on_each_cpu_mask(cpus, do_tlbiel_va, &t, 1);
	if (atomic_read(&mm->context.copros) > 0)
		_tlbie_va(va, pid, psize, RIC_FLUSH_TLB);
}

struct tlbiel_va_range {
	unsigned long pid;
	unsigned long start;
	unsigned long end;
	unsigned long page_size;
	unsigned long psize;
	bool also_pwc;
};

static void do_tlbiel_va_range(void *info)
{
	struct tlbiel_va_range *t = info;

	_tlbiel_va_range(t->start, t->end, t->pid, t->page_size,
			 t->psize, t->also_pwc);
}

static __always_inline void _tlbie_lpid_va(unsigned long va, unsigned long lpid,
					   unsigned long psize, unsigned long ric)
{
	unsigned long ap = mmu_get_ap(psize);

	asm volatile("ptesync": : :"memory");
	__tlbie_lpid_va(va, lpid, ap, ric);
	fixup_tlbie_lpid_va(va, lpid, ap);
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

static inline void _tlbie_va_range(unsigned long start, unsigned long end,
				   unsigned long pid, unsigned long page_size,
				   unsigned long psize, bool also_pwc)
{
	asm volatile("ptesync": : :"memory");
	if (also_pwc)
		__tlbie_pid(pid, RIC_FLUSH_PWC);
	__tlbie_va_range(start, end, pid, page_size, psize);
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

static inline void _tlbiel_va_range_multicast(struct mm_struct *mm,
				unsigned long start, unsigned long end,
				unsigned long pid, unsigned long page_size,
				unsigned long psize, bool also_pwc)
{
	struct cpumask *cpus = mm_cpumask(mm);
	struct tlbiel_va_range t = { .start = start, .end = end,
				.pid = pid, .page_size = page_size,
				.psize = psize, .also_pwc = also_pwc };

	on_each_cpu_mask(cpus, do_tlbiel_va_range, &t, 1);
	if (atomic_read(&mm->context.copros) > 0)
		_tlbie_va_range(start, end, pid, page_size, psize, also_pwc);
}

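/*
 * The *_multicast() variants above implement broadcast-free
 * invalidation: rather than one global tlbie, an IPI runs the local
 * tlbiel sequence on every CPU in mm_cpumask(). Coprocessors cannot
 * snoop tlbiel, so mms with active copros still get a tlbie on top.
 */
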
/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */
void radix__local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_pid(pid, RIC_FLUSH_TLB);
	preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_tlb_mm);

#ifndef CONFIG_SMP
void radix__local_flush_all_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
	preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_all_mm);
#endif /* CONFIG_SMP */

void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				       int psize)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
	preempt_enable();
}

void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	/* need the return fix for nohash.c */
	if (is_vm_hugetlb_page(vma))
		return radix__local_flush_hugetlb_page(vma, vmaddr);
#endif
	radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__local_flush_tlb_page);

static bool mm_is_singlethreaded(struct mm_struct *mm)
{
	if (atomic_read(&mm->context.copros) > 0)
		return false;
	if (atomic_read(&mm->mm_users) <= 1 && current->mm == mm)
		return true;
	return false;
}

static bool mm_needs_flush_escalation(struct mm_struct *mm)
{
	/*
	 * The P9 nest MMU has issues with the page walk cache caching PTEs
	 * and not flushing them properly when RIC = 0 for a PID/LPID
	 * invalidate.
	 */
	if (atomic_read(&mm->context.copros) > 0)
		return true;
	return false;
}

#ifdef CONFIG_SMP
static void do_exit_flush_lazy_tlb(void *arg)
{
	struct mm_struct *mm = arg;
	unsigned long pid = mm->context.id;

	if (current->mm == mm)
		return; /* Local CPU */

	if (current->active_mm == mm) {
		/*
		 * Must be a kernel thread because sender is single-threaded.
		 */
		BUG_ON(current->mm);
		mmgrab(&init_mm);
		switch_mm(mm, &init_mm, current);
		current->active_mm = &init_mm;
		mmdrop(mm);
	}
	_tlbiel_pid(pid, RIC_FLUSH_ALL);
}

static void exit_flush_lazy_tlbs(struct mm_struct *mm)
{
	/*
	 * Would be nice if this was async so it could be run in
	 * parallel with our local flush, but generic code does not
	 * give a good API for it. Could extend the generic code or
	 * make a special powerpc IPI for flushing TLBs.
	 * For now it's not too performance critical.
	 */
	smp_call_function_many(mm_cpumask(mm), do_exit_flush_lazy_tlb,
				(void *)mm, 1);
	mm_reset_thread_local(mm);
}

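/*
 * Once the IPIs above return, no other CPU can have this mm active
 * (lazy-tlb users have switched to init_mm and flushed), so callers
 * may downgrade to local tlbiel flushes afterwards.
 */
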
void radix__flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	/*
	 * Order loads of mm_cpumask vs previous stores to clear ptes before
	 * the invalidate. See barrier in switch_mm_irqs_off.
	 */
	smp_mb();
	if (!mm_is_thread_local(mm)) {
		if (unlikely(mm_is_singlethreaded(mm))) {
			exit_flush_lazy_tlbs(mm);
			goto local;
		}

		if (cputlb_use_tlbie()) {
			if (mm_needs_flush_escalation(mm))
				_tlbie_pid(pid, RIC_FLUSH_ALL);
			else
				_tlbie_pid(pid, RIC_FLUSH_TLB);
		} else {
			_tlbiel_pid_multicast(mm, pid, RIC_FLUSH_TLB);
		}
	} else {
local:
		_tlbiel_pid(pid, RIC_FLUSH_TLB);
	}
	preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_mm);

static void __flush_all_mm(struct mm_struct *mm, bool fullmm)
{
	unsigned long pid;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	smp_mb(); /* see radix__flush_tlb_mm */
	if (!mm_is_thread_local(mm)) {
		if (unlikely(mm_is_singlethreaded(mm))) {
			if (!fullmm) {
				exit_flush_lazy_tlbs(mm);
				goto local;
			}
		}
		if (cputlb_use_tlbie())
			_tlbie_pid(pid, RIC_FLUSH_ALL);
		else
			_tlbiel_pid_multicast(mm, pid, RIC_FLUSH_ALL);
	} else {
local:
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
	}
	preempt_enable();
}

void radix__flush_all_mm(struct mm_struct *mm)
{
	__flush_all_mm(mm, false);
}
EXPORT_SYMBOL(radix__flush_all_mm);

void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
{
	tlb->need_flush_all = 1;
}
EXPORT_SYMBOL(radix__flush_tlb_pwc);

void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				 int psize)
{
	unsigned long pid;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	smp_mb(); /* see radix__flush_tlb_mm */
	if (!mm_is_thread_local(mm)) {
		if (unlikely(mm_is_singlethreaded(mm))) {
			exit_flush_lazy_tlbs(mm);
			goto local;
		}
		if (cputlb_use_tlbie())
			_tlbie_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
		else
			_tlbiel_va_multicast(mm, vmaddr, pid, psize, RIC_FLUSH_TLB);
	} else {
local:
		_tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
	}
	preempt_enable();
}

void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (is_vm_hugetlb_page(vma))
		return radix__flush_hugetlb_page(vma, vmaddr);
#endif
	radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__flush_tlb_page);

#else /* CONFIG_SMP */
#define radix__flush_all_mm radix__local_flush_all_mm
#endif /* CONFIG_SMP */

static void do_tlbiel_kernel(void *info)
{
	_tlbiel_pid(0, RIC_FLUSH_ALL);
}

static inline void _tlbiel_kernel_broadcast(void)
{
	on_each_cpu(do_tlbiel_kernel, NULL, 1);
	if (tlbie_capable) {
		/*
		 * Coherent accelerators don't refcount kernel memory mappings,
		 * so have to always issue a tlbie for them. This is quite a
		 * slow path anyway.
		 */
		_tlbie_pid(0, RIC_FLUSH_ALL);
	}
}

/*
 * If kernel TLBIs ever become local rather than global, then
 * drivers/misc/ocxl/link.c:ocxl_link_add_pe will need some work, as it
 * assumes kernel TLBIs are global.
 */
void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	if (cputlb_use_tlbie())
		_tlbie_pid(0, RIC_FLUSH_ALL);
	else
		_tlbiel_kernel_broadcast();
}
EXPORT_SYMBOL(radix__flush_tlb_kernel_range);

#define TLB_FLUSH_ALL -1UL

/*
 * Number of pages above which we invalidate the entire PID rather than
 * flush individual pages, for local and global flushes respectively.
 *
 * tlbie goes out to the interconnect and individual ops are more costly.
 * It also does not iterate over sets like the local tlbiel variant when
 * invalidating a full PID, so it has a far lower threshold to change from
 * individual page flushes to full-PID flushes.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
static unsigned long tlb_local_single_page_flush_ceiling __read_mostly = POWER9_TLB_SETS_RADIX * 2;

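/*
 * Worked example of these thresholds, assuming a 64K base page size:
 * invalidating a 1MB range is 16 pages, under both ceilings, so both
 * the local and global paths flush page by page. A 4MB range is 64
 * pages: a global flush escalates to a full-PID tlbie (64 > 33) while
 * a local flush (64 < 256) still goes page by page.
 */
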
static inline void __radix__flush_tlb_range(struct mm_struct *mm,
					    unsigned long start, unsigned long end,
					    bool flush_all_sizes)
{
	unsigned long pid;
	unsigned int page_shift = mmu_psize_defs[mmu_virtual_psize].shift;
	unsigned long page_size = 1UL << page_shift;
	unsigned long nr_pages = (end - start) >> page_shift;
	bool local, full;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	smp_mb(); /* see radix__flush_tlb_mm */
	if (!mm_is_thread_local(mm)) {
		if (unlikely(mm_is_singlethreaded(mm))) {
			if (end != TLB_FLUSH_ALL) {
				exit_flush_lazy_tlbs(mm);
				goto is_local;
			}
		}
		local = false;
		full = (end == TLB_FLUSH_ALL ||
				nr_pages > tlb_single_page_flush_ceiling);
	} else {
is_local:
		local = true;
		full = (end == TLB_FLUSH_ALL ||
				nr_pages > tlb_local_single_page_flush_ceiling);
	}

	if (full) {
		if (local) {
			_tlbiel_pid(pid, RIC_FLUSH_TLB);
		} else {
			if (cputlb_use_tlbie()) {
				if (mm_needs_flush_escalation(mm))
					_tlbie_pid(pid, RIC_FLUSH_ALL);
				else
					_tlbie_pid(pid, RIC_FLUSH_TLB);
			} else {
				_tlbiel_pid_multicast(mm, pid, RIC_FLUSH_TLB);
			}
		}
	} else {
		bool hflush = flush_all_sizes;
		bool gflush = flush_all_sizes;
		unsigned long hstart, hend;
		unsigned long gstart, gend;

		if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
			hflush = true;

		if (hflush) {
			hstart = (start + PMD_SIZE - 1) & PMD_MASK;
			hend = end & PMD_MASK;
			if (hstart == hend)
				hflush = false;
		}

		if (gflush) {
			gstart = (start + PUD_SIZE - 1) & PUD_MASK;
			gend = end & PUD_MASK;
			if (gstart == gend)
				gflush = false;
		}

		if (local) {
			asm volatile("ptesync": : :"memory");
			__tlbiel_va_range(start, end, pid, page_size, mmu_virtual_psize);
			if (hflush)
				__tlbiel_va_range(hstart, hend, pid,
						PMD_SIZE, MMU_PAGE_2M);
			if (gflush)
				__tlbiel_va_range(gstart, gend, pid,
						PUD_SIZE, MMU_PAGE_1G);
			asm volatile("ptesync": : :"memory");
		} else if (cputlb_use_tlbie()) {
			asm volatile("ptesync": : :"memory");
			__tlbie_va_range(start, end, pid, page_size, mmu_virtual_psize);
			if (hflush)
				__tlbie_va_range(hstart, hend, pid,
						PMD_SIZE, MMU_PAGE_2M);
			if (gflush)
				__tlbie_va_range(gstart, gend, pid,
						PUD_SIZE, MMU_PAGE_1G);

			asm volatile("eieio; tlbsync; ptesync": : :"memory");
		} else {
			_tlbiel_va_range_multicast(mm,
					start, end, pid, page_size, mmu_virtual_psize, false);
			if (hflush)
				_tlbiel_va_range_multicast(mm,
					hstart, hend, pid, PMD_SIZE, MMU_PAGE_2M, false);
			if (gflush)
				_tlbiel_va_range_multicast(mm,
					gstart, gend, pid, PUD_SIZE, MMU_PAGE_1G, false);
		}
	}
	preempt_enable();
}

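/*
 * Note on the else branch above: when flushing page by page, the THP
 * (PMD_SIZE, 2M) and 1G (PUD_SIZE) aligned sub-ranges are invalidated
 * as well whenever hugepages may be present, because the caller cannot
 * tell which page sizes currently back the range.
 */
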
void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (is_vm_hugetlb_page(vma))
		return radix__flush_hugetlb_tlb_range(vma, start, end);
#endif

	__radix__flush_tlb_range(vma->vm_mm, start, end, false);
}
EXPORT_SYMBOL(radix__flush_tlb_range);

static int radix_get_mmu_psize(int page_size)
{
	int psize;

	if (page_size == (1UL << mmu_psize_defs[mmu_virtual_psize].shift))
		psize = mmu_virtual_psize;
	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_2M].shift))
		psize = MMU_PAGE_2M;
	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_1G].shift))
		psize = MMU_PAGE_1G;
	else
		return -1;
	return psize;
}

/*
 * Flush partition scoped LPID address translation for all CPUs.
 */
void radix__flush_tlb_lpid_page(unsigned int lpid,
					unsigned long addr,
					unsigned long page_size)
{
	int psize = radix_get_mmu_psize(page_size);

	_tlbie_lpid_va(addr, lpid, psize, RIC_FLUSH_TLB);
}
EXPORT_SYMBOL_GPL(radix__flush_tlb_lpid_page);

/*
 * Flush partition scoped PWC from LPID for all CPUs.
 */
void radix__flush_pwc_lpid(unsigned int lpid)
{
	_tlbie_lpid(lpid, RIC_FLUSH_PWC);
}
EXPORT_SYMBOL_GPL(radix__flush_pwc_lpid);

/*
 * Flush partition scoped translations from LPID (=LPIDR).
 */
void radix__flush_all_lpid(unsigned int lpid)
{
	_tlbie_lpid(lpid, RIC_FLUSH_ALL);
}
EXPORT_SYMBOL_GPL(radix__flush_all_lpid);

/*
 * Flush process scoped translations from LPID (=LPIDR).
 */
void radix__flush_all_lpid_guest(unsigned int lpid)
{
	_tlbie_lpid_guest(lpid, RIC_FLUSH_ALL);
}

static void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start,
				  unsigned long end, int psize);

void radix__tlb_flush(struct mmu_gather *tlb)
{
	int psize = 0;
	struct mm_struct *mm = tlb->mm;
	int page_size = tlb->page_size;
	unsigned long start = tlb->start;
	unsigned long end = tlb->end;

	/*
	 * If page size is not something we understand, do a full mm flush.
	 *
	 * A "fullmm" flush must always do a flush_all_mm (RIC=2) flush
	 * that flushes the process table entry cache upon process teardown.
	 * See the comment for radix in arch_exit_mmap().
	 */
	if (tlb->fullmm) {
		__flush_all_mm(mm, true);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
	} else if (mm_tlb_flush_nested(mm)) {
		/*
		 * If there is a concurrent invalidation that is clearing ptes,
		 * then it's possible this invalidation will miss one of those
		 * cleared ptes and miss flushing the TLB. If this invalidate
		 * returns before the other one flushes TLBs, that can result
		 * in it returning while there are still valid TLBs inside the
		 * range to be invalidated.
		 *
		 * See mm/memory.c:tlb_finish_mmu() for more details.
		 *
		 * The solution to this is to ensure the entire range is always
		 * flushed here. The problem for powerpc is that the flushes
		 * are page size specific, so this "forced flush" would not
		 * do the right thing if there are a mix of page sizes in
		 * the range to be invalidated. So use __flush_tlb_range
		 * which invalidates all possible page sizes in the range.
		 *
		 * PWC flush probably is not required because the core code
		 * shouldn't free page tables in this path, but accounting
		 * for the possibility makes us a bit more robust.
		 *
		 * need_flush_all is an uncommon case because page table
		 * teardown should be done with exclusive locks held (but
		 * after locks are dropped another invalidate could come
		 * in), it could be optimized further if necessary.
		 */
		if (!tlb->need_flush_all)
			__radix__flush_tlb_range(mm, start, end, true);
		else
			radix__flush_all_mm(mm);
#endif
	} else if ((psize = radix_get_mmu_psize(page_size)) == -1) {
		if (!tlb->need_flush_all)
			radix__flush_tlb_mm(mm);
		else
			radix__flush_all_mm(mm);
	} else {
		if (!tlb->need_flush_all)
			radix__flush_tlb_range_psize(mm, start, end, psize);
		else
			radix__flush_tlb_pwc_range_psize(mm, start, end, psize);
	}
	tlb->need_flush_all = 0;
}

static __always_inline void __radix__flush_tlb_range_psize(struct mm_struct *mm,
				unsigned long start, unsigned long end,
				int psize, bool also_pwc)
{
	unsigned long pid;
	unsigned int page_shift = mmu_psize_defs[psize].shift;
	unsigned long page_size = 1UL << page_shift;
	unsigned long nr_pages = (end - start) >> page_shift;
	bool local, full;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	smp_mb(); /* see radix__flush_tlb_mm */
	if (!mm_is_thread_local(mm)) {
		if (unlikely(mm_is_singlethreaded(mm))) {
			if (end != TLB_FLUSH_ALL) {
				exit_flush_lazy_tlbs(mm);
				goto is_local;
			}
		}
		local = false;
		full = (end == TLB_FLUSH_ALL ||
				nr_pages > tlb_single_page_flush_ceiling);
	} else {
is_local:
		local = true;
		full = (end == TLB_FLUSH_ALL ||
				nr_pages > tlb_local_single_page_flush_ceiling);
	}

	if (full) {
		if (local) {
			_tlbiel_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB);
		} else {
			if (cputlb_use_tlbie()) {
				if (mm_needs_flush_escalation(mm))
					also_pwc = true;

				_tlbie_pid(pid,
					also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB);
			} else {
				_tlbiel_pid_multicast(mm, pid,
					also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB);
			}
		}
	} else {
		if (local)
			_tlbiel_va_range(start, end, pid, page_size, psize, also_pwc);
		else if (cputlb_use_tlbie())
			_tlbie_va_range(start, end, pid, page_size, psize, also_pwc);
		else
			_tlbiel_va_range_multicast(mm,
					start, end, pid, page_size, psize, also_pwc);
	}
	preempt_enable();
}

void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
				  unsigned long end, int psize)
{
	return __radix__flush_tlb_range_psize(mm, start, end, psize, false);
}

static void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start,
				  unsigned long end, int psize)
{
	__radix__flush_tlb_range_psize(mm, start, end, psize, true);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
{
	unsigned long pid, end;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	/* 4k page size, just blow the world */
	if (PAGE_SIZE == 0x1000) {
		radix__flush_all_mm(mm);
		return;
	}

	end = addr + HPAGE_PMD_SIZE;

	/* Otherwise first do the PWC, then iterate the pages. */
	preempt_disable();
	smp_mb(); /* see radix__flush_tlb_mm */
	if (!mm_is_thread_local(mm)) {
		if (unlikely(mm_is_singlethreaded(mm))) {
			exit_flush_lazy_tlbs(mm);
			goto local;
		}
		if (cputlb_use_tlbie())
			_tlbie_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
		else
			_tlbiel_va_range_multicast(mm,
					addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
	} else {
local:
		_tlbiel_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
	}

	preempt_enable();
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_2M);
}
EXPORT_SYMBOL(radix__flush_pmd_tlb_range);

void radix__flush_tlb_all(void)
{
	unsigned long rb, prs, r, rs;
	unsigned long ric = RIC_FLUSH_ALL;

	rb = 0x3 << PPC_BITLSHIFT(53); /* IS = 3 */
	prs = 0; /* partition scoped */
	r = 1;   /* radix format */
	rs = 1 & ((1UL << 32) - 1); /* any LPID value to flush guest mappings */

	asm volatile("ptesync": : :"memory");
	/*
	 * First flush guest entries by passing PRS = 1 and LPID != 0.
	 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(1), "i"(ric), "r"(rs) : "memory");
	/*
	 * Then flush host entries by passing PRS = 0 and LPID == 0.
	 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

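/*
 * rb above encodes IS = 3 (0b11 in IBM bits 52:53), i.e. invalidate
 * all matching entries regardless of PID/LPID, so this pair of tlbies
 * wipes both guest (PRS = 1) and host (PRS = 0) translations on every
 * processor in the system.
 */
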
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void radix_kvm_prefetch_workaround(struct mm_struct *mm)
{
	unsigned long pid = mm->context.id;

	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	/*
	 * If this context hasn't run on that CPU before and KVM is
	 * around, there's a slim chance that the guest on another
	 * CPU just brought an obsolete translation into the TLB of
	 * this CPU due to a bad prefetch using the guest PID on
	 * the way into the hypervisor.
	 *
	 * We work around this here. If KVM is possible, we check if
	 * any sibling thread is in KVM. If it is, the window may exist
	 * and thus we flush that PID from the core.
	 *
	 * A potential future improvement would be to mark which PIDs
	 * have never been used on the system and avoid it if the PID
	 * is new and the process has no other cpumask bit set.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE) && radix_enabled()) {
		int cpu = smp_processor_id();
		int sib = cpu_first_thread_sibling(cpu);
		bool flush = false;

		for (; sib <= cpu_last_thread_sibling(cpu) && !flush; sib++) {
			if (sib == cpu)
				continue;
			if (!cpu_possible(sib))
				continue;
			if (paca_ptrs[sib]->kvm_hstate.kvm_vcpu)
				flush = true;
		}
		if (flush)
			_tlbiel_pid(pid, RIC_FLUSH_ALL);
	}
}
EXPORT_SYMBOL_GPL(radix_kvm_prefetch_workaround);
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */