/*
 * TLB flush routines for radix kernels.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>
#include <linux/mmu_context.h>
#include <linux/sched/mm.h>

#include <asm/ppc-opcode.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/trace.h>
#include <asm/cputhreads.h>

#define RIC_FLUSH_TLB 0
#define RIC_FLUSH_PWC 1
#define RIC_FLUSH_ALL 2

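/*
 * A note on the RIC values above: per the ISA 3.0 tlbie/tlbiel definition,
 * the Radix Invalidation Control field selects what gets invalidated.
 * RIC=0 invalidates TLB entries only, RIC=1 invalidates only the Page
 * Walk Cache, and RIC=2 invalidates TLB entries, the PWC and the cached
 * process (and, for partition scope, partition) table entries.
 */
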
/*
 * tlbiel instruction for radix, set invalidation
 * i.e., r=1 and is=01 or is=10 or is=11
 */
static inline void tlbiel_radix_set_isa300(unsigned int set, unsigned int is,
					unsigned int pid,
					unsigned int ric, unsigned int prs)
{
	unsigned long rb;
	unsigned long rs;

	rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53));
	rs = ((unsigned long)pid << PPC_BITLSHIFT(31));

	asm volatile(PPC_TLBIEL(%0, %1, %2, %3, 1)
		     : : "r"(rb), "r"(rs), "i"(ric), "i"(prs)
		     : "memory");
}

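/*
 * A note on the encoding, for readers new to Power: the ISA gives bit
 * positions in IBM numbering (bit 0 = MSB), and PPC_BITLSHIFT() converts
 * an IBM bit number into a left-shift amount, e.g. PPC_BITLSHIFT(51) ==
 * 63 - 51 == 12. The IS field then selects the invalidation scope:
 * IS=0 matches a single EA, IS=1 all entries for a PID, IS=2 all entries
 * for an LPID, IS=3 all entries.
 */
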
static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is)
{
	unsigned int set;

	asm volatile("ptesync": : :"memory");

	/*
	 * Flush the first set of the TLB, and the entire Page Walk Cache
	 * and partition table entries. Then flush the remaining sets of the
	 * TLB.
	 */
	tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 0);
	for (set = 1; set < num_sets; set++)
		tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 0);

	/* Do the same for process scoped entries. */
	tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 1);
	for (set = 1; set < num_sets; set++)
		tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 1);

	asm volatile("ptesync": : :"memory");
}

void radix__tlbiel_all(unsigned int action)
{
	unsigned int is;

	switch (action) {
	case TLB_INVAL_SCOPE_GLOBAL:
		is = 3;
		break;
	case TLB_INVAL_SCOPE_LPID:
		is = 2;
		break;
	default:
		BUG();
	}

	if (early_cpu_has_feature(CPU_FTR_ARCH_300))
		tlbiel_all_isa300(POWER9_TLB_SETS_RADIX, is);
	else
		WARN(1, "%s called on pre-POWER9 CPU\n", __func__);

	asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}

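/*
 * A usage sketch, inferred from the scope names rather than from this
 * file: a CPU bring-up path clears everything the core may still cache
 * with
 *
 *	radix__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
 *
 * while TLB_INVAL_SCOPE_LPID (IS=2) flushes only entries tagged with the
 * current LPIDR, e.g. when a core is reused for a different partition.
 */
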
static __always_inline void __tlbiel_pid(unsigned long pid, int set,
				unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = PPC_BIT(53); /* IS = 1 */
	rb |= set << PPC_BITLSHIFT(51);
	rs = ((unsigned long)pid) << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 1, rb, rs, ric, prs, r);
}

static __always_inline void __tlbie_pid(unsigned long pid, unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = PPC_BIT(53); /* IS = 1 */
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 0, rb, rs, ric, prs, r);
}

static __always_inline void __tlbiel_lpid(unsigned long lpid, int set,
				unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = PPC_BIT(52); /* IS = 2 */
	rb |= set << PPC_BITLSHIFT(51);
	rs = 0;  /* LPID comes from LPIDR */
	prs = 0; /* partition scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(lpid, 1, rb, rs, ric, prs, r);
}

static __always_inline void __tlbie_lpid(unsigned long lpid, unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = PPC_BIT(52); /* IS = 2 */
	rs = lpid;
	prs = 0; /* partition scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
}

static inline void __tlbiel_lpid_guest(unsigned long lpid, int set,
				unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = PPC_BIT(52); /* IS = 2 */
	rb |= set << PPC_BITLSHIFT(51);
	rs = 0;  /* LPID comes from LPIDR */
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(lpid, 1, rb, rs, ric, prs, r);
}

static inline void __tlbiel_va(unsigned long va, unsigned long pid,
			       unsigned long ap, unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 1, rb, rs, ric, prs, r);
}

static inline void __tlbie_va(unsigned long va, unsigned long pid,
			      unsigned long ap, unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 0, rb, rs, ric, prs, r);
}

static inline void __tlbie_lpid_va(unsigned long va, unsigned long lpid,
			      unsigned long ap, unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = lpid;
	prs = 0; /* partition scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
}

static inline void fixup_tlbie(void)
{
	unsigned long pid = 0;
	unsigned long va = ((1UL << 52) - 1);

	if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
		asm volatile("ptesync": : :"memory");
		__tlbie_va(va, pid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB);
	}
}

static inline void fixup_tlbie_lpid(unsigned long lpid)
{
	unsigned long va = ((1UL << 52) - 1);

	if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
		asm volatile("ptesync": : :"memory");
		__tlbie_lpid_va(va, lpid, mmu_get_ap(MMU_PAGE_64K), RIC_FLUSH_TLB);
	}
}

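/*
 * Why the fixups exist, as far as the feature bit documents it: on
 * affected POWER9 parts (CPU_FTR_P9_TLBIE_BUG), a tlbie is not reliably
 * ordered against a preceding tlbie to the same target. The workaround
 * is a ptesync plus one extra, harmless tlbie (RIC=TLB, 64K AP, aimed at
 * the last page of the 52-bit EA space) before the caller's closing
 * eieio; tlbsync; ptesync sequence.
 */
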
/*
 * We use 128 sets in radix mode and 256 sets in HPT mode.
 */
static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
{
	int set;

	asm volatile("ptesync": : :"memory");

	/*
	 * Flush the first set of the TLB, and if we're doing a RIC_FLUSH_ALL,
	 * also flush the entire Page Walk Cache.
	 */
	__tlbiel_pid(pid, 0, ric);

	/* For PWC, only one flush is needed */
	if (ric == RIC_FLUSH_PWC) {
		asm volatile("ptesync": : :"memory");
		return;
	}

	/* For the remaining sets, just flush the TLB */
	for (set = 1; set < POWER9_TLB_SETS_RADIX ; set++)
		__tlbiel_pid(pid, set, RIC_FLUSH_TLB);

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}

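/*
 * A worked cost example: with POWER9_TLB_SETS_RADIX == 128, a
 * _tlbiel_pid(pid, RIC_FLUSH_ALL) executes one RIC=ALL tlbiel for set 0
 * plus 127 RIC=TLB tlbiels, all between two ptesyncs, so a local
 * full-PID flush costs on the order of 128 instructions. That is why
 * tlb_local_single_page_flush_ceiling below is derived from the set
 * count rather than being a large constant.
 */
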
static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
{
	asm volatile("ptesync": : :"memory");

	/*
	 * Work around the fact that the "ric" argument to __tlbie_pid
	 * must be a compile-time constraint to match the "i" constraint
	 * in the asm statement.
	 */
	switch (ric) {
	case RIC_FLUSH_TLB:
		__tlbie_pid(pid, RIC_FLUSH_TLB);
		break;
	case RIC_FLUSH_PWC:
		__tlbie_pid(pid, RIC_FLUSH_PWC);
		break;
	case RIC_FLUSH_ALL:
	default:
		__tlbie_pid(pid, RIC_FLUSH_ALL);
	}
	fixup_tlbie();
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

static inline void _tlbiel_lpid(unsigned long lpid, unsigned long ric)
{
	int set;

	VM_BUG_ON(mfspr(SPRN_LPID) != lpid);

	asm volatile("ptesync": : :"memory");

	/*
	 * Flush the first set of the TLB, and if we're doing a RIC_FLUSH_ALL,
	 * also flush the entire Page Walk Cache.
	 */
	__tlbiel_lpid(lpid, 0, ric);

	/* For PWC, only one flush is needed */
	if (ric == RIC_FLUSH_PWC) {
		asm volatile("ptesync": : :"memory");
		return;
	}

	/* For the remaining sets, just flush the TLB */
	for (set = 1; set < POWER9_TLB_SETS_RADIX ; set++)
		__tlbiel_lpid(lpid, set, RIC_FLUSH_TLB);

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}

static inline void _tlbie_lpid(unsigned long lpid, unsigned long ric)
{
	asm volatile("ptesync": : :"memory");

	/*
	 * Work around the fact that the "ric" argument to __tlbie_lpid
	 * must be a compile-time constraint to match the "i" constraint
	 * in the asm statement.
	 */
	switch (ric) {
	case RIC_FLUSH_TLB:
		__tlbie_lpid(lpid, RIC_FLUSH_TLB);
		break;
	case RIC_FLUSH_PWC:
		__tlbie_lpid(lpid, RIC_FLUSH_PWC);
		break;
	case RIC_FLUSH_ALL:
	default:
		__tlbie_lpid(lpid, RIC_FLUSH_ALL);
	}
	fixup_tlbie_lpid(lpid);
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

static inline void _tlbiel_lpid_guest(unsigned long lpid, unsigned long ric)
{
	int set;

	VM_BUG_ON(mfspr(SPRN_LPID) != lpid);

	asm volatile("ptesync": : :"memory");

	/*
	 * Flush the first set of the TLB, and if we're doing a RIC_FLUSH_ALL,
	 * also flush the entire Page Walk Cache.
	 */
	__tlbiel_lpid_guest(lpid, 0, ric);

	/* For PWC, only one flush is needed */
	if (ric == RIC_FLUSH_PWC) {
		asm volatile("ptesync": : :"memory");
		return;
	}

	/* For the remaining sets, just flush the TLB */
	for (set = 1; set < POWER9_TLB_SETS_RADIX ; set++)
		__tlbiel_lpid_guest(lpid, set, RIC_FLUSH_TLB);

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
}

static inline void __tlbiel_va_range(unsigned long start, unsigned long end,
				     unsigned long pid, unsigned long page_size,
				     unsigned long psize)
{
	unsigned long addr;
	unsigned long ap = mmu_get_ap(psize);

	for (addr = start; addr < end; addr += page_size)
		__tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
}

static inline void _tlbiel_va(unsigned long va, unsigned long pid,
			      unsigned long psize, unsigned long ric)
{
	unsigned long ap = mmu_get_ap(psize);

	asm volatile("ptesync": : :"memory");
	__tlbiel_va(va, pid, ap, ric);
	asm volatile("ptesync": : :"memory");
}

static inline void _tlbiel_va_range(unsigned long start, unsigned long end,
				    unsigned long pid, unsigned long page_size,
				    unsigned long psize, bool also_pwc)
{
	asm volatile("ptesync": : :"memory");
	if (also_pwc)
		__tlbiel_pid(pid, 0, RIC_FLUSH_PWC);
	__tlbiel_va_range(start, end, pid, page_size, psize);
	asm volatile("ptesync": : :"memory");
}

static inline void __tlbie_va_range(unsigned long start, unsigned long end,
				    unsigned long pid, unsigned long page_size,
				    unsigned long psize)
{
	unsigned long addr;
	unsigned long ap = mmu_get_ap(psize);

	for (addr = start; addr < end; addr += page_size)
		__tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
}

static inline void _tlbie_va(unsigned long va, unsigned long pid,
			     unsigned long psize, unsigned long ric)
{
	unsigned long ap = mmu_get_ap(psize);

	asm volatile("ptesync": : :"memory");
	__tlbie_va(va, pid, ap, ric);
	fixup_tlbie();
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

static inline void _tlbie_lpid_va(unsigned long va, unsigned long lpid,
				  unsigned long psize, unsigned long ric)
{
	unsigned long ap = mmu_get_ap(psize);

	asm volatile("ptesync": : :"memory");
	__tlbie_lpid_va(va, lpid, ap, ric);
	fixup_tlbie_lpid(lpid);
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

static inline void _tlbie_va_range(unsigned long start, unsigned long end,
				   unsigned long pid, unsigned long page_size,
				   unsigned long psize, bool also_pwc)
{
	asm volatile("ptesync": : :"memory");
	if (also_pwc)
		__tlbie_pid(pid, RIC_FLUSH_PWC);
	__tlbie_va_range(start, end, pid, page_size, psize);
	fixup_tlbie();
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

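/*
 * On the closing sequence used by all the global (_tlbie_*) variants:
 * tlbie is broadcast to every processor on the fabric, and per the ISA
 * the eieio; tlbsync; ptesync sequence orders the invalidations and
 * waits for their completion everywhere. That system-wide round trip is
 * the main reason global flushes are so much more expensive than the
 * per-CPU tlbiel loops above.
 */
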
/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */

void radix__local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_pid(pid, RIC_FLUSH_TLB);
	preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_tlb_mm);

#ifndef CONFIG_SMP
void radix__local_flush_all_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
	preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_all_mm);
#endif /* CONFIG_SMP */

void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				       int psize)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
	preempt_enable();
}

void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	/* need the return fix for nohash.c */
	if (is_vm_hugetlb_page(vma))
		return radix__local_flush_hugetlb_page(vma, vmaddr);
#endif
	radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__local_flush_tlb_page);

#ifdef CONFIG_SMP
static bool mm_is_singlethreaded(struct mm_struct *mm)
{
	if (atomic_read(&mm->context.copros) > 0)
		return false;
	if (atomic_read(&mm->mm_users) <= 1 && current->mm == mm)
		return true;
	return false;
}

static bool mm_needs_flush_escalation(struct mm_struct *mm)
{
	/*
	 * P9 nest MMU has issues with the page walk cache
	 * caching PTEs and not flushing them properly when
	 * RIC = 0 for a PID/LPID invalidate
	 */
	if (atomic_read(&mm->context.copros) > 0)
		return true;
	return false;
}

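/*
 * For context (my reading of the surrounding mm code, not something this
 * file states): mm->context.copros counts coprocessor contexts attached
 * to this mm through the nest MMU, e.g. CAPI/cxl or NX users, so both
 * copros checks above are really asking "does a nest MMU hold
 * translations for this address space?".
 */
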
static void do_exit_flush_lazy_tlb(void *arg)
{
	struct mm_struct *mm = arg;
	unsigned long pid = mm->context.id;

	if (current->mm == mm)
		return; /* Local CPU */

	if (current->active_mm == mm) {
		/*
		 * Must be a kernel thread because sender is single-threaded.
		 */
		BUG_ON(current->mm);
		mmgrab(&init_mm);
		switch_mm(mm, &init_mm, current);
		current->active_mm = &init_mm;
		mmdrop(mm);
	}
	_tlbiel_pid(pid, RIC_FLUSH_ALL);
}

static void exit_flush_lazy_tlbs(struct mm_struct *mm)
{
	/*
	 * Would be nice if this was async so it could be run in
	 * parallel with our local flush, but generic code does not
	 * give a good API for it. Could extend the generic code or
	 * make a special powerpc IPI for flushing TLBs.
	 * For now it's not too performance critical.
	 */
	smp_call_function_many(mm_cpumask(mm), do_exit_flush_lazy_tlb,
				(void *)mm, 1);
	mm_reset_thread_local(mm);
}

void radix__flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	/*
	 * Order loads of mm_cpumask vs previous stores to clear ptes before
	 * the invalidate. See barrier in switch_mm_irqs_off
	 */
	smp_mb();
	if (!mm_is_thread_local(mm)) {
		if (unlikely(mm_is_singlethreaded(mm))) {
			exit_flush_lazy_tlbs(mm);
			goto local;
		}

		if (mm_needs_flush_escalation(mm))
			_tlbie_pid(pid, RIC_FLUSH_ALL);
		else
			_tlbie_pid(pid, RIC_FLUSH_TLB);
	} else {
local:
		_tlbiel_pid(pid, RIC_FLUSH_TLB);
	}
	preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_mm);

static void __flush_all_mm(struct mm_struct *mm, bool fullmm)
{
	unsigned long pid;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	smp_mb(); /* see radix__flush_tlb_mm */
	if (!mm_is_thread_local(mm)) {
		if (unlikely(mm_is_singlethreaded(mm))) {
			if (!fullmm) {
				exit_flush_lazy_tlbs(mm);
				goto local;
			}
		}
		_tlbie_pid(pid, RIC_FLUSH_ALL);
	} else {
local:
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
	}
	preempt_enable();
}

void radix__flush_all_mm(struct mm_struct *mm)
{
	__flush_all_mm(mm, false);
}
EXPORT_SYMBOL(radix__flush_all_mm);

void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
{
	tlb->need_flush_all = 1;
}
EXPORT_SYMBOL(radix__flush_tlb_pwc);

void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				 int psize)
{
	unsigned long pid;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	smp_mb(); /* see radix__flush_tlb_mm */
	if (!mm_is_thread_local(mm)) {
		if (unlikely(mm_is_singlethreaded(mm))) {
			exit_flush_lazy_tlbs(mm);
			goto local;
		}
		_tlbie_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
	} else {
local:
		_tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
	}
	preempt_enable();
}

void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (is_vm_hugetlb_page(vma))
		return radix__flush_hugetlb_page(vma, vmaddr);
#endif
	radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__flush_tlb_page);

#else /* CONFIG_SMP */
#define radix__flush_all_mm radix__local_flush_all_mm
#endif /* CONFIG_SMP */

void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	_tlbie_pid(0, RIC_FLUSH_ALL);
}
EXPORT_SYMBOL(radix__flush_tlb_kernel_range);

#define TLB_FLUSH_ALL -1UL

/*
 * Number of pages above which we invalidate the entire PID rather than
 * flush individual pages, for local and global flushes respectively.
 *
 * tlbie goes out to the interconnect and individual ops are more costly.
 * It also does not iterate over sets like the local tlbiel variant when
 * invalidating a full PID, so it has a far lower threshold to change from
 * individual page flushes to full-pid flushes.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
static unsigned long tlb_local_single_page_flush_ceiling __read_mostly = POWER9_TLB_SETS_RADIX * 2;

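/*
 * A worked example of the thresholds, assuming the 64K base page size:
 *
 *	local:  ceiling = POWER9_TLB_SETS_RADIX * 2 = 256 pages (16MB)
 *	global: ceiling = 33 pages (~2MB)
 *
 * so a 4MB flush (64 pages) is done page by page locally (64 <= 256),
 * but escalates to a full-PID tlbie when it must go global (64 > 33).
 */
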
static inline void __radix__flush_tlb_range(struct mm_struct *mm,
					unsigned long start, unsigned long end,
					bool flush_all_sizes)
{
	unsigned long pid;
	unsigned int page_shift = mmu_psize_defs[mmu_virtual_psize].shift;
	unsigned long page_size = 1UL << page_shift;
	unsigned long nr_pages = (end - start) >> page_shift;
	bool local, full;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	smp_mb(); /* see radix__flush_tlb_mm */
	if (!mm_is_thread_local(mm)) {
		if (unlikely(mm_is_singlethreaded(mm))) {
			if (end != TLB_FLUSH_ALL) {
				exit_flush_lazy_tlbs(mm);
				goto is_local;
			}
		}
		local = false;
		full = (end == TLB_FLUSH_ALL ||
				nr_pages > tlb_single_page_flush_ceiling);
	} else {
is_local:
		local = true;
		full = (end == TLB_FLUSH_ALL ||
				nr_pages > tlb_local_single_page_flush_ceiling);
	}

	if (full) {
		if (local) {
			_tlbiel_pid(pid, RIC_FLUSH_TLB);
		} else {
			if (mm_needs_flush_escalation(mm))
				_tlbie_pid(pid, RIC_FLUSH_ALL);
			else
				_tlbie_pid(pid, RIC_FLUSH_TLB);
		}
	} else {
		bool hflush = flush_all_sizes;
		bool gflush = flush_all_sizes;
		unsigned long hstart, hend;
		unsigned long gstart, gend;

		if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
			hflush = true;

		if (hflush) {
			hstart = (start + PMD_SIZE - 1) & PMD_MASK;
			hend = end & PMD_MASK;
			if (hstart == hend)
				hflush = false;
		}

		if (gflush) {
			gstart = (start + PUD_SIZE - 1) & PUD_MASK;
			gend = end & PUD_MASK;
			if (gstart == gend)
				gflush = false;
		}

		asm volatile("ptesync": : :"memory");
		if (local) {
			__tlbiel_va_range(start, end, pid, page_size, mmu_virtual_psize);
			if (hflush)
				__tlbiel_va_range(hstart, hend, pid,
						PMD_SIZE, MMU_PAGE_2M);
			if (gflush)
				__tlbiel_va_range(gstart, gend, pid,
						PUD_SIZE, MMU_PAGE_1G);
			asm volatile("ptesync": : :"memory");
		} else {
			__tlbie_va_range(start, end, pid, page_size, mmu_virtual_psize);
			if (hflush)
				__tlbie_va_range(hstart, hend, pid,
						PMD_SIZE, MMU_PAGE_2M);
			if (gflush)
				__tlbie_va_range(gstart, gend, pid,
						PUD_SIZE, MMU_PAGE_1G);
			fixup_tlbie();
			asm volatile("eieio; tlbsync; ptesync": : :"memory");
		}
	}
	preempt_enable();
}

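/*
 * Why the hflush/gflush dance above: a range can be mapped by a mix of
 * base pages, 2M THP and 1G mappings, and a VA-based tlbie(l) only hits
 * entries of the page size encoded in AP. So the non-full path also
 * walks the PMD- and PUD-aligned portion of the range with 2M and 1G
 * strides. THP can show up in any user range, hence hflush is forced on
 * whenever CONFIG_TRANSPARENT_HUGEPAGE is enabled.
 */
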
void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (is_vm_hugetlb_page(vma))
		return radix__flush_hugetlb_tlb_range(vma, start, end);
#endif

	__radix__flush_tlb_range(vma->vm_mm, start, end, false);
}
EXPORT_SYMBOL(radix__flush_tlb_range);

static int radix_get_mmu_psize(int page_size)
{
	int psize;

	if (page_size == (1UL << mmu_psize_defs[mmu_virtual_psize].shift))
		psize = mmu_virtual_psize;
	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_2M].shift))
		psize = MMU_PAGE_2M;
	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_1G].shift))
		psize = MMU_PAGE_1G;
	else
		return -1;
	return psize;
}

/*
 * Flush partition scoped LPID address translation for all CPUs.
 */
void radix__flush_tlb_lpid_page(unsigned int lpid,
				unsigned long addr,
				unsigned long page_size)
{
	int psize = radix_get_mmu_psize(page_size);

	_tlbie_lpid_va(addr, lpid, psize, RIC_FLUSH_TLB);
}
EXPORT_SYMBOL_GPL(radix__flush_tlb_lpid_page);

/*
 * Flush partition scoped PWC from LPID for all CPUs.
 */
void radix__flush_pwc_lpid(unsigned int lpid)
{
	_tlbie_lpid(lpid, RIC_FLUSH_PWC);
}
EXPORT_SYMBOL_GPL(radix__flush_pwc_lpid);

/*
 * Flush partition scoped translations from LPID (=LPIDR) on all CPUs.
 */
void radix__flush_tlb_lpid(unsigned int lpid)
{
	_tlbie_lpid(lpid, RIC_FLUSH_ALL);
}
EXPORT_SYMBOL_GPL(radix__flush_tlb_lpid);

/*
 * Flush partition scoped translations from LPID (=LPIDR) on the local CPU.
 */
void radix__local_flush_tlb_lpid(unsigned int lpid)
{
	_tlbiel_lpid(lpid, RIC_FLUSH_ALL);
}
EXPORT_SYMBOL_GPL(radix__local_flush_tlb_lpid);

/*
 * Flush process scoped translations from LPID (=LPIDR).
 * An important difference: the guest normally manages its own translations,
 * but some cases, e.g. vCPU migration, require KVM to flush on its behalf.
 */
void radix__local_flush_tlb_lpid_guest(unsigned int lpid)
{
	_tlbiel_lpid_guest(lpid, RIC_FLUSH_ALL);
}
EXPORT_SYMBOL_GPL(radix__local_flush_tlb_lpid_guest);

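/*
 * A consumer sketch (based on the comments above, not verified against
 * the KVM code in this tree): the radix__*_lpid* exports are aimed at
 * KVM HV, which owns the partition scoped tables for radix guests and
 * needs these flushes when it updates those tables or moves vCPUs.
 */
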
static void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start,
					     unsigned long end, int psize);

void radix__tlb_flush(struct mmu_gather *tlb)
{
	int psize = 0;
	struct mm_struct *mm = tlb->mm;
	int page_size = tlb->page_size;
	unsigned long start = tlb->start;
	unsigned long end = tlb->end;

	/*
	 * If the page size is not something we understand, do a full mm flush.
	 *
	 * A "fullmm" flush must always do a flush_all_mm (RIC=2) flush
	 * that flushes the process table entry cache upon process teardown.
	 * See the comment for radix in arch_exit_mmap().
	 */
	if (tlb->fullmm) {
		__flush_all_mm(mm, true);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
	} else if (mm_tlb_flush_nested(mm)) {
		/*
		 * If there is a concurrent invalidation that is clearing ptes,
		 * then it's possible this invalidation will miss one of those
		 * cleared ptes and miss flushing the TLB. If this invalidate
		 * returns before the other one flushes TLBs, that can result
		 * in it returning while there are still valid TLBs inside the
		 * range to be invalidated.
		 *
		 * See mm/memory.c:tlb_finish_mmu() for more details.
		 *
		 * The solution to this is to ensure the entire range is always
		 * flushed here. The problem for powerpc is that the flushes
		 * are page size specific, so this "forced flush" would not
		 * do the right thing if there is a mix of page sizes in
		 * the range to be invalidated. So use __flush_tlb_range
		 * which invalidates all possible page sizes in the range.
		 *
		 * The PWC flush is probably not required because the core code
		 * shouldn't free page tables in this path, but accounting
		 * for the possibility makes us a bit more robust.
		 *
		 * need_flush_all is an uncommon case because page table
		 * teardown should be done with exclusive locks held (but
		 * after the locks are dropped another invalidate could come
		 * in), so it could be optimized further if necessary.
		 */
		if (!tlb->need_flush_all)
			__radix__flush_tlb_range(mm, start, end, true);
		else
			radix__flush_all_mm(mm);
#endif
	} else if ((psize = radix_get_mmu_psize(page_size)) == -1) {
		if (!tlb->need_flush_all)
			radix__flush_tlb_mm(mm);
		else
			radix__flush_all_mm(mm);
	} else {
		if (!tlb->need_flush_all)
			radix__flush_tlb_range_psize(mm, start, end, psize);
		else
			radix__flush_tlb_pwc_range_psize(mm, start, end, psize);
	}
	tlb->need_flush_all = 0;
}

static __always_inline void __radix__flush_tlb_range_psize(struct mm_struct *mm,
				unsigned long start, unsigned long end,
				int psize, bool also_pwc)
{
	unsigned long pid;
	unsigned int page_shift = mmu_psize_defs[psize].shift;
	unsigned long page_size = 1UL << page_shift;
	unsigned long nr_pages = (end - start) >> page_shift;
	bool local, full;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	smp_mb(); /* see radix__flush_tlb_mm */
	if (!mm_is_thread_local(mm)) {
		if (unlikely(mm_is_singlethreaded(mm))) {
			if (end != TLB_FLUSH_ALL) {
				exit_flush_lazy_tlbs(mm);
				goto is_local;
			}
		}
		local = false;
		full = (end == TLB_FLUSH_ALL ||
				nr_pages > tlb_single_page_flush_ceiling);
	} else {
is_local:
		local = true;
		full = (end == TLB_FLUSH_ALL ||
				nr_pages > tlb_local_single_page_flush_ceiling);
	}

	if (full) {
		if (local) {
			_tlbiel_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB);
		} else {
			if (mm_needs_flush_escalation(mm))
				also_pwc = true;

			_tlbie_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB);
		}
	} else {
		if (local)
			_tlbiel_va_range(start, end, pid, page_size, psize, also_pwc);
		else
			_tlbie_va_range(start, end, pid, page_size, psize, also_pwc);
	}
	preempt_enable();
}

void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
				  unsigned long end, int psize)
{
	return __radix__flush_tlb_range_psize(mm, start, end, psize, false);
}

static void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start,
					     unsigned long end, int psize)
{
	__radix__flush_tlb_range_psize(mm, start, end, psize, true);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
{
	unsigned long pid, end;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	/* 4k page size, just blow the world */
	if (PAGE_SIZE == 0x1000) {
		radix__flush_all_mm(mm);
		return;
	}

	end = addr + HPAGE_PMD_SIZE;

	/* Otherwise first do the PWC, then iterate the pages. */
	preempt_disable();
	smp_mb(); /* see radix__flush_tlb_mm */
	if (!mm_is_thread_local(mm)) {
		if (unlikely(mm_is_singlethreaded(mm))) {
			exit_flush_lazy_tlbs(mm);
			goto local;
		}
		_tlbie_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
	} else {
local:
		_tlbiel_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
	}

	preempt_enable();
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_2M);
}
EXPORT_SYMBOL(radix__flush_pmd_tlb_range);

void radix__flush_tlb_all(void)
{
	unsigned long rb,prs,r,rs;
	unsigned long ric = RIC_FLUSH_ALL;

	rb = 0x3 << PPC_BITLSHIFT(53); /* IS = 3 */
	prs = 0; /* partition scoped */
	r = 1;   /* radix format */
	rs = 1 & ((1UL << 32) - 1); /* any LPID value to flush guest mappings */

	asm volatile("ptesync": : :"memory");
	/*
	 * now flush guest entries by passing PRS = 1 and LPID != 0
	 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(1), "i"(ric), "r"(rs) : "memory");
	/*
	 * now flush host entries by passing PRS = 0 and LPID == 0
	 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void radix_kvm_prefetch_workaround(struct mm_struct *mm)
{
	unsigned long pid = mm->context.id;

	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	/*
	 * If this context hasn't run on that CPU before and KVM is
	 * around, there's a slim chance that the guest on another
	 * CPU just brought in obsolete translations into the TLB of
	 * this CPU due to a bad prefetch using the guest PID on
	 * the way into the hypervisor.
	 *
	 * We work around this here. If KVM is possible, we check if
	 * any sibling thread is in KVM. If it is, the window may exist
	 * and thus we flush that PID from the core.
	 *
	 * A potential future improvement would be to mark which PIDs
	 * have never been used on the system and avoid it if the PID
	 * is new and the process has no other cpumask bit set.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE) && radix_enabled()) {
		int cpu = smp_processor_id();
		int sib = cpu_first_thread_sibling(cpu);
		bool flush = false;

		for (; sib <= cpu_last_thread_sibling(cpu) && !flush; sib++) {
			if (sib == cpu)
				continue;
			if (!cpu_possible(sib))
				continue;
			if (paca_ptrs[sib]->kvm_hstate.kvm_vcpu)
				flush = true;
		}
		if (flush)
			_tlbiel_pid(pid, RIC_FLUSH_ALL);
	}
}
EXPORT_SYMBOL_GPL(radix_kvm_prefetch_workaround);
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */