/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_POWERPC_MMU_CONTEXT_H
#define __ASM_POWERPC_MMU_CONTEXT_H
#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/cputhreads.h>

/*
 * Most of the context management is out of line
 */
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
struct mm_iommu_table_group_mem_t;

extern int isolate_lru_page(struct page *page);	/* from internal.h */
extern bool mm_iommu_preregistered(struct mm_struct *mm);
extern long mm_iommu_get(struct mm_struct *mm,
		unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_put(struct mm_struct *mm,
		struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_init(struct mm_struct *mm);
extern void mm_iommu_cleanup(struct mm_struct *mm);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
		unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(
		struct mm_struct *mm, unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
		unsigned long ua, unsigned long entries);
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned long *hpa);
extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned long *hpa);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
#endif
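
/*
 * Illustrative only, not part of the API above: a typical preregistration
 * user (e.g. a VFIO-style driver) pins a region of userspace memory once,
 * translates within it many times, then drops its reference. A minimal
 * sketch, assuming a caller that already holds a valid @mm and a userspace
 * address @ua spanning @entries pages (use_host_physical_address() is a
 * hypothetical helper):
 *
 *	struct mm_iommu_table_group_mem_t *mem;
 *	unsigned long hpa;
 *	long ret;
 *
 *	ret = mm_iommu_get(mm, ua, entries, &mem);
 *	if (ret)
 *		return ret;			// preregistration failed
 *	if (!mm_iommu_ua_to_hpa(mem, ua, &hpa))
 *		use_host_physical_address(hpa);
 *	mm_iommu_put(mm, mem);			// drop the reference
 */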

extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
extern void set_context(unsigned long id, pgd_t *pgd);

#ifdef CONFIG_PPC_BOOK3S_64
extern void radix__switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next);
static inline void switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next,
				      struct task_struct *tsk)
{
	/* Radix and hash take different context switch paths. */
	if (radix_enabled())
		return radix__switch_mmu_context(prev, next);
	return switch_slb(tsk, next);
}

extern int hash__alloc_context_id(void);
extern void hash__reserve_context_id(int id);
extern void __destroy_context(int context_id);
static inline void mmu_context_init(void) { }

static inline int alloc_extended_context(struct mm_struct *mm,
					 unsigned long ea)
{
	int context_id;

	int index = ea >> MAX_EA_BITS_PER_CONTEXT;

	context_id = hash__alloc_context_id();
	if (context_id < 0)
		return context_id;

	VM_WARN_ON(mm->context.extended_id[index]);
	mm->context.extended_id[index] = context_id;
	return 0;
}
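
/*
 * Worked example, for illustration only: assuming
 * MAX_EA_BITS_PER_CONTEXT == 49 (512 TB of effective address space per
 * context), an effective address of 0x0002000000000000 (512 TB) shifts
 * down to index 1, i.e. the first extended_id slot beyond the default
 * context.
 */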

static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
{
	int context_id;

	context_id = get_ea_context(&mm->context, ea);
	if (!context_id)
		return true;
	return false;
}

#else
extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
extern unsigned long __init_new_context(void);
extern void __destroy_context(unsigned long context_id);
extern void mmu_context_init(void);
static inline int alloc_extended_context(struct mm_struct *mm,
					 unsigned long ea)
{
	/* non-Book3S-64 should never end up calling this */
	WARN_ON(1);
	return -ENOMEM;
}

static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
{
	return false;
}
#endif

#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) && defined(CONFIG_PPC_RADIX_MMU)
extern void radix_kvm_prefetch_workaround(struct mm_struct *mm);
#else
static inline void radix_kvm_prefetch_workaround(struct mm_struct *mm) { }
#endif

extern void switch_cop(struct mm_struct *next);
extern int use_cop(unsigned long acop, struct mm_struct *mm);
extern void drop_cop(unsigned long acop, struct mm_struct *mm);

#ifdef CONFIG_PPC_BOOK3S_64
static inline void inc_mm_active_cpus(struct mm_struct *mm)
{
	atomic_inc(&mm->context.active_cpus);
}

static inline void dec_mm_active_cpus(struct mm_struct *mm)
{
	atomic_dec(&mm->context.active_cpus);
}

static inline void mm_context_add_copro(struct mm_struct *mm)
{
	/*
	 * If any copro is in use, increment the active CPU count
	 * in order to force TLB invalidations to be global, so that
	 * they propagate to the Nest MMU.
	 */
	if (atomic_inc_return(&mm->context.copros) == 1)
		inc_mm_active_cpus(mm);
}

static inline void mm_context_remove_copro(struct mm_struct *mm)
{
	int c;

	c = atomic_dec_if_positive(&mm->context.copros);

	/* Detect imbalance between add and remove */
	WARN_ON(c < 0);

	/*
	 * Need to broadcast a global flush of the full mm before
	 * decrementing active_cpus count, as the next TLBI may be
	 * local and the nMMU and/or PSL need to be cleaned up.
	 * Should be rare enough so that it's acceptable.
	 *
	 * Skip on hash, as we don't know how to do the proper flush
	 * for the time being. Invalidations will remain global if
	 * used on hash.
	 */
	if (c == 0 && radix_enabled()) {
		flush_all_mm(mm);
		dec_mm_active_cpus(mm);
	}
}
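
/*
 * Illustrative pairing only: a coprocessor driver (a hypothetical
 * CXL/NPU-style user of the nMMU) is expected to bracket the period
 * during which the coprocessor may hold translations for @mm:
 *
 *	mm_context_add_copro(mm);	// before attaching the context
 *	...				// copro issues translations
 *	mm_context_remove_copro(mm);	// after detach; flushes on radix
 */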
#else
static inline void inc_mm_active_cpus(struct mm_struct *mm) { }
static inline void dec_mm_active_cpus(struct mm_struct *mm) { }
static inline void mm_context_add_copro(struct mm_struct *mm) { }
static inline void mm_context_remove_copro(struct mm_struct *mm) { }
#endif

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}
#define switch_mm_irqs_off switch_mm_irqs_off

#define deactivate_mm(tsk,mm)	do { } while (0)

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	switch_mm(prev, next, current);
}

/* We don't currently use enter_lazy_tlb() for anything */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
	/* 64-bit Book3E keeps track of the current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
	get_paca()->pgd = NULL;
#endif
}

static inline int arch_dup_mmap(struct mm_struct *oldmm,
				struct mm_struct *mm)
{
	return 0;
}

#ifndef CONFIG_PPC_BOOK3S_64
static inline void arch_exit_mmap(struct mm_struct *mm)
{
}
#else
extern void arch_exit_mmap(struct mm_struct *mm);
#endif

static inline void arch_unmap(struct mm_struct *mm,
			      struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	/* Forget the VDSO base if the unmapped range covers it. */
	if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
		mm->context.vdso_base = 0;
}

static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
}

#ifdef CONFIG_PPC_MEM_KEYS
bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
			       bool execute, bool foreign);
#else /* CONFIG_PPC_MEM_KEYS */
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* by default, allow everything */
	return true;
}

#define pkey_mm_init(mm)
#define thread_pkey_regs_save(thread)
#define thread_pkey_regs_restore(new_thread, old_thread)
#define thread_pkey_regs_init(thread)

static inline int vma_pkey(struct vm_area_struct *vma)
{
	return 0;
}

static inline u64 pte_to_hpte_pkey_bits(u64 pteflags)
{
	return 0x0UL;
}
#endif /* CONFIG_PPC_MEM_KEYS */

#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */