#ifndef __ASM_POWERPC_MMU_CONTEXT_H
#define __ASM_POWERPC_MMU_CONTEXT_H
#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/cputhreads.h>

/*
 * Most of the context management is out of line
 */
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
struct mm_iommu_table_group_mem_t;

extern bool mm_iommu_preregistered(void);
extern long mm_iommu_get(unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_init(mm_context_t *ctx);
extern void mm_iommu_cleanup(mm_context_t *ctx);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua,
		unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_find(unsigned long ua,
		unsigned long entries);
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned long *hpa);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
#endif
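/*
 * Hedged usage sketch (an assumption drawn from the declarations above,
 * not something this header states): a SPAPR TCE IOMMU user such as VFIO
 * would typically preregister a chunk of userspace memory, translate it
 * while it is mapped into a TCE table, and then release it, roughly:
 *
 *	struct mm_iommu_table_group_mem_t *mem;
 *	unsigned long hpa;
 *
 *	mm_iommu_get(ua, entries, &mem);	pin and preregister the range
 *	mm_iommu_ua_to_hpa(mem, ua, &hpa);	userspace address -> host physical
 *	mm_iommu_mapped_inc(mem);		held while mapped in a TCE table
 *	...
 *	mm_iommu_mapped_dec(mem);
 *	mm_iommu_put(mem);			drop the preregistration
 */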
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
extern void set_context(unsigned long id, pgd_t *pgd);

#ifdef CONFIG_PPC_BOOK3S_64
extern void radix__switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next);
static inline void switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next,
				      struct task_struct *tsk)
{
	if (radix_enabled())
		return radix__switch_mmu_context(prev, next);
	return switch_slb(tsk, next);
}

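/*
 * Hedged note, not stated in this header: on Book3S 64 the hardware switch
 * is one of two out-of-line paths. With the radix MMU,
 * radix__switch_mmu_context() is expected to point the PID register at the
 * next mm's context id; with the hash MMU, switch_slb() flushes and reloads
 * the SLB entries for the next mm. Both live under arch/powerpc/mm/.
 */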
extern int __init_new_context(void);
extern void __destroy_context(int context_id);
static inline void mmu_context_init(void) { }
#else
extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
extern unsigned long __init_new_context(void);
extern void __destroy_context(unsigned long context_id);
extern void mmu_context_init(void);
#endif

extern void switch_cop(struct mm_struct *next);
extern int use_cop(unsigned long acop, struct mm_struct *mm);
extern void drop_cop(unsigned long acop, struct mm_struct *mm);

/*
 * switch_mm is the entry point called from the architecture independent
 * code in kernel/sched/core.c
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	/* Mark this context as having been used on the new CPU */
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));

	/* 32-bit keeps track of the current PGDIR in the thread struct */
#ifdef CONFIG_PPC32
	tsk->thread.pgdir = next->pgd;
#endif /* CONFIG_PPC32 */

	/* 64-bit Book3E keeps track of current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
	get_paca()->pgd = next->pgd;
#endif
	/* Nothing else to do if we aren't actually switching */
	if (prev == next)
		return;

#ifdef CONFIG_PPC_ICSWX
	/* Switch coprocessor context only if prev or next uses a coprocessor */
	if (prev->context.acop || next->context.acop)
		switch_cop(next);
#endif /* CONFIG_PPC_ICSWX */

	/* We must stop all altivec streams before changing the HW
	 * context
	 */
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		asm volatile ("dssall");
#endif /* CONFIG_ALTIVEC */
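	/*
	 * Hedged aside on the instruction above (architectural background,
	 * not text from this file): "dssall" stops every data stream started
	 * with dst/dstst, so no vector prefetch stream keeps running against
	 * the old address space across the switch.
	 */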
	/*
	 * The actual HW switching method differs between the various
	 * sub-architectures. Out of line for now
	 */
	switch_mmu_context(prev, next, tsk);
}

#define deactivate_mm(tsk, mm)			do { } while (0)

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm(prev, next, current);
	local_irq_restore(flags);
}

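/*
 * Hedged note: activate_mm() is typically reached from the exec path
 * (exec_mmap() in fs/exec.c) once current->mm has been replaced; the
 * local_irq_save()/restore() pair above mirrors the interrupts-off context
 * in which the scheduler normally calls switch_mm().
 */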
/* We don't currently use enter_lazy_tlb() for anything */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
	/* 64-bit Book3E keeps track of current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
	get_paca()->pgd = NULL;
#endif
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
}

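/*
 * Hedged note on arch_unmap() below, not spelled out in this file: clearing
 * vdso_base when the VDSO range is torn down stops the kernel from using a
 * stale VDSO address (for example when building a signal frame) after
 * userspace has munmap()ed it.
 */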
static inline void arch_unmap(struct mm_struct *mm,
			      struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
		mm->context.vdso_base = 0;
}

static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
}

static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* by default, allow everything */
	return true;
}

static inline bool arch_pte_access_permitted(pte_t pte, bool write)
{
	/* by default, allow everything */
	return true;
}
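/*
 * Hedged note: arch_vma_access_permitted() and arch_pte_access_permitted()
 * are the generic mm hooks used by architectures that restrict access with
 * memory protection keys; this powerpc version implements no such
 * restriction, so both stubs simply allow every access.
 */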
#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */