#ifdef __KERNEL__
#ifndef __PPC_MMU_CONTEXT_H
#define __PPC_MMU_CONTEXT_H

#include <linux/bitops.h>

#include <asm/atomic.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm-generic/mm_hooks.h>

/*
 * On 32-bit PowerPC 6xx/7xx/7xxx CPUs, we use a set of 16 VSIDs
 * (virtual segment identifiers) for each context.  Although the
 * hardware supports 24-bit VSIDs, and thus >1 million contexts,
 * we only use 32,768 of them.  That is ample, since there can be
 * at most around 30,000 tasks in the system anyway, and it means
 * that we can use a bitmap to indicate which contexts are in use.
 * Using a bitmap means that we entirely avoid all of the problems
 * that we used to have when the context number overflowed,
 * particularly on SMP systems.
 *	-- paulus.
 */

/*
 * This macro defines the mapping from contexts to VSIDs (virtual
 * segment IDs).  We use a skew on both the context and the high 4 bits
 * of the 32-bit virtual address (the "effective segment ID") in order
 * to spread out the entries in the MMU hash table.  Note, if this
 * macro is changed then arch/ppc/mm/hashtable.S will have to be
 * changed to correspond.
 */
#define CTX_TO_VSID(ctx, va)	(((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \
				 & 0xffffff)

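/*
 * Worked example (illustrative, not from the original source): for
 * context 1 and effective address 0x30001000 the ESID is 0x3, so
 *
 *	CTX_TO_VSID(1, 0x30001000) = (1 * 14352 + 3 * 0x111) & 0xffffff
 *	                           = (14352 + 819) & 0xffffff
 *	                           = 15171 = 0x3b43
 *
 * The multipliers 897*16 and 0x111 skew consecutive contexts and
 * consecutive segments so that their VSIDs, and hence their MMU
 * hash-table buckets, end up spread far apart.
 */
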
/*
 * The MPC8xx has only 16 contexts.  We rotate through them on each
 * task switch.  A better way would be to keep track of tasks that
 * own contexts, and implement LRU replacement.  That way very active
 * tasks don't always have to pay the TLB reload overhead.  The
 * kernel pages are mapped shared, so the kernel can run on behalf
 * of any task that makes a kernel entry.  Shared does not mean they
 * are not protected; it just means that the ASID comparison is not
 * performed.
 *	-- Dan
 *
 * The IBM4xx has 256 contexts, so we can just rotate through these
 * as a way of "switching" contexts.  If the TID of the TLB is zero,
 * the PID/TID comparison is disabled, so we can use a TID of zero
 * to represent all kernel pages as shared among all contexts.
 *	-- Dan
 */

/* Entering lazy TLB mode is a no-op on 32-bit PowerPC. */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

#ifdef CONFIG_8xx
#define NO_CONTEXT	16
#define LAST_CONTEXT	15
#define FIRST_CONTEXT	0

#elif defined(CONFIG_4xx)
#define NO_CONTEXT	256
#define LAST_CONTEXT	255
#define FIRST_CONTEXT	1

#elif defined(CONFIG_E200) || defined(CONFIG_E500)
#define NO_CONTEXT	256
#define LAST_CONTEXT	255
#define FIRST_CONTEXT	1

#else

/* PPC 6xx, 7xx CPUs */
#define NO_CONTEXT	((unsigned long) -1)
#define LAST_CONTEXT	32767
#define FIRST_CONTEXT	1
#endif

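/*
 * Note that in every branch above LAST_CONTEXT has the form 2^n - 1
 * (i.e. 15, 255 or 32767).  get_mmu_context() below relies on this when
 * it wraps its search hint with `(ctx + 1) & LAST_CONTEXT' instead of
 * a modulo operation.
 */
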
/*
 * Set the current MMU context.
 * On 32-bit PowerPCs (other than the 8xx embedded chips), this is done by
 * loading up the segment registers for the user part of the address space.
 *
 * Since the PGD is immediately available, it is much faster to simply
 * pass this along as a second parameter, which is required for 8xx and
 * can be used for debugging on all processors (if you happen to have
 * an Abatron debugger).
 */
extern void set_context(unsigned long contextid, pgd_t *pgd);

/*
 * Bitmap of contexts in use.
 * The size of this bitmap is LAST_CONTEXT + 1 bits.
 */
extern unsigned long context_map[];

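/*
 * Sketch of the expected definition (it lives in arch/ppc/mm, not in
 * this header, and the exact form there may differ):
 *
 *	unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
 *
 * i.e. one bit per context, rounded up to a whole number of longs.
 */
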
/*
 * This caches the next context number that we expect to be free.
 * Its use is an optimization only; we can't rely on this context
 * number being free, but it usually will be.
 */
extern unsigned long next_mmu_context;

/*
 * If we don't have sufficient contexts to give one to every task
 * that could be in the system, we need to be able to steal contexts.
 * These variables support that.
 */
#if LAST_CONTEXT < 30000
#define FEW_CONTEXTS	1
extern atomic_t nr_free_contexts;
extern struct mm_struct *context_mm[LAST_CONTEXT+1];
extern void steal_context(void);
#endif

/*
 * Get a new mmu context for the address space described by `mm'.
 */
static inline void get_mmu_context(struct mm_struct *mm)
{
	unsigned long ctx;

	if (mm->context.id != NO_CONTEXT)
		return;
#ifdef FEW_CONTEXTS
	/* Reserve a free slot first, stealing a context if none is left. */
	while (atomic_dec_if_positive(&nr_free_contexts) < 0)
		steal_context();
#endif
	/* Claim a context atomically, starting from the cached hint. */
	ctx = next_mmu_context;
	while (test_and_set_bit(ctx, context_map)) {
		ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
		if (ctx > LAST_CONTEXT)
			ctx = 0;
	}
	next_mmu_context = (ctx + 1) & LAST_CONTEXT;
	mm->context.id = ctx;
#ifdef FEW_CONTEXTS
	context_mm[ctx] = mm;
#endif
}
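
/*
 * Illustrative trace (assuming LAST_CONTEXT == 32767): if next_mmu_context
 * is 42 and context 42 is free, test_and_set_bit() claims it in a single
 * atomic step and the loop exits at once.  If 42 is already taken, the
 * loop scans forward with find_next_zero_bit(), wrapping to 0 past
 * LAST_CONTEXT, and retries the atomic claim on each candidate; that
 * retry is what keeps the allocation safe against concurrent callers.
 */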

/*
 * Set up the context for a new address space.
 */
static inline int init_new_context(struct task_struct *t, struct mm_struct *mm)
{
	mm->context.id = NO_CONTEXT;
	mm->context.vdso_base = 0;
	return 0;
}

/*
 * We're finished using the context for an address space.
 */
static inline void destroy_context(struct mm_struct *mm)
{
	preempt_disable();
	if (mm->context.id != NO_CONTEXT) {
		clear_bit(mm->context.id, context_map);
		mm->context.id = NO_CONTEXT;
#ifdef FEW_CONTEXTS
		atomic_inc(&nr_free_contexts);
#endif
	}
	preempt_enable();
}

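/*
 * Lifecycle sketch (illustrative, not a real call sequence from this
 * header's users):
 *
 *	init_new_context(tsk, mm);   -- id starts out as NO_CONTEXT
 *	switch_mm(prev, mm, tsk);    -- get_mmu_context() allocates an id
 *	destroy_context(mm);         -- bit cleared, id freed for reuse
 *
 * A context id is thus allocated lazily, on the first switch to an mm,
 * and returned to the bitmap when the address space is torn down.
 */
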
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
#ifdef CONFIG_ALTIVEC
	/* Stop all outstanding AltiVec data streams before switching. */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		asm volatile ("dssall;\n"
#ifndef CONFIG_POWER4
	     "sync;\n"	/* G4 needs a sync here, G5 apparently not */
#endif
	     : : );
#endif /* CONFIG_ALTIVEC */

	tsk->thread.pgdir = next->pgd;

	/* No need to flush userspace segments if the mm doesn't change */
	if (prev == next)
		return;

	/* Set up the new userspace context */
	get_mmu_context(next);
	set_context(next->context.id, next->pgd);
}

#define deactivate_mm(tsk,mm)	do { } while (0)

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
#define activate_mm(active_mm, mm)	switch_mm(active_mm, mm, current)

extern void mmu_context_init(void);

#endif /* __PPC_MMU_CONTEXT_H */
#endif /* __KERNEL__ */