/*
 * This file contains the routines for handling the MMU on those
 * PowerPC implementations where the MMU is not using the hash
 * table, such as 8xx, 4xx, BookE's etc...
 *
 * Copyright 2008 Ben Herrenschmidt <benh@kernel.crashing.org>
 *                IBM Corp.
 *
 * Derived from previous arch/powerpc/mm/mmu_context.c
 * and arch/powerpc/include/asm/mmu_context.h
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * TODO:
 *
 *   - The global context lock will not scale very well
 *   - The maps should be dynamically allocated to allow for processors
 *     that support more PID bits at runtime
 *   - Implement flush_tlb_mm() by making the context stale and picking
 *     a new one
 *   - More aggressively clear stale map bits and maybe find some way to
 *     also clear mm->cpu_vm_mask bits when processes are migrated
 */

//#define DEBUG_MAP_CONSISTENCY
//#define DEBUG_CLAMP_LAST_CONTEXT	31
//#define DEBUG_HARDER

/* We don't use DEBUG because it tends to be compiled in always nowadays
 * and this would generate way too much output
 */
#ifdef DEBUG_HARDER
#define pr_hard(args...)	printk(KERN_DEBUG args)
#define pr_hardcont(args...)	printk(KERN_CONT args)
#else
#define pr_hard(args...)	do { } while(0)
#define pr_hardcont(args...)	do { } while(0)
#endif
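
/* Note: pr_hard() starts a new debug line while pr_hardcont() appends to
 * the line in progress (KERN_CONT), so one context switch logs as a
 * single line assembled across several functions
 */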

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/slab.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "mmu_decl.h"
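
/*
 * Bookkeeping, all of it protected by context_lock:
 *  - context_map:    bitmap of context IDs currently handed out
 *  - context_mm:     reverse map from context ID to the mm using it
 *  - stale_map[cpu]: per-CPU bitmap of IDs whose TLB entries on that
 *                    CPU are stale and must be flushed before reuse
 *  - no_selective_tlbil: set when the core cannot invalidate a single
 *                    PID, so stealing has to flush the whole TLB
 */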
static unsigned int first_context, last_context;
static unsigned int next_context, nr_free_contexts;
static unsigned long *context_map;
static unsigned long *stale_map[NR_CPUS];
static struct mm_struct **context_mm;
static DEFINE_RAW_SPINLOCK(context_lock);
static bool no_selective_tlbil;

#define CTX_MAP_SIZE	\
	(sizeof(unsigned long) * (last_context / BITS_PER_LONG + 1))
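/* Worked example (a sketch, assuming a 32-bit core with 256 contexts):
 * with last_context = 255 and BITS_PER_LONG = 32 this evaluates to
 * 4 * (255 / 32 + 1) = 32 bytes, i.e. one map bit per context ID
 */

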
/* Steal a context from a task that has one at the moment.
 *
 * This is used when we are running out of available PID numbers
 * on the processors.
 *
 * This isn't an LRU system, it just frees up each context in
 * turn (sort-of pseudo-random replacement :). This would be the
 * place to implement an LRU scheme if anyone was motivated to do it.
 *  -- paulus
 *
 * For context stealing, we use a slightly different approach for
 * SMP and UP. Basically, the UP one is simpler and doesn't use
 * the stale map as we can just flush the local CPU
 *  -- benh
 */
#ifdef CONFIG_SMP
static unsigned int steal_context_smp(unsigned int id)
{
	struct mm_struct *mm;
	unsigned int cpu, max, i;

	max = last_context - first_context;

	/* Attempt to free next_context first and then loop until we manage */
	while (max--) {
		/* Pick up the victim mm */
		mm = context_mm[id];

		/* We have a candidate victim, check if it's active, on SMP
		 * we cannot steal active contexts
		 */
		if (mm->context.active) {
			id++;
			if (id > last_context)
				id = first_context;
			continue;
		}
		pr_hardcont(" | steal %d from 0x%p", id, mm);

		/* Mark this mm as having no context anymore */
		mm->context.id = MMU_NO_CONTEXT;

		/* Mark it stale on all CPUs that used this mm. For threaded
		 * implementations, we set it on all threads on each core
		 * represented in the mask. A future implementation will use
		 * a core map instead but this will do for now.
		 */
		for_each_cpu(cpu, mm_cpumask(mm)) {
			for (i = cpu_first_thread_sibling(cpu);
			     i <= cpu_last_thread_sibling(cpu); i++) {
				if (stale_map[i])
					__set_bit(id, stale_map[i]);
			}
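			/* The inner loop just covered every hardware thread
			 * of this core; bump the iterator so for_each_cpu()
			 * resumes at the next core in the mask
			 */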
			cpu = i - 1;
		}
		return id;
	}

	/* This will happen if you have more CPUs than available contexts,
	 * all we can do here is wait a bit and try again
	 */
	raw_spin_unlock(&context_lock);
	cpu_relax();
	raw_spin_lock(&context_lock);

	/* This will cause the caller to try again */
	return MMU_NO_CONTEXT;
}
#endif  /* CONFIG_SMP */

static unsigned int steal_all_contexts(void)
{
	struct mm_struct *mm;
	int cpu = smp_processor_id();
	unsigned int id;

	for (id = first_context; id <= last_context; id++) {
		/* Pick up the victim mm */
		mm = context_mm[id];

		pr_hardcont(" | steal %d from 0x%p", id, mm);

		/* Mark this mm as having no context anymore */
		mm->context.id = MMU_NO_CONTEXT;
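		/* Keep first_context allocated: it is returned below and
		 * immediately handed to the incoming mm by the caller
		 */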
		if (id != first_context) {
			context_mm[id] = NULL;
			__clear_bit(id, context_map);
#ifdef DEBUG_MAP_CONSISTENCY
			mm->context.active = 0;
#endif
		}
		__clear_bit(id, stale_map[cpu]);
	}

	/* Flush the TLB for all contexts (not to be used on SMP) */
	_tlbil_all();

	nr_free_contexts = last_context - first_context;

	return first_context;
}

/* Note that this will also be called on SMP if all other CPUs are
 * offlined, which means that it may be called for cpu != 0. For
 * this to work, we somewhat assume that CPUs that are onlined
 * come up with a fully clean TLB (or are cleaned when offlined)
 */
static unsigned int steal_context_up(unsigned int id)
{
	struct mm_struct *mm;
	int cpu = smp_processor_id();

	/* Pick up the victim mm */
	mm = context_mm[id];

	pr_hardcont(" | steal %d from 0x%p", id, mm);

	/* Flush the TLB for that context */
	local_flush_tlb_mm(mm);

	/* Mark this mm as having no context anymore */
	mm->context.id = MMU_NO_CONTEXT;

	/* XXX This clear should ultimately be part of local_flush_tlb_mm */
	__clear_bit(id, stale_map[cpu]);

	return id;
}

#ifdef DEBUG_MAP_CONSISTENCY
static void context_check_map(void)
{
	unsigned int id, nrf, nact;

	nrf = nact = 0;
	for (id = first_context; id <= last_context; id++) {
		int used = test_bit(id, context_map);
		if (!used)
			nrf++;
		if (used != (context_mm[id] != NULL))
			pr_err("MMU: Context %d is %s and MM is %p !\n",
			       id, used ? "used" : "free", context_mm[id]);
		if (context_mm[id] != NULL)
			nact += context_mm[id]->context.active;
	}
	if (nrf != nr_free_contexts) {
		pr_err("MMU: Free context count out of sync ! (%d vs %d)\n",
		       nr_free_contexts, nrf);
		nr_free_contexts = nrf;
	}
	if (nact > num_online_cpus())
		pr_err("MMU: More active contexts than CPUs ! (%d vs %d)\n",
		       nact, num_online_cpus());
	if (first_context > 0 && !test_bit(0, context_map))
		pr_err("MMU: Context 0 has been freed !!!\n");
}
#else
static void context_check_map(void) { }
#endif

void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	unsigned int i, id, cpu = smp_processor_id();
	unsigned long *map;

	/* No lockless fast path .. yet */
	raw_spin_lock(&context_lock);

	pr_hard("[%d] activating context for mm @%p, active=%d, id=%d",
		cpu, next, next->context.active, next->context.id);

#ifdef CONFIG_SMP
	/* Mark us active and the previous one not anymore */
	next->context.active++;
	if (prev) {
		pr_hardcont(" (old=0x%p a=%d)", prev, prev->context.active);
		WARN_ON(prev->context.active < 1);
		prev->context.active--;
	}

 again:
#endif /* CONFIG_SMP */

	/* If we already have a valid assigned context, skip all that */
	id = next->context.id;
	if (likely(id != MMU_NO_CONTEXT)) {
#ifdef DEBUG_MAP_CONSISTENCY
		if (context_mm[id] != next)
			pr_err("MMU: mm 0x%p has id %d but context_mm[%d] says 0x%p\n",
			       next, id, id, context_mm[id]);
#endif
		goto ctxt_ok;
	}

	/* We really don't have a context, let's try to acquire one */
	id = next_context;
	if (id > last_context)
		id = first_context;
	map = context_map;

	/* No more free contexts, let's try to steal one */
	if (nr_free_contexts == 0) {
#ifdef CONFIG_SMP
		if (num_online_cpus() > 1) {
			id = steal_context_smp(id);
			if (id == MMU_NO_CONTEXT)
				goto again;
			goto stolen;
		}
#endif /* CONFIG_SMP */
		if (no_selective_tlbil)
			id = steal_all_contexts();
		else
			id = steal_context_up(id);
		goto stolen;
	}
	nr_free_contexts--;

	/* We know there's at least one free context, try to find it */
	while (__test_and_set_bit(id, map)) {
		id = find_next_zero_bit(map, last_context+1, id);
		if (id > last_context)
			id = first_context;
	}
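	/* e.g. if next_context was 12 and IDs 12 and 13 are still taken
	 * but 14 is free, the scan above claims 14; a scan running past
	 * last_context wraps the search back to first_context
	 */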
 stolen:
	next_context = id + 1;
	context_mm[id] = next;
	next->context.id = id;
	pr_hardcont(" | new id=%d,nrf=%d", id, nr_free_contexts);

	context_check_map();
 ctxt_ok:

	/* If that context got marked stale on this CPU, then flush the
	 * local TLB for it and unmark it before we use it
	 */
	if (test_bit(id, stale_map[cpu])) {
		pr_hardcont(" | stale flush %d [%d..%d]",
			    id, cpu_first_thread_sibling(cpu),
			    cpu_last_thread_sibling(cpu));

		local_flush_tlb_mm(next);

		/* XXX This clear should ultimately be part of local_flush_tlb_mm */
		for (i = cpu_first_thread_sibling(cpu);
		     i <= cpu_last_thread_sibling(cpu); i++) {
			if (stale_map[i])
				__clear_bit(id, stale_map[i]);
		}
	}

	/* Flick the MMU and release lock */
	pr_hardcont(" -> %d\n", id);
	set_context(id, next->pgd);
	raw_spin_unlock(&context_lock);
}

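/*
 * Rough sketch of the call site (a simplification; the actual
 * switch_mm() wrapper in asm/mmu_context.h also handles the hash-MMU
 * case): the scheduler hands us the outgoing and incoming mm:
 *
 *	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
 *	switch_mmu_context(prev, next, tsk);
 *
 * The cpumask bit is what later lets steal_context_smp() know which
 * CPUs may hold stale TLB entries for this mm.
 */
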
/*
 * Set up the context for a new address space.
 */
int init_new_context(struct task_struct *t, struct mm_struct *mm)
{
	pr_hard("initing context for mm @%p\n", mm);

	mm->context.id = MMU_NO_CONTEXT;
	mm->context.active = 0;

#ifdef CONFIG_PPC_MM_SLICES
	slice_set_user_psize(mm, mmu_virtual_psize);
#endif

	return 0;
}

/*
 * We're finished using the context for an address space.
 */
void destroy_context(struct mm_struct *mm)
{
	unsigned long flags;
	unsigned int id;

	if (mm->context.id == MMU_NO_CONTEXT)
		return;

	WARN_ON(mm->context.active != 0);

	raw_spin_lock_irqsave(&context_lock, flags);
	id = mm->context.id;
	if (id != MMU_NO_CONTEXT) {
		__clear_bit(id, context_map);
		mm->context.id = MMU_NO_CONTEXT;
#ifdef DEBUG_MAP_CONSISTENCY
		mm->context.active = 0;
#endif
		context_mm[id] = NULL;
		nr_free_contexts++;
	}
	raw_spin_unlock_irqrestore(&context_lock, flags);
}

#ifdef CONFIG_SMP
static int mmu_ctx_cpu_prepare(unsigned int cpu)
{
	/* We don't touch the boot CPU map, it's allocated at boot and
	 * kept around forever
	 */
	if (cpu == boot_cpuid)
		return 0;

	pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu);
	stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
	return 0;
}

static int mmu_ctx_cpu_dead(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	if (cpu == boot_cpuid)
		return 0;

	pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
	kfree(stale_map[cpu]);
	stale_map[cpu] = NULL;

	/* We also clear the cpu_vm_mask bits of CPUs going away */
	clear_tasks_mm_cpumask(cpu);
#endif
	return 0;
}
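
/* Both callbacks above are wired up from mmu_context_init() below via
 * cpuhp_setup_state_nocalls()
 */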

#endif /* CONFIG_SMP */

/*
 * Initialize the context management stuff.
 */
void __init mmu_context_init(void)
{
	/* Mark init_mm as being active on all possible CPUs since
	 * we'll get called with prev == init_mm the first time
	 * we schedule on a given CPU
	 */
	init_mm.context.active = NR_CPUS;

	/*
	 * The MPC8xx has only 16 contexts. We rotate through them on each
	 * task switch. A better way would be to keep track of tasks that
	 * own contexts, and implement an LRU usage. That way very active
	 * tasks don't always have to pay the TLB reload overhead. The
	 * kernel pages are mapped shared, so the kernel can run on behalf
	 * of any task that makes a kernel entry. Shared does not mean they
	 * are not protected, just that the ASID comparison is not performed.
	 *	-- Dan
	 *
	 * The IBM4xx has 256 contexts, so we can just rotate through these
	 * as a way of "switching" contexts. If the TID of the TLB is zero,
	 * the PID/TID comparison is disabled, so we can use a TID of zero
	 * to represent all kernel pages as shared among all contexts.
	 *	-- Dan
	 *
	 * The IBM 47x core supports 16-bit PIDs, thus 65535 contexts. We
	 * should normally never have to steal though the facility is
	 * present if needed.
	 *	-- BenH
	 */
	if (mmu_has_feature(MMU_FTR_TYPE_8xx)) {
		first_context = 0;
		last_context = 15;
		no_selective_tlbil = true;
	} else if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
		first_context = 1;
		last_context = 65535;
		no_selective_tlbil = false;
	} else {
		first_context = 1;
		last_context = 255;
		no_selective_tlbil = false;
	}

#ifdef DEBUG_CLAMP_LAST_CONTEXT
	last_context = DEBUG_CLAMP_LAST_CONTEXT;
#endif
	/*
	 * Allocate the maps used by context management
	 */
	context_map = memblock_virt_alloc(CTX_MAP_SIZE, 0);
	context_mm = memblock_virt_alloc(sizeof(void *) * (last_context + 1), 0);
#ifndef CONFIG_SMP
	stale_map[0] = memblock_virt_alloc(CTX_MAP_SIZE, 0);
#else
	stale_map[boot_cpuid] = memblock_virt_alloc(CTX_MAP_SIZE, 0);

	cpuhp_setup_state_nocalls(CPUHP_POWERPC_MMU_CTX_PREPARE,
				  "powerpc/mmu/ctx:prepare",
				  mmu_ctx_cpu_prepare, mmu_ctx_cpu_dead);
#endif

	printk(KERN_INFO
	       "MMU: Allocated %zu bytes of context maps for %d contexts\n",
	       2 * CTX_MAP_SIZE + (sizeof(void *) * (last_context + 1)),
	       last_context - first_context + 1);

	/*
	 * Some processors have too few contexts to reserve one for
	 * init_mm, and require using context 0 for a normal task.
	 * Other processors reserve the use of context zero for the kernel.
	 * This code assumes first_context < 32.
	 */
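	/* e.g. with first_context == 1, context_map[0] becomes 0x1 and
	 * context 0 stays reserved for the kernel forever; on 8xx, where
	 * first_context == 0, nothing is reserved
	 */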
	context_map[0] = (1 << first_context) - 1;
	next_context = first_context;
	nr_free_contexts = last_context - first_context + 1;
}