/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * vineetg: May 2011
 *  -Refactored get_new_mmu_context( ) to only handle live-mm.
 *   retiring-mm handled in other hooks
 *
 * Vineetg: March 25th, 2008: Bug #92690
 *  -Major rewrite of Core ASID allocation routine get_new_mmu_context
 *
 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 */

#ifndef _ASM_ARC_MMU_CONTEXT_H
#define _ASM_ARC_MMU_CONTEXT_H

#include <asm/arcregs.h>
#include <asm/tlb.h>

#include <asm-generic/mm_hooks.h>

/* ARC700 ASID Management
 *
 * The ARC MMU provides an 8-bit ASID (0..255) to tag TLB entries, allowing
 * entries with the same vaddr (from different tasks) to co-exist. This
 * provides for a "Fast Context Switch", i.e. no TLB flush on ctxt-switch.
 *
 * Linux assigns each task a unique ASID. A simple round-robin allocation
 * of H/w ASID is done using the software tracker @asid_cpu.
 * When it reaches max 255, the allocation cycle starts afresh by flushing
 * the entire TLB and wrapping the ASID back to zero.
 *
 * A new allocation cycle, post rollover, could potentially reassign an ASID
 * to a different task. Thus the rule is to refresh the ASID in a new cycle.
 * The 32 bit @asid_cpu (and mm->asid) has the 8 bit MMU PID in its low bits;
 * the remaining 24 bits serve as a cycle/generation indicator, and natural
 * 32 bit unsigned math automagically increments the generation when the
 * lower 8 bits roll over.
 */

#define MM_CTXT_ASID_MASK	0x000000ff /* MMU PID reg: 8 bit PID */
#define MM_CTXT_CYCLE_MASK	(~MM_CTXT_ASID_MASK)

#define MM_CTXT_FIRST_CYCLE	(MM_CTXT_ASID_MASK + 1)
#define MM_CTXT_NO_ASID		0UL
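
/*
 * Editor's note on the two sentinel values: MM_CTXT_NO_ASID (0) marks an mm
 * with no valid ASID/generation, forcing allocation on next use, while
 * MM_CTXT_FIRST_CYCLE (0x100) is what the tracker is reset to if the full
 * 32 bit counter itself ever wraps to 0, so a live generation is never
 * confused with "no context" (see the rollover handling in
 * get_new_mmu_context() below).
 */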

#define asid_mm(mm, cpu)	mm->context.asid[cpu]
#define hw_pid(mm, cpu)		(asid_mm(mm, cpu) & MM_CTXT_ASID_MASK)

DECLARE_PER_CPU(unsigned int, asid_cache);
#define asid_cpu(cpu)		per_cpu(asid_cache, cpu)

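/*
 * Worked example of the generation math (hypothetical values, not from the
 * original sources): with asid_cpu(cpu) == 0x2ff, hw PID 0xff is the last
 * of generation 0x200. The next allocation pre-increments the tracker to
 * 0x300; the low 8 bits rolled over to 0, so the whole TLB is flushed and
 * the mm is assigned ASID 0x300, i.e. hw PID 0 of generation 0x300.
 */
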
/*
 * Get a new ASID if task doesn't have a valid one (unalloc or from prev cycle)
 * Also set the MMU PID register to existing/updated ASID
 */
static inline void get_new_mmu_context(struct mm_struct *mm)
{
	const unsigned int cpu = smp_processor_id();
	unsigned long flags;

	local_irq_save(flags);

	/*
	 * Move to new ASID if it was not from current alloc-cycle/generation.
	 * This is done by ensuring that the generation bits in both mm->ASID
	 * and cpu's ASID counter are exactly the same.
	 *
	 * Note: Callers needing a new ASID unconditionally, independent of
	 *	 generation, e.g. local_flush_tlb_mm() for a forking parent,
	 *	 first need to destroy the context, setting it to an invalid
	 *	 value.
	 */
	if (!((asid_mm(mm, cpu) ^ asid_cpu(cpu)) & MM_CTXT_CYCLE_MASK))
		goto set_hw;

	/* move to new ASID and handle rollover */
	if (unlikely(!(++asid_cpu(cpu) & MM_CTXT_ASID_MASK))) {

		local_flush_tlb_all();

		/*
		 * The check above is for rollover of the 8 bit ASID in the
		 * 32 bit container. If the container itself wrapped around,
		 * set it to a non-zero "generation" to distinguish it from
		 * no context.
		 */
		if (!asid_cpu(cpu))
			asid_cpu(cpu) = MM_CTXT_FIRST_CYCLE;
	}

	/* Assign new ASID to tsk */
	asid_mm(mm, cpu) = asid_cpu(cpu);

set_hw:
	write_aux_reg(ARC_REG_PID, hw_pid(mm, cpu) | MMU_ENABLE);

	local_irq_restore(flags);
}
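
/*
 * Minimal sketch (illustrative; it mirrors what local_flush_tlb_mm() in
 * tlb.c does) of the "destroy first" rule noted above, for callers that
 * need a new ASID unconditionally, independent of generation:
 *
 *	destroy_context(mm);		// mm->asid becomes MM_CTXT_NO_ASID
 *	get_new_mmu_context(mm);	// generation mismatch -> fresh ASID
 */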

/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int i;

	for_each_possible_cpu(i)
		asid_mm(mm, i) = MM_CTXT_NO_ASID;

	return 0;
}

static inline void destroy_context(struct mm_struct *mm)
{
	unsigned long flags;

	/* Needed to elide CONFIG_DEBUG_PREEMPT warning */
	local_irq_save(flags);
	asid_mm(mm, smp_processor_id()) = MM_CTXT_NO_ASID;
	local_irq_restore(flags);
}

/*
 * Prepare the MMU for task: setup PID reg with allocated ASID
 * If task doesn't have an ASID (never allocated or stolen), get a new ASID
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	const int cpu = smp_processor_id();

	/*
	 * Note that the mm_cpumask is "aggregating" only: we don't clear it
	 * for the switched-out task, unlike some other arches.
	 * It is used to enlist cpus for sending TLB flush IPIs; not sending
	 * the IPI to CPUs where a task once ran could cause stale TLB entry
	 * re-use, especially for a multi-threaded task.
	 * e.g. T1 runs on C1, migrates to C3. T2 running on C2 munmaps.
	 *      For a non-aggregating mm_cpumask, the IPI is not sent to C1,
	 *      and if T1 were to re-migrate to C1, it could access the
	 *      unmapped region via any existing stale TLB entries.
	 */
	cpumask_set_cpu(cpu, mm_cpumask(next));

#ifndef CONFIG_SMP
	/* PGD cached in MMU reg to avoid 3 mem lookups: task->mm->pgd */
	write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
#endif

	get_new_mmu_context(next);
}

/*
 * Called at the time of execve() to get a new ASID
 * Note the subtlety here: get_new_mmu_context() behaves differently here
 * vs. in switch_mm(). Here it always returns a new ASID, because mm has
 * an unallocated "initial" value, while in the latter it moves to a new
 * ASID only if it was unallocated.
 */
#define activate_mm(prev, next)		switch_mm(prev, next, NULL)

/* It seemed that deactivate_mm( ) would be a reasonable place to do
 * book-keeping for a retiring-mm. However destroy_context( ) still needs to
 * do that, because between mm_release( ) => deactivate_mm( ) and
 * mmput( ) => .. => __mmdrop( ) => destroy_context( )
 * there is a good chance that the task gets sched-out/in, making its ASID
 * valid again (this teased me for a whole day).
 */
#define deactivate_mm(tsk, mm)		do { } while (0)

#define enter_lazy_tlb(mm, tsk)

#endif	/* _ASM_ARC_MMU_CONTEXT_H */