/*
 * Low level TLB handling.
 *
 * Copyright (C) 2000-2003, Axis Communications AB.
 *
 * Authors:   Bjorn Wesen <bjornw@axis.com>
 *            Tobias Anderberg <tobiasa@axis.com>, CRISv32 port.
 */
#include <linux/mm_types.h>

#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <arch/hwregs/asm/mmu_defs_asm.h>
#include <arch/hwregs/supp_reg.h>

/*
 * Select TLB entry index @val for a following HI/LO read or write by
 * writing the rw_mm_tlb_sel register of the currently selected MMU bank.
 */
#define UPDATE_TLB_SEL_IDX(val)					\
do {								\
	unsigned long tlb_sel;					\
								\
	tlb_sel = REG_FIELD(mmu, rw_mm_tlb_sel, idx, val);	\
	SUPP_REG_WR(RW_MM_TLB_SEL, tlb_sel);			\
} while(0)

/*
 * Write a complete TLB entry (HI part, then LO part) at the index
 * previously chosen with UPDATE_TLB_SEL_IDX().
 */
#define UPDATE_TLB_HILO(tlb_hi, tlb_lo)		\
do {						\
	SUPP_REG_WR(RW_MM_TLB_HI, tlb_hi);	\
	SUPP_REG_WR(RW_MM_TLB_LO, tlb_lo);	\
} while(0)

/*
 * The TLB can host up to 256 different mm contexts at the same time. The
 * running context is found in the PID register. Each TLB entry contains a
 * page_id that has to match the PID register to give a hit. page_id_map
 * keeps track of which mm's are assigned to which page_id's, making sure
 * it's known when to invalidate TLB entries.
 *
 * The last page_id is never running; it is used as an invalid page_id so
 * that it's possible to make TLB entries that will never match.
 *
 * Note: the flushes need to be atomic, otherwise an interrupt handler that
 * uses vmalloc'ed memory might cause a TLB load in the middle of a flush.
 */

44 | /* Flush all TLB entries. */ | |
45 | void | |
46 | __flush_tlb_all(void) | |
47 | { | |
48 | int i; | |
49 | int mmu; | |
50 | unsigned long flags; | |
51 | unsigned long mmu_tlb_hi; | |
52 | unsigned long mmu_tlb_sel; | |
53 | ||
54 | /* | |
55 | * Mask with 0xf so similar TLB entries aren't written in the same 4-way | |
56 | * entry group. | |
57 | */ | |
5cf885d0 | 58 | local_irq_save(flags); |
51533b61 MS |
59 | |
60 | for (mmu = 1; mmu <= 2; mmu++) { | |
61 | SUPP_BANK_SEL(mmu); /* Select the MMU */ | |
62 | for (i = 0; i < NUM_TLB_ENTRIES; i++) { | |
63 | /* Store invalid entry */ | |
64 | mmu_tlb_sel = REG_FIELD(mmu, rw_mm_tlb_sel, idx, i); | |
65 | ||
66 | mmu_tlb_hi = (REG_FIELD(mmu, rw_mm_tlb_hi, pid, INVALID_PAGEID) | |
67 | | REG_FIELD(mmu, rw_mm_tlb_hi, vpn, i & 0xf)); | |
68 | ||
69 | SUPP_REG_WR(RW_MM_TLB_SEL, mmu_tlb_sel); | |
70 | SUPP_REG_WR(RW_MM_TLB_HI, mmu_tlb_hi); | |
71 | SUPP_REG_WR(RW_MM_TLB_LO, 0); | |
72 | } | |
73 | } | |
74 | ||
75 | local_irq_restore(flags); | |
76 | } | |
77 | ||
78 | /* Flush an entire user address space. */ | |
79 | void | |
80 | __flush_tlb_mm(struct mm_struct *mm) | |
81 | { | |
82 | int i; | |
83 | int mmu; | |
84 | unsigned long flags; | |
85 | unsigned long page_id; | |
86 | unsigned long tlb_hi; | |
87 | unsigned long mmu_tlb_hi; | |
88 | ||
89 | page_id = mm->context.page_id; | |
90 | ||
91 | if (page_id == NO_CONTEXT) | |
92 | return; | |
93 | ||
94 | /* Mark the TLB entries that match the page_id as invalid. */ | |
5cf885d0 | 95 | local_irq_save(flags); |
51533b61 MS |
96 | |
97 | for (mmu = 1; mmu <= 2; mmu++) { | |
98 | SUPP_BANK_SEL(mmu); | |
99 | for (i = 0; i < NUM_TLB_ENTRIES; i++) { | |
100 | UPDATE_TLB_SEL_IDX(i); | |
101 | ||
102 | /* Get the page_id */ | |
103 | SUPP_REG_RD(RW_MM_TLB_HI, tlb_hi); | |
104 | ||
105 | /* Check if the page_id match. */ | |
106 | if ((tlb_hi & 0xff) == page_id) { | |
107 | mmu_tlb_hi = (REG_FIELD(mmu, rw_mm_tlb_hi, pid, | |
108 | INVALID_PAGEID) | |
109 | | REG_FIELD(mmu, rw_mm_tlb_hi, vpn, | |
110 | i & 0xf)); | |
111 | ||
112 | UPDATE_TLB_HILO(mmu_tlb_hi, 0); | |
113 | } | |
114 | } | |
115 | } | |
116 | ||
117 | local_irq_restore(flags); | |
118 | } | |
119 | ||
120 | /* Invalidate a single page. */ | |
121 | void | |
122 | __flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) | |
123 | { | |
124 | int i; | |
125 | int mmu; | |
126 | unsigned long page_id; | |
127 | unsigned long flags; | |
128 | unsigned long tlb_hi; | |
129 | unsigned long mmu_tlb_hi; | |
130 | ||
131 | page_id = vma->vm_mm->context.page_id; | |
132 | ||
133 | if (page_id == NO_CONTEXT) | |
134 | return; | |
135 | ||
136 | addr &= PAGE_MASK; | |
137 | ||
138 | /* | |
139 | * Invalidate those TLB entries that match both the mm context and the | |
140 | * requested virtual address. | |
141 | */ | |
5cf885d0 | 142 | local_irq_save(flags); |
51533b61 MS |
143 | |
144 | for (mmu = 1; mmu <= 2; mmu++) { | |
145 | SUPP_BANK_SEL(mmu); | |
146 | for (i = 0; i < NUM_TLB_ENTRIES; i++) { | |
147 | UPDATE_TLB_SEL_IDX(i); | |
148 | SUPP_REG_RD(RW_MM_TLB_HI, tlb_hi); | |
149 | ||
150 | /* Check if page_id and address matches */ | |
151 | if (((tlb_hi & 0xff) == page_id) && | |
152 | ((tlb_hi & PAGE_MASK) == addr)) { | |
153 | mmu_tlb_hi = REG_FIELD(mmu, rw_mm_tlb_hi, pid, | |
154 | INVALID_PAGEID) | addr; | |
155 | ||
156 | UPDATE_TLB_HILO(mmu_tlb_hi, 0); | |
157 | } | |
158 | } | |
159 | } | |
160 | ||
161 | local_irq_restore(flags); | |
162 | } | |
163 | ||
164 | /* | |
165 | * Initialize the context related info for a new mm_struct | |
166 | * instance. | |
167 | */ | |
168 | ||
169 | int | |
170 | init_new_context(struct task_struct *tsk, struct mm_struct *mm) | |
171 | { | |
172 | mm->context.page_id = NO_CONTEXT; | |
173 | return 0; | |
174 | } | |
175 | ||
/* Serializes page_id allocation across CPUs — held around get_mmu_context(). */
static DEFINE_SPINLOCK(mmu_context_lock);

51533b61 MS |
/*
 * Called in schedule() just before actually doing the switch_to.
 *
 * Makes sure the next mm has a valid page_id, records this CPU in the
 * mm's cpumask, caches the pgd per-CPU for the fault handlers, and
 * finally programs the MMU's PID special register with the new context.
 */
void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
	if (prev != next) {
		int cpu = smp_processor_id();

		/* Make sure there is a MMU context. */
		spin_lock(&mmu_context_lock);
		get_mmu_context(next);
		cpumask_set_cpu(cpu, mm_cpumask(next));
		spin_unlock(&mmu_context_lock);

		/*
		 * Remember the pgd for the fault handlers. Keep a separate
		 * copy of it because current and active_mm might be invalid
		 * at points where there's still a need to dereference the pgd.
		 */
		per_cpu(current_pgd, cpu) = next->pgd;

		/*
		 * Switch context in the MMU. The thread's TLS value is
		 * OR'ed into the PID register write when thread info is
		 * available.
		 */
		if (tsk && task_thread_info(tsk)) {
			SPEC_REG_WR(SPEC_REG_PID, next->context.page_id |
				    task_thread_info(tsk)->tls);
		} else {
			SPEC_REG_WR(SPEC_REG_PID, next->context.page_id);
		}
	}
}
208 |