/*
 * TLB Management (flush/create/diagnostics) for ARC700
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * vineetg: Aug 2011
 *  -Reintroduce duplicate PD fixup - some customer chips still have the issue
 *
 * vineetg: May 2011
 *  -No need to flush_cache_page( ) for each call to update_mmu_cache()
 *   some of the LMBench tests improved amazingly
 *      = page-fault thrice as fast (75 usec to 28 usec)
 *      = mmap twice as fast (9.6 msec to 4.6 msec),
 *      = fork (5.3 msec to 3.7 msec)
 *
 * vineetg: April 2011 :
 *  -MMU v3: PD{0,1} bits layout changed: They don't overlap anymore,
 *      helps avoid a shift when preparing PD0 from PTE
 *
 * vineetg: April 2011 : Preparing for MMU V3
 *  -MMU v2/v3 BCRs decoded differently
 *  -Remove TLB_SIZE hardcoding as it's variable now: 256 or 512
 *  -tlb_entry_erase( ) can be void
 *  -local_flush_tlb_range( ):
 *      = need not "ceil" @end
 *      = walks MMU only if range spans < 32 entries, as opposed to 256
 *
 * Vineetg: Sept 10th 2008
 *  -Changes related to MMU v2 (Rel 4.8)
 *
 * Vineetg: Aug 29th 2008
 *  -In TLB Flush operations (Metal Fix MMU) there is an explicit command to
 *   flush Micro-TLBs. If the TLB Index Reg is invalid prior to the TLBIVUTLB
 *   cmd, it fails. Thus need to load it with ANY valid value before invoking
 *   the TLBIVUTLB cmd
 *
 * Vineetg: Aug 21st 2008:
 *  -Reduced the duration of IRQ lockouts in TLB Flush routines
 *  -Multiple copies of TLB erase code separated into a "single" function
 *  -In TLB Flush routines, interrupt disabling moved UP to retrieve ASID
 *   in interrupt-safe region.
 *
 * Vineetg: April 23rd Bug #93131
 *    Problem: tlb_flush_kernel_range() doesn't do anything if the range to
 *             flush is more than the size of the TLB itself.
 *
 * Rahul Trivedi : Codito Technologies 2004
 */
53 | ||
54 | #include <linux/module.h> | |
55 | #include <asm/arcregs.h> | |
d79e678d | 56 | #include <asm/setup.h> |
f1f3347d | 57 | #include <asm/mmu_context.h> |
da1677b0 | 58 | #include <asm/mmu.h> |
f1f3347d | 59 | |
/* The need for ARC MMU v2
 *
 * ARC700 MMU-v1 had a Joint-TLB for Code and Data and is 2 way set-assoc.
 * For a memcpy operation with 3 players (src/dst/code) such that all 3 pages
 * map into the same set, there would be contention for the 2 ways causing
 * severe Thrashing.
 *
 * Although J-TLB is 2 way set assoc, ARC700 caches J-TLB into uTLBs which have
 * much higher associativity. u-D-TLB is 8 ways, u-I-TLB is 4 ways.
 * Given this, the thrashing problem should never happen because once the 3
 * J-TLB entries are created (even though the 3rd will knock out one of the
 * prev two), the u-D-TLB and u-I-TLB will have what is required to accomplish
 * memcpy.
 *
 * Yet we still see the Thrashing because a J-TLB Write causes a flush of
 * u-TLBs. This is a simple design for keeping them in sync. So what do we do?
 * The solution which James came up with was pretty neat. It utilised the assoc
 * of uTLBs by not invalidating always but only when absolutely necessary.
 *
 * - Existing TLB commands work as before
 * - New command (TLBWriteNI) for TLB write without clearing uTLBs
 * - New command (TLBIVUTLB) to invalidate uTLBs.
 *
 * The uTLBs need only be invalidated when pages are being removed from the
 * OS page table. If a 'victim' TLB entry is being overwritten in the main TLB
 * as a result of a miss, the removed entry is still allowed to exist in the
 * uTLBs as it is still valid and present in the OS page table. This allows the
 * full associativity of the uTLBs to hide the limited associativity of the
 * main TLB.
 *
 * During a miss handler, the new "TLBWriteNI" command is used to load
 * entries without clearing the uTLBs.
 *
 * When the OS page table is updated, TLB entries that may be associated with a
 * removed page are removed (flushed) from the TLB using TLBWrite. In this
 * circumstance, the uTLBs must also be cleared. This is done by using the
 * existing TLBWrite command. An explicit IVUTLB is also required for those
 * corner cases when TLBWrite was not executed at all because the corresponding
 * J-TLB entry got evicted/replaced.
 */

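/*
 * Quick summary of the intended command usage described above:
 *   - TLB miss refill             : TLBWriteNI (uTLBs keep their copies)
 *   - Page removed from page tbls : TLBWrite (+ explicit IVUTLB for the
 *                                   corner case where no TLBWrite was issued)
 */
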
/* A copy of the ASID from the PID reg is kept in asid_cache */
int asid_cache = FIRST_ASID;

/* ASID to mm struct mapping. We have one extra entry corresponding to
 * NO_ASID to save us a compare when clearing the mm entry for old asid
 * see get_new_mmu_context (asm-arc/mmu_context.h)
 */
struct mm_struct *asid_mm_map[NUM_ASID + 1];

/*
 * Utility Routine to erase a J-TLB entry
 * The procedure is to look it up in the MMU. If found, ERASE it by
 *  issuing a TlbWrite CMD with PD0 = PD1 = 0
 */

static void __tlb_entry_erase(void)
{
	write_aux_reg(ARC_REG_TLBPD1, 0);
	write_aux_reg(ARC_REG_TLBPD0, 0);
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}

static void tlb_entry_erase(unsigned int vaddr_n_asid)
{
	unsigned int idx;

	/* Locate the TLB entry for this vaddr + ASID */
	write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid);
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
	idx = read_aux_reg(ARC_REG_TLBINDEX);

	/* No error means entry found, zero it out */
	if (likely(!(idx & TLB_LKUP_ERR))) {
		__tlb_entry_erase();
	} else {		/* Some sort of Error */

		/* Duplicate entry error */
		if (idx & 0x1) {
			/* TODO we need to handle this case too */
			pr_emerg("unhandled Duplicate flush for %x\n",
				 vaddr_n_asid);
		}
		/* else entry not found so nothing to do */
	}
}
146 | ||
147 | /**************************************************************************** | |
148 | * ARC700 MMU caches recently used J-TLB entries (RAM) as uTLBs (FLOPs) | |
149 | * | |
150 | * New IVUTLB cmd in MMU v2 explictly invalidates the uTLB | |
151 | * | |
152 | * utlb_invalidate ( ) | |
153 | * -For v2 MMU calls Flush uTLB Cmd | |
154 | * -For v1 MMU does nothing (except for Metal Fix v1 MMU) | |
155 | * This is because in v1 TLBWrite itself invalidate uTLBs | |
156 | ***************************************************************************/ | |
157 | ||
158 | static void utlb_invalidate(void) | |
159 | { | |
160 | #if (CONFIG_ARC_MMU_VER >= 2) | |
161 | ||
162 | #if (CONFIG_ARC_MMU_VER < 3) | |
163 | /* MMU v2 introduced the uTLB Flush command. | |
164 | * There was however an obscure hardware bug, where uTLB flush would | |
165 | * fail when a prior probe for J-TLB (both totally unrelated) would | |
166 | * return lkup err - because the entry didnt exist in MMU. | |
167 | * The Workround was to set Index reg with some valid value, prior to | |
168 | * flush. This was fixed in MMU v3 hence not needed any more | |
169 | */ | |
170 | unsigned int idx; | |
171 | ||
172 | /* make sure INDEX Reg is valid */ | |
173 | idx = read_aux_reg(ARC_REG_TLBINDEX); | |
174 | ||
175 | /* If not write some dummy val */ | |
176 | if (unlikely(idx & TLB_LKUP_ERR)) | |
177 | write_aux_reg(ARC_REG_TLBINDEX, 0xa); | |
178 | #endif | |
179 | ||
180 | write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB); | |
181 | #endif | |
182 | ||
183 | } | |
184 | ||
185 | /* | |
186 | * Un-conditionally (without lookup) erase the entire MMU contents | |
187 | */ | |
188 | ||
189 | noinline void local_flush_tlb_all(void) | |
190 | { | |
191 | unsigned long flags; | |
192 | unsigned int entry; | |
193 | struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu; | |
194 | ||
195 | local_irq_save(flags); | |
196 | ||
197 | /* Load PD0 and PD1 with template for a Blank Entry */ | |
198 | write_aux_reg(ARC_REG_TLBPD1, 0); | |
199 | write_aux_reg(ARC_REG_TLBPD0, 0); | |
200 | ||
201 | for (entry = 0; entry < mmu->num_tlb; entry++) { | |
202 | /* write this entry to the TLB */ | |
203 | write_aux_reg(ARC_REG_TLBINDEX, entry); | |
204 | write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite); | |
205 | } | |
206 | ||
207 | utlb_invalidate(); | |
208 | ||
209 | local_irq_restore(flags); | |
210 | } | |
211 | ||
212 | /* | |
213 | * Flush the entrie MM for userland. The fastest way is to move to Next ASID | |
214 | */ | |
215 | noinline void local_flush_tlb_mm(struct mm_struct *mm) | |
216 | { | |
217 | /* | |
218 | * Small optimisation courtesy IA64 | |
219 | * flush_mm called during fork,exit,munmap etc, multiple times as well. | |
220 | * Only for fork( ) do we need to move parent to a new MMU ctxt, | |
221 | * all other cases are NOPs, hence this check. | |
222 | */ | |
223 | if (atomic_read(&mm->mm_users) == 0) | |
224 | return; | |
225 | ||
226 | /* | |
227 | * Workaround for Android weirdism: | |
228 | * A binder VMA could end up in a task such that vma->mm != tsk->mm | |
229 | * old code would cause h/w - s/w ASID to get out of sync | |
230 | */ | |
231 | if (current->mm != mm) | |
232 | destroy_context(mm); | |
233 | else | |
234 | get_new_mmu_context(mm); | |
235 | } | |
236 | ||
237 | /* | |
238 | * Flush a Range of TLB entries for userland. | |
239 | * @start is inclusive, while @end is exclusive | |
240 | * Difference between this and Kernel Range Flush is | |
241 | * -Here the fastest way (if range is too large) is to move to next ASID | |
242 | * without doing any explicit Shootdown | |
243 | * -In case of kernel Flush, entry has to be shot down explictly | |
244 | */ | |
245 | void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, | |
246 | unsigned long end) | |
247 | { | |
248 | unsigned long flags; | |
249 | unsigned int asid; | |
250 | ||
	/* If range @start to @end is more than 32 TLB entries deep,
	 * it's better to move to a new ASID rather than searching for
	 * individual entries and then shooting them down
	 *
	 * The calc above is rough, doesn't account for unaligned parts,
	 * since this is heuristics based anyway
	 */
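	/*
	 * (Illustrative figure only: with 8K pages the cutoff works out to a
	 *  256K range; it scales with whatever PAGE_SIZE is configured)
	 */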
	if (unlikely((end - start) >= PAGE_SIZE * 32)) {
		local_flush_tlb_mm(vma->vm_mm);
		return;
	}

	/*
	 * @start moved to page start: this alone suffices for checking
	 * loop end condition below, w/o need for aligning @end to end
	 * e.g. 2000 to 4001 will anyhow loop twice
	 */
	start &= PAGE_MASK;

	local_irq_save(flags);
	asid = vma->vm_mm->context.asid;

	if (asid != NO_ASID) {
		while (start < end) {
			tlb_entry_erase(start | (asid & 0xff));
			start += PAGE_SIZE;
		}
	}

	utlb_invalidate();

	local_irq_restore(flags);
}
284 | ||
285 | /* Flush the kernel TLB entries - vmalloc/modules (Global from MMU perspective) | |
286 | * @start, @end interpreted as kvaddr | |
287 | * Interestingly, shared TLB entries can also be flushed using just | |
288 | * @start,@end alone (interpreted as user vaddr), although technically SASID | |
289 | * is also needed. However our smart TLbProbe lookup takes care of that. | |
290 | */ | |
291 | void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) | |
292 | { | |
293 | unsigned long flags; | |
294 | ||
295 | /* exactly same as above, except for TLB entry not taking ASID */ | |
296 | ||
297 | if (unlikely((end - start) >= PAGE_SIZE * 32)) { | |
298 | local_flush_tlb_all(); | |
299 | return; | |
300 | } | |
301 | ||
302 | start &= PAGE_MASK; | |
303 | ||
304 | local_irq_save(flags); | |
305 | while (start < end) { | |
306 | tlb_entry_erase(start); | |
307 | start += PAGE_SIZE; | |
308 | } | |
309 | ||
310 | utlb_invalidate(); | |
311 | ||
312 | local_irq_restore(flags); | |
313 | } | |
314 | ||
315 | /* | |
316 | * Delete TLB entry in MMU for a given page (??? address) | |
317 | * NOTE One TLB entry contains translation for single PAGE | |
318 | */ | |
319 | ||
320 | void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) | |
321 | { | |
322 | unsigned long flags; | |
323 | ||
324 | /* Note that it is critical that interrupts are DISABLED between | |
325 | * checking the ASID and using it flush the TLB entry | |
326 | */ | |
327 | local_irq_save(flags); | |
328 | ||
329 | if (vma->vm_mm->context.asid != NO_ASID) { | |
330 | tlb_entry_erase((page & PAGE_MASK) | | |
331 | (vma->vm_mm->context.asid & 0xff)); | |
332 | utlb_invalidate(); | |
333 | } | |
334 | ||
335 | local_irq_restore(flags); | |
336 | } | |

/*
 * Routine to create a TLB entry
 */
void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	unsigned long flags;
	unsigned int idx, asid_or_sasid, rwx;
	unsigned long pd0_flags;

	/*
	 * create_tlb() assumes that current->mm == vma->vm_mm, since
	 * -its ASID for TLB entry is fetched from MMU ASID reg (valid for curr)
	 * -completes the lazy write to SASID reg (again valid for curr tsk)
	 *
	 * Removing the assumption involves
	 * -Using vma->vm_mm->context{ASID,SASID}, as opposed to MMU reg.
	 * -Fix the TLB paranoid debug code to not trigger false negatives.
	 * -More importantly it makes this handler inconsistent with fast-path
	 *  TLB Refill handler which always deals with "current"
	 *
	 * Let's see the use cases when current->mm != vma->vm_mm and we land
	 * here:
	 *  1. execve->copy_strings()->__get_user_pages->handle_mm_fault
	 *     Here VM wants to pre-install a TLB entry for user stack while
	 *     current->mm still points to pre-execve mm (hence the condition).
	 *     However the stack vaddr is soon relocated (randomization) and
	 *     move_page_tables() tries to undo that TLB entry.
	 *     Thus not creating TLB entry is not any worse.
	 *
	 *  2. ptrace(POKETEXT) causes a CoW - debugger(current) inserting a
	 *     breakpoint in debugged task. Not creating a TLB now is not
	 *     performance critical.
	 *
	 * Both the cases above are not good enough for code churn.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	tlb_paranoid_check(vma->vm_mm->context.asid, address);

	address &= PAGE_MASK;

	/* update this PTE credentials */
	pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED);

	/* Create HW TLB entry Flags (in PD0) from PTE Flags */
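	/*
	 * (On MMU v1/v2 the PD0 flag positions are offset by one bit from the
	 *  PTE encoding, hence the shift below; v3 aligned the layouts - see
	 *  the "April 2011" note in the header)
	 */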
#if (CONFIG_ARC_MMU_VER <= 2)
	pd0_flags = ((pte_val(*ptep) & PTE_BITS_IN_PD0) >> 1);
#else
	pd0_flags = ((pte_val(*ptep) & PTE_BITS_IN_PD0));
#endif

	/* ASID for this task */
	asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;

	write_aux_reg(ARC_REG_TLBPD0, address | pd0_flags | asid_or_sasid);

	/*
	 * ARC MMU provides fully orthogonal access bits for K/U mode,
	 * however Linux only saves 1 set to save PTE real-estate
	 * Here we convert 3 PTE bits into 6 MMU bits:
	 * -Kernel only entries have Kr Kw Kx 0 0 0
	 * -User entries have mirrored K and U bits
	 */
	rwx = pte_val(*ptep) & PTE_BITS_RWX;

	if (pte_val(*ptep) & _PAGE_GLOBAL)
		rwx <<= 3;		/* r w x => Kr Kw Kx 0 0 0 */
	else
		rwx |= (rwx << 3);	/* r w x => Kr Kw Kx Ur Uw Ux */

	/* Load remaining info in PD1 (Page Frame Addr and Kx/Kw/Kr Flags) */
	write_aux_reg(ARC_REG_TLBPD1,
		      rwx | (pte_val(*ptep) & PTE_BITS_NON_RWX_IN_PD1));

	/* First verify if entry for this vaddr+ASID already exists */
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
	idx = read_aux_reg(ARC_REG_TLBINDEX);

	/*
	 * If Not already present get a free slot from MMU.
	 * Otherwise, Probe would have located the entry and set INDEX Reg
	 * with existing location. This will cause Write CMD to over-write
	 * existing entry with new PD0 and PD1
	 */
	if (likely(idx & TLB_LKUP_ERR))
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);

	/*
	 * Commit the Entry to MMU
	 * It doesn't sound safe to use the TLBWriteNI cmd here
	 * which doesn't flush uTLBs. I'd rather be safe than sorry.
	 */
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);

	local_irq_restore(flags);
}

/*
 * Called at the end of pagefault, for a userspace mapped page
 *  -pre-install the corresponding TLB entry into MMU
 *  -Finalize the delayed D-cache flush of kernel mapping of page due to
 *	flush_dcache_page(), copy_user_page()
 *
 * Note that flush (when done) involves both WBACK - so physical page is
 * in sync as well as INV - so any non-congruent aliases don't remain
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
		      pte_t *ptep)
{
	unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
	unsigned long paddr = pte_val(*ptep) & PAGE_MASK;
	struct page *page = pfn_to_page(pte_pfn(*ptep));

	create_tlb(vma, vaddr, ptep);

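	/* No delayed dcache flush to finalize for the shared zero page */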
	if (page == ZERO_PAGE(0)) {
		return;
	}

	/*
	 * Exec page : Independent of aliasing/page-color considerations,
	 *	       since icache doesn't snoop dcache on ARC, any dirty
	 *	       K-mapping of a code page needs to be wback+inv so that
	 *	       icache fetch by userspace sees code correctly.
	 * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it
	 *	       so userspace sees the right data.
	 *  (Avoids the flush for Non-exec + congruent mapping case)
	 */
	if ((vma->vm_flags & VM_EXEC) ||
	    addr_not_cache_congruent(paddr, vaddr)) {

		int dirty = !test_and_set_bit(PG_dc_clean, &page->flags);
		if (dirty) {
			/* wback + inv dcache lines */
			__flush_dcache_page(paddr, paddr);

			/* invalidate any existing icache lines */
			if (vma->vm_flags & VM_EXEC)
				__inv_icache_page(paddr, vaddr);
		}
	}
}

/* Read the Cache Build Configuration Registers, Decode them and save into
 * the cpuinfo structure for later use.
 * No Validation is done here, simply read/convert the BCRs
 */
void read_decode_mmu_bcr(void)
{
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
	unsigned int tmp;
	struct bcr_mmu_1_2 {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int ver:8, ways:4, sets:4, u_itlb:8, u_dtlb:8;
#else
		unsigned int u_dtlb:8, u_itlb:8, sets:4, ways:4, ver:8;
#endif
	} *mmu2;

	struct bcr_mmu_3 {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int ver:8, ways:4, sets:4, osm:1, reserv:3, pg_sz:4,
			     u_itlb:4, u_dtlb:4;
#else
		unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, reserv:3, osm:1,
			     sets:4, ways:4, ver:8;
#endif
	} *mmu3;

	tmp = read_aux_reg(ARC_REG_MMU_BCR);
	mmu->ver = (tmp >> 24);

	if (mmu->ver <= 2) {
		mmu2 = (struct bcr_mmu_1_2 *)&tmp;
		mmu->pg_sz = PAGE_SIZE;
		mmu->sets = 1 << mmu2->sets;
		mmu->ways = 1 << mmu2->ways;
		mmu->u_dtlb = mmu2->u_dtlb;
		mmu->u_itlb = mmu2->u_itlb;
	} else {
		mmu3 = (struct bcr_mmu_3 *)&tmp;
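		/* pg_sz is encoded as (512 << n) bytes, e.g. n = 4 => 8K pages */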
		mmu->pg_sz = 512 << mmu3->pg_sz;
		mmu->sets = 1 << mmu3->sets;
		mmu->ways = 1 << mmu3->ways;
		mmu->u_dtlb = mmu3->u_dtlb;
		mmu->u_itlb = mmu3->u_itlb;
	}

	mmu->num_tlb = mmu->sets * mmu->ways;
}

char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
{
	int n = 0;
	struct cpuinfo_arc_mmu *p_mmu = &cpuinfo_arc700[cpu_id].mmu;

	n += scnprintf(buf + n, len - n, "ARC700 MMU [v%x]\t: %dk PAGE, ",
		       p_mmu->ver, TO_KB(p_mmu->pg_sz));

	n += scnprintf(buf + n, len - n,
		       "J-TLB %d (%dx%d), uDTLB %d, uITLB %d, %s\n",
		       p_mmu->num_tlb, p_mmu->sets, p_mmu->ways,
		       p_mmu->u_dtlb, p_mmu->u_itlb,
		       IS_ENABLED(CONFIG_ARC_MMU_SASID) ? "SASID" : "");

	return buf;
}
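
/*
 * Example of what the above prints (illustrative values only):
 *   "ARC700 MMU [v2]    : 8k PAGE, J-TLB 256 (128x2), uDTLB 8, uITLB 4, "
 */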
547 | ||
ce759956 | 548 | void arc_mmu_init(void) |
cc562d2e | 549 | { |
af617428 VG |
550 | char str[256]; |
551 | struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu; | |
552 | ||
553 | printk(arc_mmu_mumbojumbo(0, str, sizeof(str))); | |
554 | ||
555 | /* For efficiency sake, kernel is compile time built for a MMU ver | |
556 | * This must match the hardware it is running on. | |
557 | * Linux built for MMU V2, if run on MMU V1 will break down because V1 | |
558 | * hardware doesn't understand cmds such as WriteNI, or IVUTLB | |
559 | * On the other hand, Linux built for V1 if run on MMU V2 will do | |
560 | * un-needed workarounds to prevent memcpy thrashing. | |
561 | * Similarly MMU V3 has new features which won't work on older MMU | |
562 | */ | |
563 | if (mmu->ver != CONFIG_ARC_MMU_VER) { | |
564 | panic("MMU ver %d doesn't match kernel built for %d...\n", | |
565 | mmu->ver, CONFIG_ARC_MMU_VER); | |
566 | } | |
567 | ||
568 | if (mmu->pg_sz != PAGE_SIZE) | |
569 | panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE)); | |
570 | ||
cc562d2e VG |
571 | /* |
572 | * ASID mgmt data structures are compile time init | |
573 | * asid_cache = FIRST_ASID and asid_mm_map[] all zeroes | |
574 | */ | |
575 | ||
576 | local_flush_tlb_all(); | |
577 | ||
578 | /* Enable the MMU */ | |
579 | write_aux_reg(ARC_REG_PID, MMU_ENABLE); | |
41195d23 VG |
580 | |
581 | /* In smp we use this reg for interrupt 1 scratch */ | |
582 | #ifndef CONFIG_SMP | |
583 | /* swapper_pg_dir is the pgd for the kernel, used by vmalloc */ | |
584 | write_aux_reg(ARC_REG_SCRATCH_DATA0, swapper_pg_dir); | |
585 | #endif | |
cc562d2e VG |
586 | } |
587 | ||
588 | /* | |
589 | * TLB Programmer's Model uses Linear Indexes: 0 to {255, 511} for 128 x {2,4} | |
590 | * The mapping is Column-first. | |
591 | * --------------------- ----------- | |
592 | * |way0|way1|way2|way3| |way0|way1| | |
593 | * --------------------- ----------- | |
594 | * [set0] | 0 | 1 | 2 | 3 | | 0 | 1 | | |
595 | * [set1] | 4 | 5 | 6 | 7 | | 2 | 3 | | |
596 | * ~ ~ ~ ~ | |
597 | * [set127] | 508| 509| 510| 511| | 254| 255| | |
598 | * --------------------- ----------- | |
599 | * For normal operations we don't(must not) care how above works since | |
600 | * MMU cmd getIndex(vaddr) abstracts that out. | |
601 | * However for walking WAYS of a SET, we need to know this | |
602 | */ | |
603 | #define SET_WAY_TO_IDX(mmu, set, way) ((set) * mmu->ways + (way)) | |
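/* e.g. in the 4-way layout above, SET_WAY_TO_IDX(mmu, 1, 2) => 1 * 4 + 2 = 6 */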
604 | ||
605 | /* Handling of Duplicate PD (TLB entry) in MMU. | |
606 | * -Could be due to buggy customer tapeouts or obscure kernel bugs | |
607 | * -MMU complaints not at the time of duplicate PD installation, but at the | |
608 | * time of lookup matching multiple ways. | |
609 | * -Ideally these should never happen - but if they do - workaround by deleting | |
610 | * the duplicate one. | |
611 | * -Knob to be verbose abt it.(TODO: hook them up to debugfs) | |
612 | */ | |
613 | volatile int dup_pd_verbose = 1;/* Be slient abt it or complain (default) */ | |
614 | ||
615 | void do_tlb_overlap_fault(unsigned long cause, unsigned long address, | |
616 | struct pt_regs *regs) | |
617 | { | |
618 | int set, way, n; | |
619 | unsigned int pd0[4], pd1[4]; /* assume max 4 ways */ | |
620 | unsigned long flags, is_valid; | |
621 | struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu; | |
622 | ||
623 | local_irq_save(flags); | |
624 | ||
625 | /* re-enable the MMU */ | |
626 | write_aux_reg(ARC_REG_PID, MMU_ENABLE | read_aux_reg(ARC_REG_PID)); | |
627 | ||
628 | /* loop thru all sets of TLB */ | |
629 | for (set = 0; set < mmu->sets; set++) { | |
630 | ||
631 | /* read out all the ways of current set */ | |
632 | for (way = 0, is_valid = 0; way < mmu->ways; way++) { | |
633 | write_aux_reg(ARC_REG_TLBINDEX, | |
634 | SET_WAY_TO_IDX(mmu, set, way)); | |
635 | write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead); | |
636 | pd0[way] = read_aux_reg(ARC_REG_TLBPD0); | |
637 | pd1[way] = read_aux_reg(ARC_REG_TLBPD1); | |
638 | is_valid |= pd0[way] & _PAGE_PRESENT; | |
639 | } | |
640 | ||
641 | /* If all the WAYS in SET are empty, skip to next SET */ | |
642 | if (!is_valid) | |
643 | continue; | |
644 | ||
645 | /* Scan the set for duplicate ways: needs a nested loop */ | |
646 | for (way = 0; way < mmu->ways; way++) { | |
647 | if (!pd0[way]) | |
648 | continue; | |
649 | ||
650 | for (n = way + 1; n < mmu->ways; n++) { | |
651 | if ((pd0[way] & PAGE_MASK) == | |
652 | (pd0[n] & PAGE_MASK)) { | |
653 | ||
654 | if (dup_pd_verbose) { | |
655 | pr_info("Duplicate PD's @" | |
656 | "[%d:%d]/[%d:%d]\n", | |
657 | set, way, set, n); | |
658 | pr_info("TLBPD0[%u]: %08x\n", | |
659 | way, pd0[way]); | |
660 | } | |
661 | ||
662 | /* | |
663 | * clear entry @way and not @n. This is | |
664 | * critical to our optimised loop | |
665 | */ | |
666 | pd0[way] = pd1[way] = 0; | |
667 | write_aux_reg(ARC_REG_TLBINDEX, | |
668 | SET_WAY_TO_IDX(mmu, set, way)); | |
669 | __tlb_entry_erase(); | |
670 | } | |
671 | } | |
672 | } | |
673 | } | |
674 | ||
675 | local_irq_restore(flags); | |
676 | } | |
677 | ||
678 | /*********************************************************************** | |
679 | * Diagnostic Routines | |
680 | * -Called from Low Level TLB Hanlders if things don;t look good | |
681 | **********************************************************************/ | |
682 | ||
683 | #ifdef CONFIG_ARC_DBG_TLB_PARANOIA | |
684 | ||
685 | /* | |
686 | * Low Level ASM TLB handler calls this if it finds that HW and SW ASIDS | |
687 | * don't match | |
688 | */ | |
689 | void print_asid_mismatch(int is_fast_path) | |
690 | { | |
691 | int pid_sw, pid_hw; | |
692 | pid_sw = current->active_mm->context.asid; | |
693 | pid_hw = read_aux_reg(ARC_REG_PID) & 0xff; | |
694 | ||
695 | pr_emerg("ASID Mismatch in %s Path Handler: sw-pid=0x%x hw-pid=0x%x\n", | |
696 | is_fast_path ? "Fast" : "Slow", pid_sw, pid_hw); | |
697 | ||
698 | __asm__ __volatile__("flag 1"); | |
699 | } | |
700 | ||
701 | void tlb_paranoid_check(unsigned int pid_sw, unsigned long addr) | |
702 | { | |
703 | unsigned int pid_hw; | |
704 | ||
705 | pid_hw = read_aux_reg(ARC_REG_PID) & 0xff; | |
706 | ||
	if (addr < 0x70000000 && ((pid_hw != pid_sw) || (pid_sw == NO_ASID)))
		print_asid_mismatch(0);
}
#endif