/*
 * TLB Management (flush/create/diagnostics) for ARC700
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * vineetg: Aug 2011
 *  -Reintroduce duplicate PD fixup - some customer chips still have the issue
 *
 * vineetg: May 2011
 *  -No need to flush_cache_page( ) for each call to update_mmu_cache()
 *   some of the LMBench tests improved amazingly
 *      = page-fault thrice as fast (75 usec to 28 usec)
 *      = mmap twice as fast (9.6 msec to 4.6 msec),
 *      = fork (5.3 msec to 3.7 msec)
 *
 * vineetg: April 2011 :
 *  -MMU v3: PD{0,1} bits layout changed: They don't overlap anymore,
 *      helps avoid a shift when preparing PD0 from PTE
 *
 * vineetg: April 2011 : Preparing for MMU V3
 *  -MMU v2/v3 BCRs decoded differently
 *  -Remove TLB_SIZE hardcoding as it's variable now: 256 or 512
 *  -tlb_entry_erase( ) can be void
 *  -local_flush_tlb_range( ):
 *      = need not "ceil" @end
 *      = walks MMU only if range spans < 32 entries, as opposed to 256
 *
 * Vineetg: Sept 10th 2008
 *  -Changes related to MMU v2 (Rel 4.8)
 *
 * Vineetg: Aug 29th 2008
 *  -In TLB Flush operations (Metal Fix MMU) there is an explicit command to
 *   flush Micro-TLBs. If the TLB Index Reg is invalid prior to the TLBIVUTLB
 *   cmd, it fails, so it needs to be loaded with ANY valid value before
 *   invoking the TLBIVUTLB cmd
 *
 * Vineetg: Aug 21st 2008:
 *  -Reduced the duration of IRQ lockouts in TLB Flush routines
 *  -Multiple copies of TLB erase code separated into a "single" function
 *  -In TLB Flush routines, interrupt disabling moved UP to retrieve ASID
 *   in interrupt-safe region.
 *
 * Vineetg: April 23rd Bug #93131
 *    Problem: tlb_flush_kernel_range() doesn't do anything if the range to
 *             flush is more than the size of the TLB itself.
 *
 * Rahul Trivedi : Codito Technologies 2004
 */

#include <linux/module.h>
#include <linux/bug.h>
#include <asm/arcregs.h>
#include <asm/setup.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>

/* Need for ARC MMU v2
 *
 * ARC700 MMU-v1 had a Joint-TLB for Code and Data and is 2 way set-assoc.
 * For a memcpy operation with 3 players (src/dst/code) such that all 3 pages
 * map into the same set, there would be contention for the 2 ways, causing
 * severe Thrashing.
 *
 * Although J-TLB is 2 way set assoc, ARC700 caches J-TLB into uTLBs which have
 * much higher associativity. u-D-TLB is 8 ways, u-I-TLB is 4 ways.
 * Given this, the thrashing problem should never happen because once the 3
 * J-TLB entries are created (even though the 3rd will knock out one of the
 * prev two), the u-D-TLB and u-I-TLB will have what is required to accomplish
 * memcpy.
 *
 * Yet we still see the Thrashing because a J-TLB Write causes a flush of
 * u-TLBs. This is a simple design for keeping them in sync. So what do we do?
 * The solution which James came up with was pretty neat. It utilised the assoc
 * of uTLBs by not invalidating always but only when absolutely necessary.
 *
 * - Existing TLB commands work as before
 * - New command (TLBWriteNI) for TLB write without clearing uTLBs
 * - New command (TLBIVUTLB) to invalidate uTLBs.
 *
 * The uTLBs need only be invalidated when pages are being removed from the
 * OS page table. If a 'victim' TLB entry is being overwritten in the main TLB
 * as a result of a miss, the removed entry is still allowed to exist in the
 * uTLBs as it is still valid and present in the OS page table. This allows the
 * full associativity of the uTLBs to hide the limited associativity of the
 * main TLB.
 *
 * During a miss handler, the new "TLBWriteNI" command is used to load
 * entries without clearing the uTLBs.
 *
 * When the OS page table is updated, TLB entries that may be associated with a
 * removed page are removed (flushed) from the TLB using TLBWrite. In this
 * circumstance, the uTLBs must also be cleared. This is done by using the
 * existing TLBWrite command. An explicit IVUTLB is also required for those
 * corner cases when TLBWrite was not executed at all because the corresponding
 * J-TLB entry got evicted/replaced.
 */
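
/*
 * A condensed view of the policy described above (summary only; the
 * comment above is authoritative):
 *
 *      situation                               cmd             uTLBs
 *      -----------------------------------------------------------------
 *      TLB miss refill                         TLBWriteNI      left intact
 *      page removed from OS page table         TLBWrite        also cleared
 *      victim already evicted from J-TLB       IVUTLB          cleared explicitly
 */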

/* A copy of the ASID from the PID reg is kept in asid_cache */
DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;

/*
 * Utility Routine to erase a J-TLB entry
 * Caller needs to setup Index Reg (manually or via getIndex)
 */
static inline void __tlb_entry_erase(void)
{
        write_aux_reg(ARC_REG_TLBPD1, 0);
        write_aux_reg(ARC_REG_TLBPD0, 0);
        write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}

#if (CONFIG_ARC_MMU_VER < 4)

static inline unsigned int tlb_entry_lkup(unsigned long vaddr_n_asid)
{
        unsigned int idx;

        write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid);

        write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
        idx = read_aux_reg(ARC_REG_TLBINDEX);

        return idx;
}

static void tlb_entry_erase(unsigned int vaddr_n_asid)
{
        unsigned int idx;

        /* Locate the TLB entry for this vaddr + ASID */
        idx = tlb_entry_lkup(vaddr_n_asid);

        /* No error means entry found, zero it out */
        if (likely(!(idx & TLB_LKUP_ERR))) {
                __tlb_entry_erase();
        } else {
                /* Duplicate entry error */
                WARN(idx == TLB_DUP_ERR, "Probe returned Dup PD for %x\n",
                     vaddr_n_asid);
        }
}

/****************************************************************************
 * ARC700 MMU caches recently used J-TLB entries (RAM) as uTLBs (FLOPs)
 *
 * New IVUTLB cmd in MMU v2 explicitly invalidates the uTLB
 *
 * utlb_invalidate ( )
 *  -For v2 MMU calls Flush uTLB Cmd
 *  -For v1 MMU does nothing (except for Metal Fix v1 MMU)
 *   This is because in v1 TLBWrite itself invalidates uTLBs
 ***************************************************************************/

static void utlb_invalidate(void)
{
#if (CONFIG_ARC_MMU_VER >= 2)

#if (CONFIG_ARC_MMU_VER == 2)
        /* MMU v2 introduced the uTLB Flush command.
         * There was however an obscure hardware bug, where uTLB flush would
         * fail when a prior probe for J-TLB (both totally unrelated) would
         * return lkup err - because the entry didn't exist in MMU.
         * The Workaround was to set Index reg with some valid value, prior to
         * flush. This was fixed in MMU v3 hence not needed any more
         */
        unsigned int idx;

        /* make sure INDEX Reg is valid */
        idx = read_aux_reg(ARC_REG_TLBINDEX);

        /* If not, write some dummy val */
        if (unlikely(idx & TLB_LKUP_ERR))
                write_aux_reg(ARC_REG_TLBINDEX, 0xa);
#endif

        write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB);
#endif

}

static void tlb_entry_insert(unsigned int pd0, unsigned int pd1)
{
        unsigned int idx;

        /*
         * First verify if entry for this vaddr+ASID already exists
         * This also sets up PD0 (vaddr, ASID..) for final commit
         */
        idx = tlb_entry_lkup(pd0);

        /*
         * If not already present, get a free slot from MMU.
         * Otherwise, Probe would have located the entry and set INDEX Reg
         * with existing location. This will cause Write CMD to over-write
         * existing entry with new PD0 and PD1
         */
        if (likely(idx & TLB_LKUP_ERR))
                write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);

        /* setup the other half of TLB entry (pfn, rwx..) */
        write_aux_reg(ARC_REG_TLBPD1, pd1);

        /*
         * Commit the Entry to MMU
         * It doesn't sound safe to use the TLBWriteNI cmd here
         * which doesn't flush uTLBs. I'd rather be safe than sorry.
         */
        write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}

#else   /* CONFIG_ARC_MMU_VER >= 4 */

static void utlb_invalidate(void)
{
        /* No need since uTLB is always in sync with JTLB */
}

static void tlb_entry_erase(unsigned int vaddr_n_asid)
{
        write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid | _PAGE_PRESENT);
        write_aux_reg(ARC_REG_TLBCOMMAND, TLBDeleteEntry);
}

static void tlb_entry_insert(unsigned int pd0, unsigned int pd1)
{
        write_aux_reg(ARC_REG_TLBPD0, pd0);
        write_aux_reg(ARC_REG_TLBPD1, pd1);
        write_aux_reg(ARC_REG_TLBCOMMAND, TLBInsertEntry);
}

#endif

/*
 * Un-conditionally (without lookup) erase the entire MMU contents
 */

noinline void local_flush_tlb_all(void)
{
        unsigned long flags;
        unsigned int entry;
        struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;

        local_irq_save(flags);

        /* Load PD0 and PD1 with template for a Blank Entry */
        write_aux_reg(ARC_REG_TLBPD1, 0);
        write_aux_reg(ARC_REG_TLBPD0, 0);

        for (entry = 0; entry < mmu->num_tlb; entry++) {
                /* write this entry to the TLB */
                write_aux_reg(ARC_REG_TLBINDEX, entry);
                write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
        }

        if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
                const int stlb_idx = 0x800;

                /* Blank sTLB entry */
                write_aux_reg(ARC_REG_TLBPD0, _PAGE_HW_SZ);

                for (entry = stlb_idx; entry < stlb_idx + 16; entry++) {
                        write_aux_reg(ARC_REG_TLBINDEX, entry);
                        write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
                }
        }

        utlb_invalidate();

        local_irq_restore(flags);
}

/*
 * Flush the entire MM for userland. The fastest way is to move to Next ASID
 */
noinline void local_flush_tlb_mm(struct mm_struct *mm)
{
        /*
         * Small optimisation courtesy IA64
         * flush_mm called during fork, exit, munmap etc, multiple times as
         * well. Only for fork( ) do we need to move parent to a new MMU ctxt,
         * all other cases are NOPs, hence this check.
         */
        if (atomic_read(&mm->mm_users) == 0)
                return;

        /*
         * - Move to a new ASID, but only if the mm is still wired in
         *   (Android Binder ended up calling this for vma->mm != tsk->mm,
         *   causing h/w - s/w ASID to get out of sync)
         * - Also get_new_mmu_context() new implementation allocates a new
         *   ASID only if it is not allocated already - so unallocate first
         */
        destroy_context(mm);
        if (current->mm == mm)
                get_new_mmu_context(mm);
}

/*
 * Flush a Range of TLB entries for userland.
 * @start is inclusive, while @end is exclusive
 * Difference between this and Kernel Range Flush is
 *  -Here the fastest way (if range is too large) is to move to next ASID
 *   without doing any explicit Shootdown
 *  -In case of kernel Flush, entry has to be shot down explicitly
 */
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                           unsigned long end)
{
        const unsigned int cpu = smp_processor_id();
        unsigned long flags;

        /* If range @start to @end is more than 32 TLB entries deep,
         * it's better to move to a new ASID rather than searching for
         * individual entries and then shooting them down
         *
         * The calc above is rough, doesn't account for unaligned parts,
         * since this is heuristics based anyways
         */
        if (unlikely((end - start) >= PAGE_SIZE * 32)) {
                local_flush_tlb_mm(vma->vm_mm);
                return;
        }

        /*
         * @start moved to page start: this alone suffices for checking
         * loop end condition below, w/o need for aligning @end to end
         * e.g. 2000 to 4001 will anyhow loop twice
         */
        start &= PAGE_MASK;

        local_irq_save(flags);

        if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
                while (start < end) {
                        tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu));
                        start += PAGE_SIZE;
                }
        }

        utlb_invalidate();

        local_irq_restore(flags);
}

/* Flush the kernel TLB entries - vmalloc/modules (Global from MMU perspective)
 *  @start, @end interpreted as kvaddr
 * Interestingly, shared TLB entries can also be flushed using just
 * @start,@end alone (interpreted as user vaddr), although technically SASID
 * is also needed. However our smart TLBProbe lookup takes care of that.
 */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        unsigned long flags;

        /* exactly same as above, except for TLB entry not taking ASID */

        if (unlikely((end - start) >= PAGE_SIZE * 32)) {
                local_flush_tlb_all();
                return;
        }

        start &= PAGE_MASK;

        local_irq_save(flags);
        while (start < end) {
                tlb_entry_erase(start);
                start += PAGE_SIZE;
        }

        utlb_invalidate();

        local_irq_restore(flags);
}

/*
 * Delete TLB entry in MMU for a given page (??? address)
 * NOTE One TLB entry contains translation for a single PAGE
 */

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        const unsigned int cpu = smp_processor_id();
        unsigned long flags;

        /* Note that it is critical that interrupts are DISABLED between
         * checking the ASID and using it to flush the TLB entry
         */
        local_irq_save(flags);

        if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
                tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu));
                utlb_invalidate();
        }

        local_irq_restore(flags);
}

#ifdef CONFIG_SMP

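/*
 * Cross-CPU TLB shootdown: each flush_tlb_*() variant below simply runs
 * its local_flush_tlb_*() counterpart on the relevant CPUs over IPI -
 * on_each_cpu() for global flushes, on_each_cpu_mask(mm_cpumask(mm)) when
 * scoped to one address space - marshalling arguments through tlb_args.
 */
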
struct tlb_args {
        struct vm_area_struct *ta_vma;
        unsigned long ta_start;
        unsigned long ta_end;
};

static inline void ipi_flush_tlb_page(void *arg)
{
        struct tlb_args *ta = arg;

        local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
        struct tlb_args *ta = arg;

        local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ipi_flush_pmd_tlb_range(void *arg)
{
        struct tlb_args *ta = arg;

        local_flush_pmd_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}
#endif

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}

void flush_tlb_all(void)
{
        on_each_cpu((smp_call_func_t)local_flush_tlb_all, NULL, 1);
}

void flush_tlb_mm(struct mm_struct *mm)
{
        on_each_cpu_mask(mm_cpumask(mm), (smp_call_func_t)local_flush_tlb_mm,
                         mm, 1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
        struct tlb_args ta = {
                .ta_vma = vma,
                .ta_start = uaddr
        };

        on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                     unsigned long end)
{
        struct tlb_args ta = {
                .ta_vma = vma,
                .ta_start = start,
                .ta_end = end
        };

        on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
                         unsigned long end)
{
        struct tlb_args ta = {
                .ta_vma = vma,
                .ta_start = start,
                .ta_end = end
        };

        on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_pmd_tlb_range, &ta, 1);
}
#endif

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        struct tlb_args ta = {
                .ta_start = start,
                .ta_end = end
        };

        on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
}
#endif

/*
 * Routine to create a TLB entry
 */
void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
        unsigned long flags;
        unsigned int asid_or_sasid, rwx;
        unsigned long pd0, pd1;

        /*
         * create_tlb() assumes that current->mm == vma->mm, since
         * -the ASID for the TLB entry is fetched from the MMU ASID reg
         *  (valid for curr)
         * -it completes the lazy write to SASID reg (again valid for curr tsk)
         *
         * Removing the assumption involves
         * -Using vma->mm->context{ASID,SASID}, as opposed to MMU reg.
         * -Fixing the TLB paranoid debug code to not trigger false negatives.
         * -More importantly it makes this handler inconsistent with fast-path
         *  TLB Refill handler which always deals with "current"
         *
         * Let's see the use cases when current->mm != vma->mm and we land here
         *  1. execve->copy_strings()->__get_user_pages->handle_mm_fault
         *     Here VM wants to pre-install a TLB entry for user stack while
         *     current->mm still points to pre-execve mm (hence the condition).
         *     However the stack vaddr is soon relocated (randomization) and
         *     move_page_tables() tries to undo that TLB entry.
         *     Thus not creating TLB entry is not any worse.
         *
         *  2. ptrace(POKETEXT) causes a CoW - debugger(current) inserting a
         *     breakpoint in debugged task. Not creating a TLB now is not
         *     performance critical.
         *
         * Both the cases above are not good enough for code churn.
         */
        if (current->active_mm != vma->vm_mm)
                return;

        local_irq_save(flags);

        tlb_paranoid_check(asid_mm(vma->vm_mm, smp_processor_id()), address);

        address &= PAGE_MASK;

        /* update this PTE credentials */
        pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED);

        /* Create HW TLB(PD0,PD1) from PTE */

        /* ASID for this task */
        asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;

        pd0 = address | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0);

        /*
         * ARC MMU provides fully orthogonal access bits for K/U mode,
         * however Linux only saves 1 set to save PTE real-estate
         * Here we convert 3 PTE bits into 6 MMU bits:
         * -Kernel only entries have Kr Kw Kx 0 0 0
         * -User entries have mirrored K and U bits
         */
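        /*
         * For instance (an illustrative case, in the r w x notation above):
         * a non-global user page with read+write has r=1 w=1 x=0, which the
         * mirroring below expands to Kr=1 Kw=1 Kx=0 Ur=1 Uw=1 Ux=0; were the
         * same PTE marked _PAGE_GLOBAL, only the kernel half would be set.
         */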
        rwx = pte_val(*ptep) & PTE_BITS_RWX;

        if (pte_val(*ptep) & _PAGE_GLOBAL)
                rwx <<= 3;              /* r w x => Kr Kw Kx 0 0 0 */
        else
                rwx |= (rwx << 3);      /* r w x => Kr Kw Kx Ur Uw Ux */

        pd1 = rwx | (pte_val(*ptep) & PTE_BITS_NON_RWX_IN_PD1);

        tlb_entry_insert(pd0, pd1);

        local_irq_restore(flags);
}

/*
 * Called at the end of pagefault, for a userspace mapped page
 *  -pre-install the corresponding TLB entry into MMU
 *  -Finalize the delayed D-cache flush of kernel mapping of page due to
 *      flush_dcache_page(), copy_user_page()
 *
 * Note that flush (when done) involves both WBACK - so physical page is
 * in sync as well as INV - so any non-congruent aliases don't remain
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
                      pte_t *ptep)
{
        unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
        unsigned long paddr = pte_val(*ptep) & PAGE_MASK;
        struct page *page = pfn_to_page(pte_pfn(*ptep));

        create_tlb(vma, vaddr, ptep);

        if (page == ZERO_PAGE(0)) {
                return;
        }

        /*
         * Exec page : Independent of aliasing/page-color considerations,
         *             since icache doesn't snoop dcache on ARC, any dirty
         *             K-mapping of a code page needs to be wback+inv so that
         *             icache fetch by userspace sees code correctly.
         * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it
         *             so userspace sees the right data.
         *  (Avoids the flush for Non-exec + congruent mapping case)
         */
        if ((vma->vm_flags & VM_EXEC) ||
            addr_not_cache_congruent(paddr, vaddr)) {

                int dirty = !test_and_set_bit(PG_dc_clean, &page->flags);
                if (dirty) {
                        /* wback + inv dcache lines */
                        __flush_dcache_page(paddr, paddr);

                        /* invalidate any existing icache lines */
                        if (vma->vm_flags & VM_EXEC)
                                __inv_icache_page(paddr, vaddr);
                }
        }
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/*
 * MMUv4 in HS38x cores supports Super Pages which are the basis for Linux THP
 * support.
 *
 * Normal and Super pages can co-exist (of course not overlap) in TLB with a
 * new bit "SZ" in TLB page descriptor to distinguish between them.
 * Super Page size is configurable in hardware (4K to 16M), but fixed once
 * RTL builds.
 *
 * The exact THP size a Linux configuration will support is a function of:
 *  - MMU page size (typically 8K, RTL fixed)
 *  - software page walker address split between PGD:PTE:PFN (typically
 *    11:8:13, but can be changed with 1 line)
 * So for the above default, the THP size supported is 8K * (2^8) = 2M
 *
 * Default Page Walker is 2 levels, PGD:PTE:PFN, which in THP regime
 * reduces to 1 level (as PTE is folded into PGD and canonically referred
 * to as PMD).
 * Thus THP PMD accessors are implemented in terms of PTE (just like sparc)
 */
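
/*
 * Working the defaults through: 2^8 PTEs per PMD * 8K per page = 2M per
 * PMD, which is what HPAGE_PMD_SIZE must equal here - arc_mmu_init()
 * below panics if the hardware Super Page size disagrees.
 */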

void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
                          pmd_t *pmd)
{
        pte_t pte = __pte(pmd_val(*pmd));
        update_mmu_cache(vma, addr, &pte);
}

void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                pgtable_t pgtable)
{
        struct list_head *lh = (struct list_head *) pgtable;

        assert_spin_locked(&mm->page_table_lock);

        /* FIFO */
        if (!pmd_huge_pte(mm, pmdp))
                INIT_LIST_HEAD(lh);
        else
                list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
        pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
        struct list_head *lh;
        pgtable_t pgtable;

        assert_spin_locked(&mm->page_table_lock);

        pgtable = pmd_huge_pte(mm, pmdp);
        lh = (struct list_head *) pgtable;
        if (list_empty(lh))
                pmd_huge_pte(mm, pmdp) = NULL;
        else {
                pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
                list_del(lh);
        }

        pte_val(pgtable[0]) = 0;
        pte_val(pgtable[1]) = 0;

        return pgtable;
}

void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
                               unsigned long end)
{
        unsigned int cpu;
        unsigned long flags;

        local_irq_save(flags);

        cpu = smp_processor_id();

        if (likely(asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID)) {
                unsigned int asid = hw_pid(vma->vm_mm, cpu);

                /* No need to loop here: this will always be for 1 Huge Page */
                tlb_entry_erase(start | _PAGE_HW_SZ | asid);
        }

        local_irq_restore(flags);
}

#endif

/* Read the Cache Build Configuration Registers, Decode them and save into
 * the cpuinfo structure for later use.
 * No Validation is done here, simply read/convert the BCRs
 */
void read_decode_mmu_bcr(void)
{
        struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
        unsigned int tmp;
        struct bcr_mmu_1_2 {
#ifdef CONFIG_CPU_BIG_ENDIAN
                unsigned int ver:8, ways:4, sets:4, u_itlb:8, u_dtlb:8;
#else
                unsigned int u_dtlb:8, u_itlb:8, sets:4, ways:4, ver:8;
#endif
        } *mmu2;

        struct bcr_mmu_3 {
#ifdef CONFIG_CPU_BIG_ENDIAN
                unsigned int ver:8, ways:4, sets:4, osm:1, reserv:3, pg_sz:4,
                             u_itlb:4, u_dtlb:4;
#else
                unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, reserv:3, osm:1,
                             sets:4, ways:4, ver:8;
#endif
        } *mmu3;

        struct bcr_mmu_4 {
#ifdef CONFIG_CPU_BIG_ENDIAN
                unsigned int ver:8, sasid:1, sz1:4, sz0:4, res:2, pae:1,
                             n_ways:2, n_entry:2, n_super:2, u_itlb:3, u_dtlb:3;
#else
                /*           DTLB      ITLB      JES        JE         JA */
                unsigned int u_dtlb:3, u_itlb:3, n_super:2, n_entry:2, n_ways:2,
                             pae:1, res:2, sz0:4, sz1:4, sasid:1, ver:8;
#endif
        } *mmu4;

        tmp = read_aux_reg(ARC_REG_MMU_BCR);
        mmu->ver = (tmp >> 24);

        if (mmu->ver <= 2) {
                mmu2 = (struct bcr_mmu_1_2 *)&tmp;
                mmu->pg_sz_k = TO_KB(PAGE_SIZE);
                mmu->sets = 1 << mmu2->sets;
                mmu->ways = 1 << mmu2->ways;
                mmu->u_dtlb = mmu2->u_dtlb;
                mmu->u_itlb = mmu2->u_itlb;
        } else if (mmu->ver == 3) {
                mmu3 = (struct bcr_mmu_3 *)&tmp;
                mmu->pg_sz_k = 1 << (mmu3->pg_sz - 1);
                mmu->sets = 1 << mmu3->sets;
                mmu->ways = 1 << mmu3->ways;
                mmu->u_dtlb = mmu3->u_dtlb;
                mmu->u_itlb = mmu3->u_itlb;
        } else {
                mmu4 = (struct bcr_mmu_4 *)&tmp;
                mmu->pg_sz_k = 1 << (mmu4->sz0 - 1);
                mmu->s_pg_sz_m = 1 << (mmu4->sz1 - 11);
                mmu->sets = 64 << mmu4->n_entry;
                mmu->ways = mmu4->n_ways * 2;
                mmu->u_dtlb = mmu4->u_dtlb * 4;
                mmu->u_itlb = mmu4->u_itlb * 4;
        }

        mmu->num_tlb = mmu->sets * mmu->ways;
}
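
/*
 * A worked example of the decode above (illustrative register values):
 * an MMU v3 BCR with pg_sz = 4 gives pg_sz_k = 1 << (4 - 1) = 8, i.e. 8K
 * pages; an MMU v4 BCR with sz1 = 12 gives s_pg_sz_m = 1 << (12 - 11) = 2,
 * i.e. 2M Super Pages.
 */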

char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
{
        int n = 0;
        struct cpuinfo_arc_mmu *p_mmu = &cpuinfo_arc700[cpu_id].mmu;
        char super_pg[64] = "";

        if (p_mmu->s_pg_sz_m)
                scnprintf(super_pg, 64, "%dM Super Page%s, ",
                          p_mmu->s_pg_sz_m,
                          IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) ? "" : " (not used)");

        n += scnprintf(buf + n, len - n,
                       "MMU [v%x]\t: %dk PAGE, %sJTLB %d (%dx%d), uDTLB %d, uITLB %d %s\n",
                       p_mmu->ver, p_mmu->pg_sz_k, super_pg,
                       p_mmu->num_tlb, p_mmu->sets, p_mmu->ways,
                       p_mmu->u_dtlb, p_mmu->u_itlb,
                       IS_ENABLED(CONFIG_ARC_MMU_SASID) ? ",SASID" : "");

        return buf;
}

void arc_mmu_init(void)
{
        char str[256];
        struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;

        printk(arc_mmu_mumbojumbo(0, str, sizeof(str)));

        /*
         * For efficiency's sake, the kernel is compile-time built for one MMU
         * version. This must match the hardware it is running on.
         * Linux built for MMU V2, if run on MMU V1, will break down because V1
         * hardware doesn't understand cmds such as WriteNI or IVUTLB.
         * On the other hand, Linux built for V1, if run on MMU V2, will do
         * un-needed workarounds to prevent memcpy thrashing.
         * Similarly MMU V3 has new features which won't work on older MMUs
         */
        if (mmu->ver != CONFIG_ARC_MMU_VER) {
                panic("MMU ver %d doesn't match kernel built for %d...\n",
                      mmu->ver, CONFIG_ARC_MMU_VER);
        }

        if (mmu->pg_sz_k != TO_KB(PAGE_SIZE))
                panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE));

        if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
            mmu->s_pg_sz_m != TO_MB(HPAGE_PMD_SIZE))
                panic("MMU Super pg size != Linux HPAGE_PMD_SIZE (%luM)\n",
                      (unsigned long)TO_MB(HPAGE_PMD_SIZE));

        /* Enable the MMU */
        write_aux_reg(ARC_REG_PID, MMU_ENABLE);

        /* In smp we use this reg for interrupt 1 scratch */
#ifndef CONFIG_SMP
        /* swapper_pg_dir is the pgd for the kernel, used by vmalloc */
        write_aux_reg(ARC_REG_SCRATCH_DATA0, swapper_pg_dir);
#endif
}

/*
 * TLB Programmer's Model uses Linear Indexes: 0 to {255, 511} for 128 x {2,4}
 * The mapping is Column-first.
 *              ---------------------   -----------
 *              |way0|way1|way2|way3|   |way0|way1|
 *              ---------------------   -----------
 * [set0]       |  0 |  1 |  2 |  3 |   |  0 |  1 |
 * [set1]       |  4 |  5 |  6 |  7 |   |  2 |  3 |
 * ~            ~                       ~
 * [set127]     | 508| 509| 510| 511|   | 254| 255|
 *              ---------------------   -----------
 * For normal operations we don't (must not) care how the above works since
 * MMU cmd getIndex(vaddr) abstracts that out.
 * However for walking WAYS of a SET, we need to know this
 */
#define SET_WAY_TO_IDX(mmu, set, way)  ((set) * mmu->ways + (way))
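
/* e.g. in the 4-way layout above: (set 1, way 2) -> 1 * 4 + 2 = index 6 */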

/* Handling of Duplicate PD (TLB entry) in MMU.
 * -Could be due to buggy customer tapeouts or obscure kernel bugs
 * -MMU complains not at the time of duplicate PD installation, but at the
 *  time of lookup matching multiple ways.
 * -Ideally these should never happen - but if they do - workaround by deleting
 *  the duplicate one.
 * -Knob to be verbose about it (TODO: hook them up to debugfs)
 */
volatile int dup_pd_verbose = 1;/* Be silent about it or complain (default) */

void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
                          struct pt_regs *regs)
{
        int set, way, n;
        unsigned long flags, is_valid;
        struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
        unsigned int pd0[mmu->ways], pd1[mmu->ways];

        local_irq_save(flags);

        /* re-enable the MMU */
        write_aux_reg(ARC_REG_PID, MMU_ENABLE | read_aux_reg(ARC_REG_PID));

        /* loop thru all sets of TLB */
        for (set = 0; set < mmu->sets; set++) {

                /* read out all the ways of current set */
                for (way = 0, is_valid = 0; way < mmu->ways; way++) {
                        write_aux_reg(ARC_REG_TLBINDEX,
                                      SET_WAY_TO_IDX(mmu, set, way));
                        write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead);
                        pd0[way] = read_aux_reg(ARC_REG_TLBPD0);
                        pd1[way] = read_aux_reg(ARC_REG_TLBPD1);
                        is_valid |= pd0[way] & _PAGE_PRESENT;
                }

                /* If all the WAYS in SET are empty, skip to next SET */
                if (!is_valid)
                        continue;

                /* Scan the set for duplicate ways: needs a nested loop */
                for (way = 0; way < mmu->ways - 1; way++) {
                        if (!pd0[way])
                                continue;

                        for (n = way + 1; n < mmu->ways; n++) {
                                if ((pd0[way] & PAGE_MASK) ==
                                    (pd0[n] & PAGE_MASK)) {

                                        if (dup_pd_verbose) {
                                                pr_info("Duplicate PD's @[%d:%d]/[%d:%d]\n",
                                                        set, way, set, n);
                                                pr_info("TLBPD0[%u]: %08x\n",
                                                        way, pd0[way]);
                                        }

                                        /*
                                         * clear entry @way and not @n.
                                         * This is critical to our
                                         * optimised loop
                                         */
                                        pd0[way] = pd1[way] = 0;
                                        write_aux_reg(ARC_REG_TLBINDEX,
                                                SET_WAY_TO_IDX(mmu, set, way));
                                        __tlb_entry_erase();
                                }
                        }
                }
        }

        local_irq_restore(flags);
}

/***********************************************************************
 * Diagnostic Routines
 *  -Called from Low Level TLB Handlers if things don't look good
 **********************************************************************/

#ifdef CONFIG_ARC_DBG_TLB_PARANOIA

/*
 * Low Level ASM TLB handler calls this if it finds that HW and SW ASIDs
 * don't match
 */
void print_asid_mismatch(int mm_asid, int mmu_asid, int is_fast_path)
{
        pr_emerg("ASID Mismatch in %s Path Handler: sw-pid=0x%x hw-pid=0x%x\n",
                 is_fast_path ? "Fast" : "Slow", mm_asid, mmu_asid);

        __asm__ __volatile__("flag 1");
}

void tlb_paranoid_check(unsigned int mm_asid, unsigned long addr)
{
        unsigned int mmu_asid;

        mmu_asid = read_aux_reg(ARC_REG_PID) & 0xff;

        /*
         * At the time of a TLB miss/installation
         * - HW version needs to match SW version
         * - SW needs to have a valid ASID
         */
        if (addr < 0x70000000 &&
            ((mm_asid == MM_CTXT_NO_ASID) ||
             (mmu_asid != (mm_asid & MM_CTXT_ASID_MASK))))
                print_asid_mismatch(mm_asid, mmu_asid, 0);
}
#endif