/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 *	Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc. All rights reserved.
 */
#include <linux/cpu_pm.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/export.h>

#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/bootinfo.h>
#include <asm/hazards.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/tlbmisc.h>

extern void build_tlb_refill_handler(void);

/*
 * LOONGSON-2 has a 4 entry itlb which is a subset of jtlb, LOONGSON-3 has
 * a 4 entry itlb and a 4 entry dtlb which are subsets of jtlb. Unfortunately,
 * itlb/dtlb are not totally transparent to software.
 */
static inline void flush_micro_tlb(void)
{
	switch (current_cpu_type()) {
	case CPU_LOONGSON2:
		write_c0_diag(LOONGSON_DIAG_ITLB);
		break;
	case CPU_LOONGSON3:
		write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
		break;
	default:
		break;
	}
}

static inline void flush_micro_tlb_vm(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_EXEC)
		flush_micro_tlb();
}

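/*
 * Flush the local CPU's entire TLB, preserving wired entries. Cores with
 * tlbinv and no wired entries invalidate the VTLB and each FTLB set in
 * bulk; everything else overwrites each non-wired entry with a unique,
 * unmatchable VPN2 value.
 */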
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry, ftlbhighset;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	htw_stop();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = num_wired_entries();

	/*
	 * Blast 'em all away.
	 * If there are any wired entries, fall back to iterating
	 */
	if (cpu_has_tlbinv && !entry) {
		if (current_cpu_data.tlbsizevtlb) {
			write_c0_index(0);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate VTLB */
		}
		ftlbhighset = current_cpu_data.tlbsizevtlb +
			current_cpu_data.tlbsizeftlbsets;
		for (entry = current_cpu_data.tlbsizevtlb;
		     entry < ftlbhighset;
		     entry++) {
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate one FTLB set */
		}
	} else {
		while (entry < current_cpu_data.tlbsize) {
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(entry));
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlb_write_indexed();
			entry++;
		}
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	htw_start();
	flush_micro_tlb();
	local_irq_restore(flags);
}
EXPORT_SYMBOL(local_flush_tlb_all);

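/*
 * Flush user mappings in [start, end) for vma's address space, provided
 * the mm has a live context (ASID) on this CPU. Small ranges are probed
 * and invalidated one even/odd page pair at a time; larger ranges just
 * drop the mm's context so a fresh ASID is allocated on next use.
 */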
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;

		local_irq_save(flags);
		start = round_down(start, PAGE_SIZE << 1);
		end = round_up(end, PAGE_SIZE << 1);
		size = (end - start) >> (PAGE_SHIFT + 1);
		if (size <= (current_cpu_data.tlbsizeftlbsets ?
			     current_cpu_data.tlbsize / 8 :
			     current_cpu_data.tlbsize / 2)) {
			unsigned long old_entryhi, uninitialized_var(old_mmid);
			int newpid = cpu_asid(cpu, mm);

			old_entryhi = read_c0_entryhi();
			if (cpu_has_mmid) {
				old_mmid = read_c0_memorymapid();
				write_c0_memorymapid(newpid);
			}

			htw_stop();
			while (start < end) {
				int idx;

				if (cpu_has_mmid)
					write_c0_entryhi(start);
				else
					write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(old_entryhi);
			if (cpu_has_mmid)
				write_c0_memorymapid(old_mmid);
			htw_start();
		} else {
			drop_mmu_context(mm);
		}
		flush_micro_tlb();
		local_irq_restore(flags);
	}
}

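/*
 * Flush kernel mappings in [start, end). No ASID is involved, so only
 * EntryHi needs saving and restoring; oversized ranges fall back to a
 * full local_flush_tlb_all().
 */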
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long size, flags;

	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
	if (size <= (current_cpu_data.tlbsizeftlbsets ?
		     current_cpu_data.tlbsize / 8 :
		     current_cpu_data.tlbsize / 2)) {
		int pid = read_c0_entryhi();

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);
		htw_stop();

		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += (PAGE_SIZE << 1);
			mtc0_tlbw_hazard();
			tlb_probe();
			tlb_probe_hazard();
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		tlbw_use_hazard();
		write_c0_entryhi(pid);
		htw_start();
	} else {
		local_flush_tlb_all();
	}
	flush_micro_tlb();
	local_irq_restore(flags);
}

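/*
 * Flush the single TLB entry (one even/odd page pair) mapping @page in
 * vma's address space, if the owning mm currently has a context on this
 * CPU.
 */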
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long uninitialized_var(old_mmid);
		unsigned long flags, old_entryhi;
		int idx;

		page &= (PAGE_MASK << 1);
		local_irq_save(flags);
		old_entryhi = read_c0_entryhi();
		htw_stop();
		if (cpu_has_mmid) {
			old_mmid = read_c0_memorymapid();
			write_c0_entryhi(page);
			write_c0_memorymapid(cpu_asid(cpu, vma->vm_mm));
		} else {
			write_c0_entryhi(page | cpu_asid(cpu, vma->vm_mm));
		}
		mtc0_tlbw_hazard();
		tlb_probe();
		tlb_probe_hazard();
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		if (idx < 0)
			goto finish;
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();

finish:
		write_c0_entryhi(old_entryhi);
		if (cpu_has_mmid)
			write_c0_memorymapid(old_mmid);
		htw_start();
		flush_micro_tlb_vm(vma);
		local_irq_restore(flags);
	}
}

/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	local_irq_save(flags);
	oldpid = read_c0_entryhi();
	htw_stop();
	page &= (PAGE_MASK << 1);
	write_c0_entryhi(page);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();
	}
	write_c0_entryhi(oldpid);
	htw_start();
	flush_micro_tlb();
	local_irq_restore(flags);
}

/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and does what is needed.
 */
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx, pid;

	/*
	 * Handle debugger faulting in for debugee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	htw_stop();
	address &= (PAGE_MASK << 1);
	if (cpu_has_mmid) {
		write_c0_entryhi(address);
	} else {
		pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
		write_c0_entryhi(address | pid);
	}
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	pudp = pud_offset(pgdp, address);
	pmdp = pmd_offset(pudp, address);
	idx = read_c0_index();
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/* this could be a huge page  */
	if (pmd_huge(*pmdp)) {
		unsigned long lo;
		write_c0_pagemask(PM_HUGE_MASK);
		ptep = (pte_t *)pmdp;
		lo = pte_to_entrylo(pte_val(*ptep));
		write_c0_entrylo0(lo);
		write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));

		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
		tlbw_use_hazard();
		write_c0_pagemask(PM_DEFAULT_MASK);
	} else
#endif
	{
		ptep = pte_offset_map(pmdp, address);

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
#ifdef CONFIG_XPA
		write_c0_entrylo0(pte_to_entrylo(ptep->pte_high));
		if (cpu_has_xpa)
			writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
		ptep++;
		write_c0_entrylo1(pte_to_entrylo(ptep->pte_high));
		if (cpu_has_xpa)
			writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
#else
		write_c0_entrylo0(ptep->pte_high);
		ptep++;
		write_c0_entrylo1(ptep->pte_high);
#endif
#else
		write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
		write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
#endif
		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
	}
	tlbw_use_hazard();
	htw_start();
	flush_micro_tlb_vm(vma);
	local_irq_restore(flags);
}

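/*
 * Install a permanent translation: bump c0_wired and write the entry at
 * the newly wired index. Wired entries are skipped both by random
 * replacement and by local_flush_tlb_all().
 */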
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
		     unsigned long entryhi, unsigned long pagemask)
{
#ifdef CONFIG_XPA
	panic("Broken for XPA kernels");
#else
	unsigned int uninitialized_var(old_mmid);
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	if (cpu_has_mmid) {
		old_mmid = read_c0_memorymapid();
		write_c0_memorymapid(MMID_KERNEL_WIRED);
	}
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	htw_stop();
	old_pagemask = read_c0_pagemask();
	wired = num_wired_entries();
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	if (cpu_has_mmid)
		write_c0_memorymapid(old_mmid);
	tlbw_use_hazard();	/* What is the hazard here? */
	htw_start();
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	local_irq_restore(flags);
#endif
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

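/*
 * Probe whether the MMU accepts a huge-page mask: program PM_HUGE_MASK
 * into c0_pagemask and check that it sticks. The answer cannot change,
 * so it is cached after the first call.
 */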
int has_transparent_hugepage(void)
{
	static unsigned int mask = -1;

	if (mask == -1) {	/* first call comes during __init */
		unsigned long flags;

		local_irq_save(flags);
		write_c0_pagemask(PM_HUGE_MASK);
		back_to_back_c0_hazard();
		mask = read_c0_pagemask();
		write_c0_pagemask(PM_DEFAULT_MASK);
		local_irq_restore(flags);
	}
	return mask == PM_HUGE_MASK;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Used for loading TLB entries before trap_init() has started, when we
 * don't actually want to add a wired entry which remains throughout the
 * lifetime of the system.
 */

int temp_tlb_entry;

__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
			       unsigned long entryhi, unsigned long pagemask)
{
	int ret = 0;
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	htw_stop();
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = num_wired_entries();
	if (--temp_tlb_entry < wired) {
		printk(KERN_WARNING
		       "No TLB space left for add_temporary_entry\n");
		ret = -ENOSPC;
		goto out;
	}

	write_c0_index(temp_tlb_entry);
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	write_c0_pagemask(old_pagemask);
	htw_start();
out:
	local_irq_restore(flags);
	return ret;
}

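/*
 * "ntlb=N" on the kernel command line wires down all but N TLB entries,
 * restricting random replacement to the remaining N (see tlb_init()).
 */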
static int ntlb;
static int __init set_ntlb(char *str)
{
	get_option(&str, &ntlb);
	return 1;
}

__setup("ntlb=", set_ntlb);

/*
 * Configure TLB (for init or after a CPU has been powered off).
 */
static void r4k_tlb_configure(void)
{
	/*
	 * You should never change this register:
	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
	 *     the value in the c0_pagemask register.
	 *   - The entire mm handling assumes the c0_pagemask register to
	 *     be set to fixed-size pages.
	 */
	write_c0_pagemask(PM_DEFAULT_MASK);
	back_to_back_c0_hazard();
	if (read_c0_pagemask() != PM_DEFAULT_MASK)
		panic("MMU doesn't support PAGE_SIZE=0x%lx", PAGE_SIZE);

	write_c0_wired(0);
	if (current_cpu_type() == CPU_R10000 ||
	    current_cpu_type() == CPU_R12000 ||
	    current_cpu_type() == CPU_R14000 ||
	    current_cpu_type() == CPU_R16000)
		write_c0_framemask(0);

	if (cpu_has_rixi) {
		/*
		 * Enable the no read, no exec bits, and enable large physical
		 * address.
		 */
#ifdef CONFIG_64BIT
		set_c0_pagegrain(PG_RIE | PG_XIE | PG_ELPA);
#else
		set_c0_pagegrain(PG_RIE | PG_XIE);
#endif
	}

	temp_tlb_entry = current_cpu_data.tlbsize - 1;

	/* From this point on the ARC firmware is dead.	 */
	local_flush_tlb_all();

	/* Did I tell you that ARC SUCKS?  */
}

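/*
 * Per-CPU TLB bring-up: program the MMU, honour any "ntlb=" restriction
 * and install the TLB refill handler.
 */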
void tlb_init(void)
{
	r4k_tlb_configure();

	if (ntlb) {
		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
			int wired = current_cpu_data.tlbsize - ntlb;
			write_c0_wired(wired);
			write_c0_index(wired - 1);
			printk("Restricting TLB to %d entries\n", ntlb);
		} else
			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
	}

	build_tlb_refill_handler();
}

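/*
 * TLB state is lost when a core is powered down, so reprogram it when
 * the CPU comes back up (or when a power-down attempt fails part-way).
 */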
static int r4k_tlb_pm_notifier(struct notifier_block *self, unsigned long cmd,
			       void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		r4k_tlb_configure();
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block r4k_tlb_pm_notifier_block = {
	.notifier_call = r4k_tlb_pm_notifier,
};

static int __init r4k_tlb_init_pm(void)
{
	return cpu_pm_register_notifier(&r4k_tlb_pm_notifier_block);
}
arch_initcall(r4k_tlb_init_pm);