sparc64: Add global PMU register dumping via sysrq.
[linux-2.6-block.git] / arch / sparc / mm / ultra.S
CommitLineData
b00dc837 1/*
1da177e4
LT
2 * ultra.S: Don't expand these all over the place...
3 *
93dae5b7 4 * Copyright (C) 1997, 2000, 2008 David S. Miller (davem@davemloft.net)
1da177e4
LT
5 */
6
1da177e4
LT
7#include <asm/asi.h>
8#include <asm/pgtable.h>
9#include <asm/page.h>
10#include <asm/spitfire.h>
11#include <asm/mmu_context.h>
2ef27778 12#include <asm/mmu.h>
1da177e4
LT
13#include <asm/pil.h>
14#include <asm/head.h>
15#include <asm/thread_info.h>
16#include <asm/cacheflush.h>
52bf082f 17#include <asm/hypervisor.h>
93dae5b7 18#include <asm/cpudata.h>
1da177e4
LT
19
20 /* Basically, most of the Spitfire vs. Cheetah madness
21 * has to do with the fact that Cheetah does not support
22 * IMMU flushes out of the secondary context. Someone needs
23 * to throw a south lake birthday party for the folks
24 * in Microelectronics who refused to fix this shit.
25 */
26
27 /* This file is meant to be read efficiently by the CPU, not humans.
28 * Staraj sie tego nikomu nie pierdolnac...
29 */
	.text
	.align		32
	.globl		__flush_tlb_mm
__flush_tlb_mm:		/* 18 insns */
	/* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
	/* Spitfire version: demap all I/D-TLB entries for context %o0.
	 * Fast path only works when %o0 is already the secondary
	 * context; otherwise fall back to the slow path, which swaps
	 * the context register around the demap.
	 * NOTE: padded with nops to 18 insns so the cheetah/hypervisor
	 * variants can be patched over it at boot by tlb_patch_one.
	 */
	ldxa		[%o1] ASI_DMMU, %g2	! current secondary context
	cmp		%g2, %o0
	bne,pn		%icc, __spitfire_flush_tlb_mm_slow
	 mov		0x50, %g3		! demap-context op, secondary ctx
	stxa		%g0, [%g3] ASI_DMMU_DEMAP
	stxa		%g0, [%g3] ASI_IMMU_DEMAP
	sethi		%hi(KERNBASE), %g3
	flush		%g3			! synchronize the demaps
	retl
	 nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	.align		32
	.globl		__flush_tlb_pending
__flush_tlb_pending:	/* 26 insns */
	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
	/* Spitfire version: demap %o1 pending page translations listed
	 * in vaddrs[] for context %o0.  Bit 0 of each vaddr entry means
	 * "also flush the I-TLB".  Runs with interrupts disabled while
	 * the secondary context register is temporarily switched.
	 * Nop-padded to 26 insns for boot-time patching.
	 */
	rdpr		%pstate, %g7
	sllx		%o1, 3, %o1		! nr entries -> byte offset
	andn		%g7, PSTATE_IE, %g2
	wrpr		%g2, %pstate		! disable interrupts
	mov		SECONDARY_CONTEXT, %o4
	ldxa		[%o4] ASI_DMMU, %g2	! save old secondary ctx
	stxa		%o0, [%o4] ASI_DMMU	! install target ctx
1:	sub		%o1, (1 << 3), %o1
	ldx		[%o2 + %o1], %o3
	andcc		%o3, 1, %g0		! I-TLB flush requested?
	andn		%o3, 1, %o3
	be,pn		%icc, 2f
	 or		%o3, 0x10, %o3		! demap-page, secondary ctx
	stxa		%g0, [%o3] ASI_IMMU_DEMAP
2:	stxa		%g0, [%o3] ASI_DMMU_DEMAP
	membar		#Sync
	brnz,pt		%o1, 1b
	 nop
	stxa		%g2, [%o4] ASI_DMMU	! restore old secondary ctx
	sethi		%hi(KERNBASE), %o4
	flush		%o4
	retl
	 wrpr		%g7, 0x0, %pstate	! re-enable interrupts
	nop
	nop
	nop
	nop
	.align		32
	.globl		__flush_tlb_kernel_range
__flush_tlb_kernel_range:	/* 16 insns */
	/* %o0=start, %o1=end */
	/* Demap all nucleus (kernel) context TLB entries in the page
	 * range [start, end).  Walks the range backwards from the last
	 * page.  Nop-padded to 16 insns for boot-time patching.
	 */
	cmp		%o0, %o1
	be,pn		%xcc, 2f		! empty range: nothing to do
	 sethi		%hi(PAGE_SIZE), %o4
	sub		%o1, %o0, %o3
	sub		%o3, %o4, %o3		! offset of last page
	or		%o0, 0x20, %o0		! Nucleus
1:	stxa		%g0, [%o0 + %o3] ASI_DMMU_DEMAP
	stxa		%g0, [%o0 + %o3] ASI_IMMU_DEMAP
	membar		#Sync
	brnz,pt		%o3, 1b
	 sub		%o3, %o4, %o3
2:	sethi		%hi(KERNBASE), %o3
	flush		%o3
	retl
	 nop
	nop
__spitfire_flush_tlb_mm_slow:
	/* Slow path of __flush_tlb_mm: target context %o0 is not the
	 * current secondary context.  With interrupts disabled, install
	 * %o0, demap, then restore the previous context (still in %g2
	 * from the fast path's load).  %g3 still holds the demap op.
	 */
	rdpr		%pstate, %g1
	wrpr		%g1, PSTATE_IE, %pstate	! toggle IE off
	stxa		%o0, [%o1] ASI_DMMU
	stxa		%g0, [%g3] ASI_DMMU_DEMAP
	stxa		%g0, [%g3] ASI_IMMU_DEMAP
	flush		%g6
	stxa		%g2, [%o1] ASI_DMMU	! restore previous ctx
	sethi		%hi(KERNBASE), %o1
	flush		%o1
	retl
	 wrpr		%g1, 0, %pstate		! restore interrupt state
/*
 * The following code flushes one page_size worth.
 */
	.section .kprobes.text, "ax"
	.align		32
	.globl		__flush_icache_page
__flush_icache_page:	/* %o0 = phys_page */
	/* Flush one page of the I-cache via `flush` instructions on the
	 * page's kernel linear-mapping address, one 32-byte line at a
	 * time, walking backwards from the end of the page.
	 */
	srlx		%o0, PAGE_SHIFT, %o0
	sethi		%uhi(PAGE_OFFSET), %g1
	sllx		%o0, PAGE_SHIFT, %o0	! page-align physical addr
	sethi		%hi(PAGE_SIZE), %g2
	sllx		%g1, 32, %g1
	add		%o0, %g1, %o0		! phys -> kernel virtual
1:	subcc		%g2, 32, %g2
	bne,pt		%icc, 1b
	 flush		%o0 + %g2
	retl
	 nop
#ifdef DCACHE_ALIASING_POSSIBLE

#if (PAGE_SHIFT != 13)
#error only page shift of 13 is supported by dcache flush
#endif

#define DTAG_MASK 0x3

	/* This routine is Spitfire specific so the hardcoded
	 * D-cache size and line-size are OK.
	 */
	.align		64
	.globl		__flush_dcache_page
__flush_dcache_page:	/* %o0=kaddr, %o1=flush_icache */
	/* Walk every D-cache tag (16K cache, 32-byte lines) and
	 * invalidate lines whose tag matches the given kernel address.
	 * If %o1 is non-zero, tail-call __flush_icache_page as well,
	 * reconstructing the physical address in the delay slot.
	 */
	sethi		%uhi(PAGE_OFFSET), %g1
	sllx		%g1, 32, %g1
	sub		%o0, %g1, %o0			! physical address
	srlx		%o0, 11, %o0			! make D-cache TAG
	sethi		%hi(1 << 14), %o2		! D-cache size
	sub		%o2, (1 << 5), %o2		! D-cache line size
1:	ldxa		[%o2] ASI_DCACHE_TAG, %o3	! load D-cache TAG
	andcc		%o3, DTAG_MASK, %g0		! Valid?
	be,pn		%xcc, 2f			! Nope, branch
	 andn		%o3, DTAG_MASK, %o3		! Clear valid bits
	cmp		%o3, %o0			! TAG match?
	bne,pt		%xcc, 2f			! Nope, branch
	 nop
	stxa		%g0, [%o2] ASI_DCACHE_TAG	! Invalidate TAG
	membar		#Sync
2:	brnz,pt		%o2, 1b
	 sub		%o2, (1 << 5), %o2		! D-cache line size

	/* The I-cache does not snoop local stores so we
	 * better flush that too when necessary.
	 */
	brnz,pt		%o1, __flush_icache_page
	 sllx		%o0, 11, %o0			! TAG -> physical addr
	retl
	 nop

#endif /* DCACHE_ALIASING_POSSIBLE */

	.previous
2ef27778 184 /* Cheetah specific versions, patched at boot time. */
4da808c3 185__cheetah_flush_tlb_mm: /* 19 insns */
1da177e4
LT
186 rdpr %pstate, %g7
187 andn %g7, PSTATE_IE, %g2
188 wrpr %g2, 0x0, %pstate
189 wrpr %g0, 1, %tl
190 mov PRIMARY_CONTEXT, %o2
191 mov 0x40, %g3
192 ldxa [%o2] ASI_DMMU, %g2
2ef27778
DM
193 srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o1
194 sllx %o1, CTX_PGSZ1_NUC_SHIFT, %o1
195 or %o0, %o1, %o0 /* Preserve nucleus page size fields */
1da177e4
LT
196 stxa %o0, [%o2] ASI_DMMU
197 stxa %g0, [%g3] ASI_DMMU_DEMAP
198 stxa %g0, [%g3] ASI_IMMU_DEMAP
199 stxa %g2, [%o2] ASI_DMMU
4da808c3
DM
200 sethi %hi(KERNBASE), %o2
201 flush %o2
1da177e4
LT
202 wrpr %g0, 0, %tl
203 retl
204 wrpr %g7, 0x0, %pstate
205
4da808c3 206__cheetah_flush_tlb_pending: /* 27 insns */
1da177e4
LT
207 /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
208 rdpr %pstate, %g7
209 sllx %o1, 3, %o1
210 andn %g7, PSTATE_IE, %g2
211 wrpr %g2, 0x0, %pstate
212 wrpr %g0, 1, %tl
213 mov PRIMARY_CONTEXT, %o4
214 ldxa [%o4] ASI_DMMU, %g2
2ef27778
DM
215 srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o3
216 sllx %o3, CTX_PGSZ1_NUC_SHIFT, %o3
217 or %o0, %o3, %o0 /* Preserve nucleus page size fields */
1da177e4
LT
218 stxa %o0, [%o4] ASI_DMMU
2191: sub %o1, (1 << 3), %o1
220 ldx [%o2 + %o1], %o3
221 andcc %o3, 1, %g0
222 be,pn %icc, 2f
223 andn %o3, 1, %o3
224 stxa %g0, [%o3] ASI_IMMU_DEMAP
2252: stxa %g0, [%o3] ASI_DMMU_DEMAP
b445e26c 226 membar #Sync
1da177e4 227 brnz,pt %o1, 1b
b445e26c 228 nop
1da177e4 229 stxa %g2, [%o4] ASI_DMMU
4da808c3
DM
230 sethi %hi(KERNBASE), %o4
231 flush %o4
1da177e4
LT
232 wrpr %g0, 0, %tl
233 retl
234 wrpr %g7, 0x0, %pstate
235
#ifdef DCACHE_ALIASING_POSSIBLE
__cheetah_flush_dcache_page:	/* 11 insns */
	/* Cheetah version of __flush_dcache_page: invalidate the whole
	 * page directly via ASI_DCACHE_INVALIDATE, one 32-byte line at
	 * a time.  No tag-compare loop needed, and the flush_icache
	 * argument is ignored (see comment at retl).  Patched over
	 * __flush_dcache_page at boot.
	 */
	sethi		%uhi(PAGE_OFFSET), %g1
	sllx		%g1, 32, %g1
	sub		%o0, %g1, %o0		! kaddr -> physical addr
	sethi		%hi(PAGE_SIZE), %o4
1:	subcc		%o4, (1 << 5), %o4
	stxa		%g0, [%o0 + %o4] ASI_DCACHE_INVALIDATE
	membar		#Sync
	bne,pt		%icc, 1b
	 nop
	retl		/* I-cache flush never needed on Cheetah, see callers. */
	 nop
#endif /* DCACHE_ALIASING_POSSIBLE */
52bf082f 251 /* Hypervisor specific versions, patched at boot time. */
2a3a5f5d
DM
252__hypervisor_tlb_tl0_error:
253 save %sp, -192, %sp
254 mov %i0, %o0
255 call hypervisor_tlbop_error
256 mov %i1, %o1
257 ret
258 restore
259
__hypervisor_flush_tlb_mm:	/* 10 insns */
	/* sun4v version of __flush_tlb_mm: demap all translations for
	 * mmu context %o0 via the HV_FAST_MMU_DEMAP_CTX hypervisor
	 * fast trap.  A non-zero return in %o0 is routed to
	 * __hypervisor_tlb_tl0_error.  Patched over __flush_tlb_mm
	 * by hypervisor_patch_cachetlbops.
	 */
	mov		%o0, %o2		/* ARG2: mmu context */
	mov		0, %o0			/* ARG0: CPU lists unimplemented */
	mov		0, %o1			/* ARG1: CPU lists unimplemented */
	mov		HV_MMU_ALL, %o3		/* ARG3: flags */
	mov		HV_FAST_MMU_DEMAP_CTX, %o5
	ta		HV_FAST_TRAP
	brnz,pn		%o0, __hypervisor_tlb_tl0_error
	 mov		HV_FAST_MMU_DEMAP_CTX, %o1
	retl
	 nop
2a3a5f5d 272__hypervisor_flush_tlb_pending: /* 16 insns */
52bf082f
DM
273 /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
274 sllx %o1, 3, %g1
275 mov %o2, %g2
276 mov %o0, %g3
2771: sub %g1, (1 << 3), %g1
278 ldx [%g2 + %g1], %o0 /* ARG0: vaddr + IMMU-bit */
279 mov %g3, %o1 /* ARG1: mmu context */
2a3a5f5d
DM
280 mov HV_MMU_ALL, %o2 /* ARG2: flags */
281 srlx %o0, PAGE_SHIFT, %o0
282 sllx %o0, PAGE_SHIFT, %o0
52bf082f 283 ta HV_MMU_UNMAP_ADDR_TRAP
2a3a5f5d
DM
284 brnz,pn %o0, __hypervisor_tlb_tl0_error
285 mov HV_MMU_UNMAP_ADDR_TRAP, %o1
52bf082f
DM
286 brnz,pt %g1, 1b
287 nop
288 retl
289 nop
290
2a3a5f5d 291__hypervisor_flush_tlb_kernel_range: /* 16 insns */
52bf082f
DM
292 /* %o0=start, %o1=end */
293 cmp %o0, %o1
294 be,pn %xcc, 2f
295 sethi %hi(PAGE_SIZE), %g3
296 mov %o0, %g1
297 sub %o1, %g1, %g2
298 sub %g2, %g3, %g2
2991: add %g1, %g2, %o0 /* ARG0: virtual address */
300 mov 0, %o1 /* ARG1: mmu context */
301 mov HV_MMU_ALL, %o2 /* ARG2: flags */
302 ta HV_MMU_UNMAP_ADDR_TRAP
2a3a5f5d
DM
303 brnz,pn %o0, __hypervisor_tlb_tl0_error
304 mov HV_MMU_UNMAP_ADDR_TRAP, %o1
52bf082f
DM
305 brnz,pt %g2, 1b
306 sub %g2, %g3, %g2
3072: retl
308 nop
309
#ifdef DCACHE_ALIASING_POSSIBLE
	/* XXX Niagara and friends have an 8K cache, so no aliasing is
	 * XXX possible, but nothing explicit in the Hypervisor API
	 * XXX guarantees this.
	 */
__hypervisor_flush_dcache_page:	/* 2 insns */
	/* No-op: aliasing assumed impossible on sun4v (see XXX above). */
	retl
	 nop
#endif
tlb_patch_one:
	/* Copy %o2 instruction words from %o1 (replacement routine) to
	 * %o0 (patch site), flushing the I-cache for each word.  Used
	 * at boot to overwrite the generic flush routines with the
	 * cheetah/hypervisor variants; the fixed insn counts in this
	 * file exist for these calls.
	 */
1:	lduw		[%o1], %g1
	stw		%g1, [%o0]
	flush		%o0			! keep I-cache coherent
	subcc		%o2, 1, %o2
	add		%o1, 4, %o1
	bne,pt		%icc, 1b
	 add		%o0, 4, %o0
	retl
	 nop
	.globl		cheetah_patch_cachetlbops
cheetah_patch_cachetlbops:
	/* Boot-time patching for Cheetah CPUs: overwrite the generic
	 * Spitfire flush routines with the __cheetah_* versions using
	 * tlb_patch_one.  The insn counts passed in %o2 must match the
	 * "/* N insns *​/" annotations on the replacement routines.
	 */
	save		%sp, -128, %sp

	sethi		%hi(__flush_tlb_mm), %o0
	or		%o0, %lo(__flush_tlb_mm), %o0
	sethi		%hi(__cheetah_flush_tlb_mm), %o1
	or		%o1, %lo(__cheetah_flush_tlb_mm), %o1
	call		tlb_patch_one
	 mov		19, %o2

	sethi		%hi(__flush_tlb_pending), %o0
	or		%o0, %lo(__flush_tlb_pending), %o0
	sethi		%hi(__cheetah_flush_tlb_pending), %o1
	or		%o1, %lo(__cheetah_flush_tlb_pending), %o1
	call		tlb_patch_one
	 mov		27, %o2

#ifdef DCACHE_ALIASING_POSSIBLE
	sethi		%hi(__flush_dcache_page), %o0
	or		%o0, %lo(__flush_dcache_page), %o0
	sethi		%hi(__cheetah_flush_dcache_page), %o1
	or		%o1, %lo(__cheetah_flush_dcache_page), %o1
	call		tlb_patch_one
	 mov		11, %o2
#endif /* DCACHE_ALIASING_POSSIBLE */

	ret
	 restore
#ifdef CONFIG_SMP
	/* These are all called by the slaves of a cross call, at
	 * trap level 1, with interrupts fully disabled.
	 *
	 * Register usage:
	 *	%g5	mm->context	(all tlb flushes)
	 *	%g1	address arg 1	(tlb page and range flushes)
	 *	%g7	address arg 2	(tlb range flush only)
	 *
	 *	%g6	scratch 1
	 *	%g2	scratch 2
	 *	%g3	scratch 3
	 *	%g4	scratch 4
	 */
	.align		32
	.globl		xcall_flush_tlb_mm
xcall_flush_tlb_mm:	/* 21 insns */
	/* Cross-call slave: demap all TLB entries for context %g5 via
	 * the primary context register, preserving the nucleus
	 * page-size fields.  Ends with retry (returns from the xcall
	 * trap).  Nop-padded to 21 insns so the hypervisor version can
	 * be patched over it at boot.
	 */
	mov		PRIMARY_CONTEXT, %g2
	ldxa		[%g2] ASI_DMMU, %g3	! save old primary ctx
	srlx		%g3, CTX_PGSZ1_NUC_SHIFT, %g4
	sllx		%g4, CTX_PGSZ1_NUC_SHIFT, %g4
	or		%g5, %g4, %g5		/* Preserve nucleus page size fields */
	stxa		%g5, [%g2] ASI_DMMU
	mov		0x40, %g4		! demap-context op, primary ctx
	stxa		%g0, [%g4] ASI_DMMU_DEMAP
	stxa		%g0, [%g4] ASI_IMMU_DEMAP
	stxa		%g3, [%g2] ASI_DMMU	! restore old primary ctx
	retry
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	.globl		xcall_flush_tlb_pending
xcall_flush_tlb_pending:	/* 21 insns */
	/* %g5=context, %g1=nr, %g7=vaddrs[] */
	/* Cross-call slave version of the pending-page flush: demap the
	 * listed vaddrs for context %g5 via the primary context
	 * register (nucleus page-size fields preserved).  Bit 0 of each
	 * vaddr entry requests an I-TLB demap as well.  Nop-padded to
	 * 21 insns for boot-time patching.
	 */
	sllx		%g1, 3, %g1		! nr entries -> byte offset
	mov		PRIMARY_CONTEXT, %g4
	ldxa		[%g4] ASI_DMMU, %g2	! save old primary ctx
	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %g4
	sllx		%g4, CTX_PGSZ1_NUC_SHIFT, %g4
	or		%g5, %g4, %g5		! preserve nucleus pgsz fields
	mov		PRIMARY_CONTEXT, %g4
	stxa		%g5, [%g4] ASI_DMMU
1:	sub		%g1, (1 << 3), %g1
	ldx		[%g7 + %g1], %g5
	andcc		%g5, 0x1, %g0		! I-TLB flush requested?
	be,pn		%icc, 2f
	 andn		%g5, 0x1, %g5
	stxa		%g0, [%g5] ASI_IMMU_DEMAP
2:	stxa		%g0, [%g5] ASI_DMMU_DEMAP
	membar		#Sync
	brnz,pt		%g1, 1b
	 nop
	stxa		%g2, [%g4] ASI_DMMU	! restore old primary ctx
	retry
	nop
	.globl		xcall_flush_tlb_kernel_range
xcall_flush_tlb_kernel_range:	/* 25 insns */
	/* Cross-call slave: demap nucleus-context TLB entries for the
	 * page range [%g1, %g7), walking backwards.  Both endpoints are
	 * page-aligned first.  Nop-padded to 25 insns for boot-time
	 * patching with the hypervisor version.
	 */
	sethi		%hi(PAGE_SIZE - 1), %g2
	or		%g2, %lo(PAGE_SIZE - 1), %g2
	andn		%g1, %g2, %g1		! page-align start
	andn		%g7, %g2, %g7		! page-align end
	sub		%g7, %g1, %g3
	add		%g2, 1, %g2		! %g2 = PAGE_SIZE
	sub		%g3, %g2, %g3		! offset of last page
	or		%g1, 0x20, %g1		! Nucleus
1:	stxa		%g0, [%g1 + %g3] ASI_DMMU_DEMAP
	stxa		%g0, [%g1 + %g3] ASI_IMMU_DEMAP
	membar		#Sync
	brnz,pt		%g3, 1b
	 sub		%g3, %g2, %g3
	retry
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	/* This runs in a very controlled environment, so we do
	 * not need to worry about BH races etc.
	 */
	.globl		xcall_sync_tick
xcall_sync_tick:
	/* Cross-call slave for tick synchronization: switch out of the
	 * interrupt-globals register set (nop'd out on sun4v via the
	 * sun4v_2insn_patch section), raise PIL, build a full trapframe
	 * with etrap_irq, then call smp_synchronize_tick_client() and
	 * return through rtrap_xcall.
	 */

661:	rdpr		%pstate, %g2
	wrpr		%g2, PSTATE_IG | PSTATE_AG, %pstate
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	rdpr		%pil, %g2
	wrpr		%g0, PIL_NORMAL_MAX, %pil
	sethi		%hi(109f), %g7
	b,pt		%xcc, etrap_irq
109:	 or		%g7, %lo(109b), %g7	! return point for etrap_irq
#ifdef CONFIG_TRACE_IRQFLAGS
	call		trace_hardirqs_off
	 nop
#endif
	call		smp_synchronize_tick_client
	 nop
	b		rtrap_xcall
	 ldx		[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
93dae5b7
DM
482 .globl xcall_fetch_glob_regs
483xcall_fetch_glob_regs:
916ca14a
DM
484 sethi %hi(global_cpu_snapshot), %g1
485 or %g1, %lo(global_cpu_snapshot), %g1
93dae5b7
DM
486 __GET_CPUID(%g2)
487 sllx %g2, 6, %g3
488 add %g1, %g3, %g1
489 rdpr %tstate, %g7
490 stx %g7, [%g1 + GR_SNAP_TSTATE]
491 rdpr %tpc, %g7
492 stx %g7, [%g1 + GR_SNAP_TPC]
493 rdpr %tnpc, %g7
494 stx %g7, [%g1 + GR_SNAP_TNPC]
495 stx %o7, [%g1 + GR_SNAP_O7]
496 stx %i7, [%g1 + GR_SNAP_I7]
5afe2738 497 /* Don't try this at home kids... */
a5a737e0
DM
498 rdpr %cwp, %g3
499 sub %g3, 1, %g7
5afe2738
DM
500 wrpr %g7, %cwp
501 mov %i7, %g7
a5a737e0 502 wrpr %g3, %cwp
5afe2738 503 stx %g7, [%g1 + GR_SNAP_RPC]
93dae5b7
DM
504 sethi %hi(trap_block), %g7
505 or %g7, %lo(trap_block), %g7
506 sllx %g2, TRAP_BLOCK_SZ_SHIFT, %g2
507 add %g7, %g2, %g7
508 ldx [%g7 + TRAP_PER_CPU_THREAD], %g3
93dae5b7
DM
509 stx %g3, [%g1 + GR_SNAP_THREAD]
510 retry
93dae5b7 511
916ca14a
DM
512 .globl xcall_fetch_glob_pmu
513xcall_fetch_glob_pmu:
514 sethi %hi(global_cpu_snapshot), %g1
515 or %g1, %lo(global_cpu_snapshot), %g1
516 __GET_CPUID(%g2)
517 sllx %g2, 6, %g3
518 add %g1, %g3, %g1
519 rd %pic, %g7
520 stx %g7, [%g1 + (4 * 8)]
521 rd %pcr, %g7
522 stx %g7, [%g1 + (0 * 8)]
523 retry
524
	.globl		xcall_fetch_glob_pmu_n4
xcall_fetch_glob_pmu_n4:
	/* Niagara-4 variant of xcall_fetch_glob_pmu: the four PIC
	 * counters are read via ASI_PIC (offsets 0x0/0x8/0x10/0x18 into
	 * slot words 4-7), and the four PCR values are fetched through
	 * the HV_FAST_VT_GET_PERFREG hypervisor call (regs 3..0 into
	 * slot words 3-0).  %o0/%o1/%o5 are preserved in %g2/%g3/%g7
	 * around the hypervisor traps.
	 */
	sethi		%hi(global_cpu_snapshot), %g1
	or		%g1, %lo(global_cpu_snapshot), %g1
	__GET_CPUID(%g2)
	sllx		%g2, 6, %g3		! cpuid * 64 = slot offset
	add		%g1, %g3, %g1

	ldxa		[%g0] ASI_PIC, %g7
	stx		%g7, [%g1 + (4 * 8)]
	mov		0x08, %g3
	ldxa		[%g3] ASI_PIC, %g7
	stx		%g7, [%g1 + (5 * 8)]
	mov		0x10, %g3
	ldxa		[%g3] ASI_PIC, %g7
	stx		%g7, [%g1 + (6 * 8)]
	mov		0x18, %g3
	ldxa		[%g3] ASI_PIC, %g7
	stx		%g7, [%g1 + (7 * 8)]

	mov		%o0, %g2		! save %o regs the HV call clobbers
	mov		%o1, %g3
	mov		%o5, %g7

	mov		HV_FAST_VT_GET_PERFREG, %o5
	mov		3, %o0
	ta		HV_FAST_TRAP
	stx		%o1, [%g1 + (3 * 8)]
	mov		HV_FAST_VT_GET_PERFREG, %o5
	mov		2, %o0
	ta		HV_FAST_TRAP
	stx		%o1, [%g1 + (2 * 8)]
	mov		HV_FAST_VT_GET_PERFREG, %o5
	mov		1, %o0
	ta		HV_FAST_TRAP
	stx		%o1, [%g1 + (1 * 8)]
	mov		HV_FAST_VT_GET_PERFREG, %o5
	mov		0, %o0
	ta		HV_FAST_TRAP
	stx		%o1, [%g1 + (0 * 8)]

	mov		%g2, %o0		! restore saved %o regs
	mov		%g3, %o1
	mov		%g7, %o5

	retry
1da177e4
LT
572#ifdef DCACHE_ALIASING_POSSIBLE
573 .align 32
574 .globl xcall_flush_dcache_page_cheetah
575xcall_flush_dcache_page_cheetah: /* %g1 == physical page address */
576 sethi %hi(PAGE_SIZE), %g3
5771: subcc %g3, (1 << 5), %g3
578 stxa %g0, [%g1 + %g3] ASI_DCACHE_INVALIDATE
579 membar #Sync
580 bne,pt %icc, 1b
581 nop
582 retry
583 nop
584#endif /* DCACHE_ALIASING_POSSIBLE */
585
	.globl		xcall_flush_dcache_page_spitfire
xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
				     %g7 == kernel page virtual address
				     %g5 == (page->mapping != NULL)  */
	/* Cross-call slave: walk the 16K/32-byte-line Spitfire D-cache
	 * tags and invalidate lines matching the page's tag; then, if
	 * the page is mapped (%g5 != 0), flush the I-cache for the
	 * page's kernel virtual address as well.
	 */
#ifdef DCACHE_ALIASING_POSSIBLE
	srlx		%g1, (13 - 2), %g1	! Form tag comparitor
	sethi		%hi(L1DCACHE_SIZE), %g3	! D$ size == 16K
	sub		%g3, (1 << 5), %g3	! D$ linesize == 32
1:	ldxa		[%g3] ASI_DCACHE_TAG, %g2
	andcc		%g2, 0x3, %g0		! line valid?
	be,pn		%xcc, 2f
	 andn		%g2, 0x3, %g2
	cmp		%g2, %g1		! tag match?

	bne,pt		%xcc, 2f
	 nop
	stxa		%g0, [%g3] ASI_DCACHE_TAG
	membar		#Sync
2:	cmp		%g3, 0
	bne,pt		%xcc, 1b
	 sub		%g3, (1 << 5), %g3

	brz,pn		%g5, 2f			! unmapped: skip I-cache flush
#endif /* DCACHE_ALIASING_POSSIBLE */
	 sethi		%hi(PAGE_SIZE), %g3

1:	flush		%g7
	subcc		%g3, (1 << 5), %g3
	bne,pt		%icc, 1b
	 add		%g7, (1 << 5), %g7

2:	retry
	nop
	nop
2a3a5f5d
DM
621 /* %g5: error
622 * %g6: tlb op
623 */
624__hypervisor_tlb_xcall_error:
625 mov %g5, %g4
626 mov %g6, %g5
627 ba,pt %xcc, etrap
628 rd %pc, %g7
629 mov %l4, %o0
630 call hypervisor_tlbop_error_xcall
631 mov %l5, %o1
7697daaa 632 ba,a,pt %xcc, rtrap
2a3a5f5d 633
52bf082f 634 .globl __hypervisor_xcall_flush_tlb_mm
2a3a5f5d 635__hypervisor_xcall_flush_tlb_mm: /* 21 insns */
52bf082f
DM
636 /* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */
637 mov %o0, %g2
638 mov %o1, %g3
639 mov %o2, %g4
640 mov %o3, %g1
641 mov %o5, %g7
642 clr %o0 /* ARG0: CPU lists unimplemented */
643 clr %o1 /* ARG1: CPU lists unimplemented */
644 mov %g5, %o2 /* ARG2: mmu context */
645 mov HV_MMU_ALL, %o3 /* ARG3: flags */
646 mov HV_FAST_MMU_DEMAP_CTX, %o5
647 ta HV_FAST_TRAP
2a3a5f5d
DM
648 mov HV_FAST_MMU_DEMAP_CTX, %g6
649 brnz,pn %o0, __hypervisor_tlb_xcall_error
650 mov %o0, %g5
52bf082f
DM
651 mov %g2, %o0
652 mov %g3, %o1
653 mov %g4, %o2
654 mov %g1, %o3
655 mov %g7, %o5
656 membar #Sync
657 retry
658
	.globl		__hypervisor_xcall_flush_tlb_pending
__hypervisor_xcall_flush_tlb_pending: /* 21 insns */
	/* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4,g6=scratch */
	/* sun4v cross-call slave: unmap each listed vaddr for context
	 * %g5 via HV_MMU_UNMAP_ADDR_TRAP.  The IMMU bit in each entry
	 * is stripped by the page-align; HV_MMU_ALL covers both TLBs.
	 * Interrupted %o0-%o2 preserved in %g2-%g4 across the traps.
	 * Patched over xcall_flush_tlb_pending at boot (21 insns).
	 */
	sllx		%g1, 3, %g1		! nr entries -> byte offset
	mov		%o0, %g2
	mov		%o1, %g3
	mov		%o2, %g4
1:	sub		%g1, (1 << 3), %g1
	ldx		[%g7 + %g1], %o0	/* ARG0: virtual address */
	mov		%g5, %o1		/* ARG1: mmu context */
	mov		HV_MMU_ALL, %o2		/* ARG2: flags */
	srlx		%o0, PAGE_SHIFT, %o0
	sllx		%o0, PAGE_SHIFT, %o0	! strip IMMU bit / page-align
	ta		HV_MMU_UNMAP_ADDR_TRAP
	mov		HV_MMU_UNMAP_ADDR_TRAP, %g6
	brnz,a,pn	%o0, __hypervisor_tlb_xcall_error
	 mov		%o0, %g5
	brnz,pt		%g1, 1b
	 nop
	mov		%g2, %o0	! restore interrupted %o regs
	mov		%g3, %o1
	mov		%g4, %o2
	membar		#Sync
	retry
	.globl		__hypervisor_xcall_flush_tlb_kernel_range
__hypervisor_xcall_flush_tlb_kernel_range: /* 25 insns */
	/* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */
	/* sun4v cross-call slave: unmap each page of the page-aligned
	 * range [%g1, %g7) in context 0 via HV_MMU_UNMAP_ADDR_TRAP,
	 * walking backwards.  Interrupted %o0-%o2 preserved in
	 * %g2/%g4/%g7 (%g7 is free after the range length is computed).
	 * Patched over xcall_flush_tlb_kernel_range at boot (25 insns).
	 */
	sethi		%hi(PAGE_SIZE - 1), %g2
	or		%g2, %lo(PAGE_SIZE - 1), %g2
	andn		%g1, %g2, %g1		! page-align start
	andn		%g7, %g2, %g7		! page-align end
	sub		%g7, %g1, %g3
	add		%g2, 1, %g2
	sub		%g3, %g2, %g3		! offset of last page
	mov		%o0, %g2		! save interrupted %o regs
	mov		%o1, %g4
	mov		%o2, %g7
1:	add		%g1, %g3, %o0	/* ARG0: virtual address */
	mov		0, %o1		/* ARG1: mmu context */
	mov		HV_MMU_ALL, %o2	/* ARG2: flags */
	ta		HV_MMU_UNMAP_ADDR_TRAP
	mov		HV_MMU_UNMAP_ADDR_TRAP, %g6
	brnz,pn		%o0, __hypervisor_tlb_xcall_error
	 mov		%o0, %g5
	sethi		%hi(PAGE_SIZE), %o2
	brnz,pt		%g3, 1b
	 sub		%g3, %o2, %g3
	mov		%g2, %o0	! restore interrupted %o regs
	mov		%g4, %o1
	mov		%g7, %o2
	membar		#Sync
	retry
1da177e4
LT
713 /* These just get rescheduled to PIL vectors. */
714 .globl xcall_call_function
715xcall_call_function:
716 wr %g0, (1 << PIL_SMP_CALL_FUNC), %set_softint
717 retry
718
d172ad18
DM
719 .globl xcall_call_function_single
720xcall_call_function_single:
721 wr %g0, (1 << PIL_SMP_CALL_FUNC_SNGL), %set_softint
722 retry
723
1da177e4
LT
724 .globl xcall_receive_signal
725xcall_receive_signal:
726 wr %g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint
727 retry
728
729 .globl xcall_capture
730xcall_capture:
731 wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint
732 retry
733
ee29074d
DM
734 .globl xcall_new_mmu_context_version
735xcall_new_mmu_context_version:
736 wr %g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint
737 retry
738
e2fdd7fd
DM
739#ifdef CONFIG_KGDB
740 .globl xcall_kgdb_capture
741xcall_kgdb_capture:
42cc77c8
DM
742 wr %g0, (1 << PIL_KGDB_CAPTURE), %set_softint
743 retry
e2fdd7fd
DM
744#endif
745
1da177e4 746#endif /* CONFIG_SMP */
52bf082f
DM
747
748
749 .globl hypervisor_patch_cachetlbops
750hypervisor_patch_cachetlbops:
751 save %sp, -128, %sp
752
753 sethi %hi(__flush_tlb_mm), %o0
754 or %o0, %lo(__flush_tlb_mm), %o0
755 sethi %hi(__hypervisor_flush_tlb_mm), %o1
756 or %o1, %lo(__hypervisor_flush_tlb_mm), %o1
757 call tlb_patch_one
2a3a5f5d 758 mov 10, %o2
52bf082f
DM
759
760 sethi %hi(__flush_tlb_pending), %o0
761 or %o0, %lo(__flush_tlb_pending), %o0
762 sethi %hi(__hypervisor_flush_tlb_pending), %o1
763 or %o1, %lo(__hypervisor_flush_tlb_pending), %o1
764 call tlb_patch_one
2a3a5f5d 765 mov 16, %o2
52bf082f
DM
766
767 sethi %hi(__flush_tlb_kernel_range), %o0
768 or %o0, %lo(__flush_tlb_kernel_range), %o0
769 sethi %hi(__hypervisor_flush_tlb_kernel_range), %o1
770 or %o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1
771 call tlb_patch_one
2a3a5f5d 772 mov 16, %o2
52bf082f
DM
773
774#ifdef DCACHE_ALIASING_POSSIBLE
775 sethi %hi(__flush_dcache_page), %o0
776 or %o0, %lo(__flush_dcache_page), %o0
777 sethi %hi(__hypervisor_flush_dcache_page), %o1
778 or %o1, %lo(__hypervisor_flush_dcache_page), %o1
779 call tlb_patch_one
780 mov 2, %o2
781#endif /* DCACHE_ALIASING_POSSIBLE */
782
783#ifdef CONFIG_SMP
784 sethi %hi(xcall_flush_tlb_mm), %o0
785 or %o0, %lo(xcall_flush_tlb_mm), %o0
786 sethi %hi(__hypervisor_xcall_flush_tlb_mm), %o1
787 or %o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1
788 call tlb_patch_one
2a3a5f5d 789 mov 21, %o2
52bf082f
DM
790
791 sethi %hi(xcall_flush_tlb_pending), %o0
792 or %o0, %lo(xcall_flush_tlb_pending), %o0
793 sethi %hi(__hypervisor_xcall_flush_tlb_pending), %o1
794 or %o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1
795 call tlb_patch_one
2a3a5f5d 796 mov 21, %o2
52bf082f
DM
797
798 sethi %hi(xcall_flush_tlb_kernel_range), %o0
799 or %o0, %lo(xcall_flush_tlb_kernel_range), %o0
800 sethi %hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1
801 or %o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1
802 call tlb_patch_one
2a3a5f5d 803 mov 25, %o2
52bf082f
DM
804#endif /* CONFIG_SMP */
805
806 ret
807 restore