/* $Id: ultra.S,v 1.72 2002/02/09 19:49:31 davem Exp $
 * ultra.S: Don't expand these all over the place...
 *
 * Copyright (C) 1997, 2000 David S. Miller (davem@redhat.com)
 */

#include <linux/config.h>
#include <asm/asi.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/spitfire.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <asm/pil.h>
#include <asm/head.h>
#include <asm/thread_info.h>
#include <asm/cacheflush.h>
#include <asm/hypervisor.h>

	/* Basically, most of the Spitfire vs. Cheetah madness
	 * has to do with the fact that Cheetah does not support
	 * IMMU flushes out of the secondary context.  Someone needs
	 * to throw a south lake birthday party for the folks
	 * in Microelectronics who refused to fix this shit.
	 */

	/* This file is meant to be read efficiently by the CPU, not humans.
	 * Try not to fuck this up for anybody...
	 */
	.text
	.align	32
	.globl	__flush_tlb_mm
__flush_tlb_mm:	/* 19 insns */
	/* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
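	/* The MMU demap address encodes the operation: bits 5:4 pick the
	 * context register (0x00 primary, 0x10 secondary, 0x20 nucleus)
	 * and 0x40 selects a whole-context demap instead of a single
	 * page, so the 0x50 below demaps the entire secondary context.
	 */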
	ldxa	[%o1] ASI_DMMU, %g2
	cmp	%g2, %o0
	bne,pn	%icc, __spitfire_flush_tlb_mm_slow
	mov	0x50, %g3
	stxa	%g0, [%g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g3] ASI_IMMU_DEMAP
	sethi	%hi(KERNBASE), %g3
	flush	%g3
	retl
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

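	/* Interrupts are disabled across the loop below because the
	 * demaps must run with the victim context temporarily installed
	 * in SECONDARY_CONTEXT; bit 4 (0x10) ORed into each demap
	 * address selects the secondary context for the operation.
	 */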
	.align	32
	.globl	__flush_tlb_pending
__flush_tlb_pending:	/* 27 insns */
	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
	rdpr	%pstate, %g7
	sllx	%o1, 3, %o1
	andn	%g7, PSTATE_IE, %g2
	wrpr	%g2, %pstate
	mov	SECONDARY_CONTEXT, %o4
	ldxa	[%o4] ASI_DMMU, %g2
	stxa	%o0, [%o4] ASI_DMMU
1:	sub	%o1, (1 << 3), %o1
	ldx	[%o2 + %o1], %o3
	andcc	%o3, 1, %g0
	andn	%o3, 1, %o3
	be,pn	%icc, 2f
	or	%o3, 0x10, %o3
	stxa	%g0, [%o3] ASI_IMMU_DEMAP
2:	stxa	%g0, [%o3] ASI_DMMU_DEMAP
	membar	#Sync
	brnz,pt	%o1, 1b
	nop
	stxa	%g2, [%o4] ASI_DMMU
	sethi	%hi(KERNBASE), %o4
	flush	%o4
	retl
	wrpr	%g7, 0x0, %pstate
	nop
	nop
	nop
	nop

	.align	32
	.globl	__flush_tlb_kernel_range
__flush_tlb_kernel_range:	/* 16 insns */
	/* %o0=start, %o1=end */
	cmp	%o0, %o1
	be,pn	%xcc, 2f
	sethi	%hi(PAGE_SIZE), %o4
	sub	%o1, %o0, %o3
	sub	%o3, %o4, %o3
	or	%o0, 0x20, %o0		! Nucleus
1:	stxa	%g0, [%o0 + %o3] ASI_DMMU_DEMAP
	stxa	%g0, [%o0 + %o3] ASI_IMMU_DEMAP
	membar	#Sync
	brnz,pt	%o3, 1b
	sub	%o3, %o4, %o3
2:	sethi	%hi(KERNBASE), %o3
	flush	%o3
	retl
	nop
	nop

__spitfire_flush_tlb_mm_slow:
	rdpr	%pstate, %g1
	wrpr	%g1, PSTATE_IE, %pstate
	stxa	%o0, [%o1] ASI_DMMU
	stxa	%g0, [%g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g3] ASI_IMMU_DEMAP
	flush	%g6
	stxa	%g2, [%o1] ASI_DMMU
	sethi	%hi(KERNBASE), %o1
	flush	%o1
	retl
	wrpr	%g1, 0, %pstate

/*
 * The following code flushes one page_size worth.
 */
#if (PAGE_SHIFT == 13)
#define ITAG_MASK 0xfe
#elif (PAGE_SHIFT == 16)
#define ITAG_MASK 0x7fe
#else
#error unsupported PAGE_SIZE
#endif
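	/* __flush_icache_page below forms the kernel linear-mapping
	 * (PAGE_OFFSET) alias of the physical page, then issues a
	 * flush for every 32-byte I-cache line covering the page.
	 */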
	.section .kprobes.text, "ax"
	.align	32
	.globl	__flush_icache_page
__flush_icache_page:	/* %o0 = phys_page */
	membar	#StoreStore
	srlx	%o0, PAGE_SHIFT, %o0
	sethi	%uhi(PAGE_OFFSET), %g1
	sllx	%o0, PAGE_SHIFT, %o0
	sethi	%hi(PAGE_SIZE), %g2
	sllx	%g1, 32, %g1
	add	%o0, %g1, %o0
1:	subcc	%g2, 32, %g2
	bne,pt	%icc, 1b
	flush	%o0 + %g2
	retl
	nop

#ifdef DCACHE_ALIASING_POSSIBLE

#if (PAGE_SHIFT != 13)
#error only page shift of 13 is supported by dcache flush
#endif

#define DTAG_MASK 0x3

	/* This routine is Spitfire specific so the hardcoded
	 * D-cache size and line-size are OK.
	 */
	.align	64
	.globl	__flush_dcache_page
__flush_dcache_page:	/* %o0=kaddr, %o1=flush_icache */
	sethi	%uhi(PAGE_OFFSET), %g1
	sllx	%g1, 32, %g1
	sub	%o0, %g1, %o0			! physical address
	srlx	%o0, 11, %o0			! make D-cache TAG
	sethi	%hi(1 << 14), %o2		! D-cache size
	sub	%o2, (1 << 5), %o2		! D-cache line size
1:	ldxa	[%o2] ASI_DCACHE_TAG, %o3	! load D-cache TAG
	andcc	%o3, DTAG_MASK, %g0		! Valid?
	be,pn	%xcc, 2f			! Nope, branch
	andn	%o3, DTAG_MASK, %o3		! Clear valid bits
	cmp	%o3, %o0			! TAG match?
	bne,pt	%xcc, 2f			! Nope, branch
	nop
	stxa	%g0, [%o2] ASI_DCACHE_TAG	! Invalidate TAG
	membar	#Sync
2:	brnz,pt	%o2, 1b
	sub	%o2, (1 << 5), %o2		! D-cache line size

	/* The I-cache does not snoop local stores so we
	 * better flush that too when necessary.
	 */
	brnz,pt	%o1, __flush_icache_page
	sllx	%o0, 11, %o0
	retl
	nop

#endif /* DCACHE_ALIASING_POSSIBLE */

	.previous

	/* Cheetah specific versions, patched at boot time. */
__cheetah_flush_tlb_mm:	/* 19 insns */
	rdpr	%pstate, %g7
	andn	%g7, PSTATE_IE, %g2
	wrpr	%g2, 0x0, %pstate
	wrpr	%g0, 1, %tl
	mov	PRIMARY_CONTEXT, %o2
	mov	0x40, %g3
	ldxa	[%o2] ASI_DMMU, %g2
	srlx	%g2, CTX_PGSZ1_NUC_SHIFT, %o1
	sllx	%o1, CTX_PGSZ1_NUC_SHIFT, %o1
	or	%o0, %o1, %o0	/* Preserve nucleus page size fields */
	stxa	%o0, [%o2] ASI_DMMU
	stxa	%g0, [%g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g3] ASI_IMMU_DEMAP
	stxa	%g2, [%o2] ASI_DMMU
	sethi	%hi(KERNBASE), %o2
	flush	%o2
	wrpr	%g0, 0, %tl
	retl
	wrpr	%g7, 0x0, %pstate

__cheetah_flush_tlb_pending:	/* 27 insns */
	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
	rdpr	%pstate, %g7
	sllx	%o1, 3, %o1
	andn	%g7, PSTATE_IE, %g2
	wrpr	%g2, 0x0, %pstate
	wrpr	%g0, 1, %tl
	mov	PRIMARY_CONTEXT, %o4
	ldxa	[%o4] ASI_DMMU, %g2
	srlx	%g2, CTX_PGSZ1_NUC_SHIFT, %o3
	sllx	%o3, CTX_PGSZ1_NUC_SHIFT, %o3
	or	%o0, %o3, %o0	/* Preserve nucleus page size fields */
	stxa	%o0, [%o4] ASI_DMMU
1:	sub	%o1, (1 << 3), %o1
	ldx	[%o2 + %o1], %o3
	andcc	%o3, 1, %g0
	be,pn	%icc, 2f
	andn	%o3, 1, %o3
	stxa	%g0, [%o3] ASI_IMMU_DEMAP
2:	stxa	%g0, [%o3] ASI_DMMU_DEMAP
	membar	#Sync
	brnz,pt	%o1, 1b
	nop
	stxa	%g2, [%o4] ASI_DMMU
	sethi	%hi(KERNBASE), %o4
	flush	%o4
	wrpr	%g0, 0, %tl
	retl
	wrpr	%g7, 0x0, %pstate

#ifdef DCACHE_ALIASING_POSSIBLE
__cheetah_flush_dcache_page:	/* 11 insns */
	sethi	%uhi(PAGE_OFFSET), %g1
	sllx	%g1, 32, %g1
	sub	%o0, %g1, %o0
	sethi	%hi(PAGE_SIZE), %o4
1:	subcc	%o4, (1 << 5), %o4
	stxa	%g0, [%o0 + %o4] ASI_DCACHE_INVALIDATE
	membar	#Sync
	bne,pt	%icc, 1b
	nop
	retl	/* I-cache flush never needed on Cheetah, see callers. */
	nop
#endif /* DCACHE_ALIASING_POSSIBLE */

	/* Hypervisor specific versions, patched at boot time. */
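	/* sun4v fast traps: the function number goes in %o5, arguments
	 * in the low %o registers (%o0-%o3 here), and "ta HV_FAST_TRAP"
	 * enters the hypervisor; the return status comes back in %o0.
	 * HV_FAST_MMU_DEMAP_CTX demaps all TLB entries for one context.
	 */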
__hypervisor_flush_tlb_mm:	/* 8 insns */
	mov	%o0, %o2	/* ARG2: mmu context */
	mov	0, %o0		/* ARG0: CPU lists unimplemented */
	mov	0, %o1		/* ARG1: CPU lists unimplemented */
	mov	HV_MMU_ALL, %o3	/* ARG3: flags */
	mov	HV_FAST_MMU_DEMAP_CTX, %o5
	ta	HV_FAST_TRAP
	retl
	nop

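	/* Bit 0 of each vaddrs[] entry flags an executable mapping:
	 * when it is set the I-TLB entry must go too, so HV_MMU_ALL
	 * is selected instead of HV_MMU_DMMU before the unmap trap.
	 */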
__hypervisor_flush_tlb_pending:	/* 15 insns */
	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
	sllx	%o1, 3, %g1
	mov	%o2, %g2
	mov	%o0, %g3
1:	sub	%g1, (1 << 3), %g1
	ldx	[%g2 + %g1], %o0	/* ARG0: vaddr + IMMU-bit */
	mov	%g3, %o1		/* ARG1: mmu context */
	mov	HV_MMU_DMMU, %o2
	andcc	%o0, 1, %g0
	movne	%icc, HV_MMU_ALL, %o2	/* ARG2: flags */
	andn	%o0, 1, %o0
	ta	HV_MMU_UNMAP_ADDR_TRAP
	brnz,pt	%g1, 1b
	nop
	retl
	nop

__hypervisor_flush_tlb_kernel_range:	/* 14 insns */
	/* %o0=start, %o1=end */
	cmp	%o0, %o1
	be,pn	%xcc, 2f
	sethi	%hi(PAGE_SIZE), %g3
	mov	%o0, %g1
	sub	%o1, %g1, %g2
	sub	%g2, %g3, %g2
1:	add	%g1, %g2, %o0	/* ARG0: virtual address */
	mov	0, %o1		/* ARG1: mmu context */
	mov	HV_MMU_ALL, %o2	/* ARG2: flags */
	ta	HV_MMU_UNMAP_ADDR_TRAP
	brnz,pt	%g2, 1b
	sub	%g2, %g3, %g2
2:	retl
	nop

#ifdef DCACHE_ALIASING_POSSIBLE
	/* XXX Niagara and friends have an 8K cache, so no aliasing is
	 * XXX possible, but nothing explicit in the Hypervisor API
	 * XXX guarantees this.
	 */
__hypervisor_flush_dcache_page:	/* 2 insns */
	retl
	nop
#endif

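	/* tlb_patch_one copies %o2 instruction words from %o1 to %o0,
	 * flushing the I-cache after each store.  The "NN insns"
	 * annotations on the routines above record the slot sizes;
	 * a replacement must never outgrow the nop-padded slot it
	 * is copied into.
	 */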
tlb_patch_one:
1:	lduw	[%o1], %g1
	stw	%g1, [%o0]
	flush	%o0
	subcc	%o2, 1, %o2
	add	%o1, 4, %o1
	bne,pt	%icc, 1b
	add	%o0, 4, %o0
	retl
	nop

	.globl	cheetah_patch_cachetlbops
cheetah_patch_cachetlbops:
	save	%sp, -128, %sp

	sethi	%hi(__flush_tlb_mm), %o0
	or	%o0, %lo(__flush_tlb_mm), %o0
	sethi	%hi(__cheetah_flush_tlb_mm), %o1
	or	%o1, %lo(__cheetah_flush_tlb_mm), %o1
	call	tlb_patch_one
	mov	19, %o2

	sethi	%hi(__flush_tlb_pending), %o0
	or	%o0, %lo(__flush_tlb_pending), %o0
	sethi	%hi(__cheetah_flush_tlb_pending), %o1
	or	%o1, %lo(__cheetah_flush_tlb_pending), %o1
	call	tlb_patch_one
	mov	27, %o2

#ifdef DCACHE_ALIASING_POSSIBLE
	sethi	%hi(__flush_dcache_page), %o0
	or	%o0, %lo(__flush_dcache_page), %o0
	sethi	%hi(__cheetah_flush_dcache_page), %o1
	or	%o1, %lo(__cheetah_flush_dcache_page), %o1
	call	tlb_patch_one
	mov	11, %o2
#endif /* DCACHE_ALIASING_POSSIBLE */

	ret
	restore

#ifdef CONFIG_SMP
	/* These are all called by the slaves of a cross call, at
	 * trap level 1, with interrupts fully disabled.
	 *
	 * Register usage:
	 *   %g5	mm->context	(all tlb flushes)
	 *   %g1	address arg 1	(tlb page and range flushes)
	 *   %g7	address arg 2	(tlb range flush only)
	 *
	 *   %g6	scratch 1
	 *   %g2	scratch 2
	 *   %g3	scratch 3
	 *   %g4	scratch 4
	 */
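	/* These values are loaded from the incoming cross-call mondo
	 * data by the trap-table entry before control reaches the
	 * handlers below; the senders live in arch/sparc64/kernel/smp.c.
	 */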
	.align	32
	.globl	xcall_flush_tlb_mm
xcall_flush_tlb_mm:	/* 18 insns */
	mov	PRIMARY_CONTEXT, %g2
	ldxa	[%g2] ASI_DMMU, %g3
	srlx	%g3, CTX_PGSZ1_NUC_SHIFT, %g4
	sllx	%g4, CTX_PGSZ1_NUC_SHIFT, %g4
	or	%g5, %g4, %g5	/* Preserve nucleus page size fields */
	stxa	%g5, [%g2] ASI_DMMU
	mov	0x40, %g4
	stxa	%g0, [%g4] ASI_DMMU_DEMAP
	stxa	%g0, [%g4] ASI_IMMU_DEMAP
	stxa	%g3, [%g2] ASI_DMMU
	retry
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	.globl	xcall_flush_tlb_pending
xcall_flush_tlb_pending:	/* 20 insns */
	/* %g5=context, %g1=nr, %g7=vaddrs[] */
	sllx	%g1, 3, %g1
	mov	PRIMARY_CONTEXT, %g4
	ldxa	[%g4] ASI_DMMU, %g2
	srlx	%g2, CTX_PGSZ1_NUC_SHIFT, %g4
	sllx	%g4, CTX_PGSZ1_NUC_SHIFT, %g4
	or	%g5, %g4, %g5
	mov	PRIMARY_CONTEXT, %g4
	stxa	%g5, [%g4] ASI_DMMU
1:	sub	%g1, (1 << 3), %g1
	ldx	[%g7 + %g1], %g5
	andcc	%g5, 0x1, %g0
	be,pn	%icc, 2f
	andn	%g5, 0x1, %g5
	stxa	%g0, [%g5] ASI_IMMU_DEMAP
2:	stxa	%g0, [%g5] ASI_DMMU_DEMAP
	membar	#Sync
	brnz,pt	%g1, 1b
	nop
	stxa	%g2, [%g4] ASI_DMMU
	retry

	.globl	xcall_flush_tlb_kernel_range
xcall_flush_tlb_kernel_range:	/* 22 insns */
	sethi	%hi(PAGE_SIZE - 1), %g2
	or	%g2, %lo(PAGE_SIZE - 1), %g2
	andn	%g1, %g2, %g1
	andn	%g7, %g2, %g7
	sub	%g7, %g1, %g3
	add	%g2, 1, %g2
	sub	%g3, %g2, %g3
	or	%g1, 0x20, %g1		! Nucleus
1:	stxa	%g0, [%g1 + %g3] ASI_DMMU_DEMAP
	stxa	%g0, [%g1 + %g3] ASI_IMMU_DEMAP
	membar	#Sync
	brnz,pt	%g3, 1b
	sub	%g3, %g2, %g3
	retry
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	/* This runs in a very controlled environment, so we do
	 * not need to worry about BH races etc.
	 */
	.globl	xcall_sync_tick
xcall_sync_tick:

661:	rdpr	%pstate, %g2
	wrpr	%g2, PSTATE_IG | PSTATE_AG, %pstate
	.section	.sun4v_2insn_patch, "ax"
	.word	661b
	nop
	nop
	.previous

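	/* At boot on sun4v the two instructions at the 661: label are
	 * overwritten with the two listed in the patch section (here
	 * just nops), since sun4v has no alternate-globals PSTATE bits
	 * to twiddle.
	 */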
	rdpr	%pil, %g2
	wrpr	%g0, 15, %pil
	sethi	%hi(109f), %g7
	b,pt	%xcc, etrap_irq
109:	or	%g7, %lo(109b), %g7
	call	smp_synchronize_tick_client
	nop
	clr	%l6
	b	rtrap_xcall
	ldx	[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1

	/* NOTE: This is SPECIAL!! We do etrap/rtrap however
	 *       we choose to deal with the "BH's run with
	 *       %pil==15" problem (described in asm/pil.h)
	 *       by just invoking rtrap directly past where
	 *       BH's are checked for.
	 *
	 *       We do it like this because we do not want %pil==15
	 *       lockups to prevent regs being reported.
	 */
	.globl	xcall_report_regs
xcall_report_regs:

661:	rdpr	%pstate, %g2
	wrpr	%g2, PSTATE_IG | PSTATE_AG, %pstate
	.section	.sun4v_2insn_patch, "ax"
	.word	661b
	nop
	nop
	.previous

	rdpr	%pil, %g2
	wrpr	%g0, 15, %pil
	sethi	%hi(109f), %g7
	b,pt	%xcc, etrap_irq
109:	or	%g7, %lo(109b), %g7
	call	__show_regs
	add	%sp, PTREGS_OFF, %o0
	clr	%l6
	/* Has to be a non-v9 branch due to the large distance. */
	b	rtrap_xcall
	ldx	[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1

#ifdef DCACHE_ALIASING_POSSIBLE
	.align	32
	.globl	xcall_flush_dcache_page_cheetah
xcall_flush_dcache_page_cheetah: /* %g1 == physical page address */
	sethi	%hi(PAGE_SIZE), %g3
1:	subcc	%g3, (1 << 5), %g3
	stxa	%g0, [%g1 + %g3] ASI_DCACHE_INVALIDATE
	membar	#Sync
	bne,pt	%icc, 1b
	nop
	retry
	nop
#endif /* DCACHE_ALIASING_POSSIBLE */

	.globl	xcall_flush_dcache_page_spitfire
xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
				     %g7 == kernel page virtual address
				     %g5 == (page->mapping != NULL) */
#ifdef DCACHE_ALIASING_POSSIBLE
	srlx	%g1, (13 - 2), %g1	! Form tag comparator
	sethi	%hi(L1DCACHE_SIZE), %g3	! D$ size == 16K
	sub	%g3, (1 << 5), %g3	! D$ linesize == 32
1:	ldxa	[%g3] ASI_DCACHE_TAG, %g2
	andcc	%g2, 0x3, %g0
	be,pn	%xcc, 2f
	andn	%g2, 0x3, %g2
	cmp	%g2, %g1
	bne,pt	%xcc, 2f
	nop
	stxa	%g0, [%g3] ASI_DCACHE_TAG
	membar	#Sync
2:	cmp	%g3, 0
	bne,pt	%xcc, 1b
	sub	%g3, (1 << 5), %g3

	brz,pn	%g5, 2f
#endif /* DCACHE_ALIASING_POSSIBLE */
	sethi	%hi(PAGE_SIZE), %g3

1:	flush	%g7
	subcc	%g3, (1 << 5), %g3
	bne,pt	%icc, 1b
	add	%g7, (1 << 5), %g7

2:	retry
	nop
	nop

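	/* The three hypervisor versions below run on the interrupted
	 * context's trap globals, so %o0-%o5 still belong to whatever
	 * was running.  Each routine stages the live %o registers into
	 * %g scratch, makes the hypervisor call (which consumes the %o
	 * registers), and restores them before the retry.
	 */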
	.globl	__hypervisor_xcall_flush_tlb_mm
__hypervisor_xcall_flush_tlb_mm: /* 18 insns */
	/* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */
	mov	%o0, %g2
	mov	%o1, %g3
	mov	%o2, %g4
	mov	%o3, %g1
	mov	%o5, %g7
	clr	%o0		/* ARG0: CPU lists unimplemented */
	clr	%o1		/* ARG1: CPU lists unimplemented */
	mov	%g5, %o2	/* ARG2: mmu context */
	mov	HV_MMU_ALL, %o3	/* ARG3: flags */
	mov	HV_FAST_MMU_DEMAP_CTX, %o5
	ta	HV_FAST_TRAP
	mov	%g2, %o0
	mov	%g3, %o1
	mov	%g4, %o2
	mov	%g1, %o3
	mov	%g7, %o5
	membar	#Sync
	retry

	.globl	__hypervisor_xcall_flush_tlb_pending
__hypervisor_xcall_flush_tlb_pending: /* 19 insns */
	/* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4=scratch, %g6=unusable */
	sllx	%g1, 3, %g1
	mov	%o0, %g2
	mov	%o1, %g3
	mov	%o2, %g4
1:	sub	%g1, (1 << 3), %g1
	ldx	[%g7 + %g1], %o0	/* ARG0: virtual address */
	mov	%g5, %o1		/* ARG1: mmu context */
	mov	HV_MMU_DMMU, %o2
	andcc	%o0, 1, %g0
	movne	%icc, HV_MMU_ALL, %o2	/* ARG2: flags */
	andn	%o0, 1, %o0
	ta	HV_MMU_UNMAP_ADDR_TRAP
	brnz,pt	%g1, 1b
	nop
	mov	%g2, %o0
	mov	%g3, %o1
	mov	%g4, %o2
	membar	#Sync
	retry

	.globl	__hypervisor_xcall_flush_tlb_kernel_range
__hypervisor_xcall_flush_tlb_kernel_range: /* 22 insns */
	/* %g1=start, %g7=end, g2,g3,g4,g5=scratch, g6=unusable */
	sethi	%hi(PAGE_SIZE - 1), %g2
	or	%g2, %lo(PAGE_SIZE - 1), %g2
	andn	%g1, %g2, %g1
	andn	%g7, %g2, %g7
	sub	%g7, %g1, %g3
	add	%g2, 1, %g2
	sub	%g3, %g2, %g3
	mov	%o0, %g2
	mov	%o1, %g4
	mov	%o2, %g5
1:	add	%g1, %g3, %o0	/* ARG0: virtual address */
	mov	0, %o1		/* ARG1: mmu context */
	mov	HV_MMU_ALL, %o2	/* ARG2: flags */
	ta	HV_MMU_UNMAP_ADDR_TRAP
	sethi	%hi(PAGE_SIZE), %o2
	brnz,pt	%g3, 1b
	sub	%g3, %o2, %g3
	mov	%g2, %o0
	mov	%g4, %o1
	mov	%g5, %o2
	membar	#Sync
	retry

	/* These just get rescheduled to PIL vectors. */
	.globl	xcall_call_function
xcall_call_function:
	wr	%g0, (1 << PIL_SMP_CALL_FUNC), %set_softint
	retry

	.globl	xcall_receive_signal
xcall_receive_signal:
	wr	%g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint
	retry

	.globl	xcall_capture
xcall_capture:
	wr	%g0, (1 << PIL_SMP_CAPTURE), %set_softint
	retry

#endif /* CONFIG_SMP */

	.globl	hypervisor_patch_cachetlbops
hypervisor_patch_cachetlbops:
	save	%sp, -128, %sp

	sethi	%hi(__flush_tlb_mm), %o0
	or	%o0, %lo(__flush_tlb_mm), %o0
	sethi	%hi(__hypervisor_flush_tlb_mm), %o1
	or	%o1, %lo(__hypervisor_flush_tlb_mm), %o1
	call	tlb_patch_one
	mov	8, %o2

	sethi	%hi(__flush_tlb_pending), %o0
	or	%o0, %lo(__flush_tlb_pending), %o0
	sethi	%hi(__hypervisor_flush_tlb_pending), %o1
	or	%o1, %lo(__hypervisor_flush_tlb_pending), %o1
	call	tlb_patch_one
	mov	15, %o2

	sethi	%hi(__flush_tlb_kernel_range), %o0
	or	%o0, %lo(__flush_tlb_kernel_range), %o0
	sethi	%hi(__hypervisor_flush_tlb_kernel_range), %o1
	or	%o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1
	call	tlb_patch_one
	mov	14, %o2

#ifdef DCACHE_ALIASING_POSSIBLE
	sethi	%hi(__flush_dcache_page), %o0
	or	%o0, %lo(__flush_dcache_page), %o0
	sethi	%hi(__hypervisor_flush_dcache_page), %o1
	or	%o1, %lo(__hypervisor_flush_dcache_page), %o1
	call	tlb_patch_one
	mov	2, %o2
#endif /* DCACHE_ALIASING_POSSIBLE */

#ifdef CONFIG_SMP
	sethi	%hi(xcall_flush_tlb_mm), %o0
	or	%o0, %lo(xcall_flush_tlb_mm), %o0
	sethi	%hi(__hypervisor_xcall_flush_tlb_mm), %o1
	or	%o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1
	call	tlb_patch_one
	mov	18, %o2

	sethi	%hi(xcall_flush_tlb_pending), %o0
	or	%o0, %lo(xcall_flush_tlb_pending), %o0
	sethi	%hi(__hypervisor_xcall_flush_tlb_pending), %o1
	or	%o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1
	call	tlb_patch_one
	mov	19, %o2

	sethi	%hi(xcall_flush_tlb_kernel_range), %o0
	or	%o0, %lo(xcall_flush_tlb_kernel_range), %o0
	sethi	%hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1
	or	%o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1
	call	tlb_patch_one
	mov	22, %o2
#endif /* CONFIG_SMP */

	ret
	restore