/*
 * arch/ia64/kvm/optvfault.S
 * optimize virtualization fault handler
 *
 * Copyright (C) 2006 Intel Co
 *	Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
 *
 * Copyright (C) 2008 Intel Co
 *	Add the support for Tukwila processors.
 *	Xiantao Zhang <xiantao.zhang@intel.com>
 */

#include <asm/asmmacro.h>
#include <asm/processor.h>

#include "vti.h"
#include "asm-offsets.h"

/* Each ACCE_* switch enables the fast-path emulation below; undefine
 * one to force that instruction back to the slow C handler. */
#define ACCE_MOV_FROM_AR
#define ACCE_MOV_FROM_RR
#define ACCE_MOV_TO_RR
#define ACCE_RSM
#define ACCE_SSM
#define ACCE_MOV_TO_PSR
#define ACCE_THASH

1f095610
XZ
26#define VMX_VPS_SYNC_READ \
27 add r16=VMM_VPD_BASE_OFFSET,r21; \
28 mov r17 = b0; \
29 mov r18 = r24; \
30 mov r19 = r25; \
31 mov r20 = r31; \
32 ;; \
33{.mii; \
34 ld8 r16 = [r16]; \
35 nop 0x0; \
36 mov r24 = ip; \
37 ;; \
38}; \
39{.mmb; \
40 add r24=0x20, r24; \
41 mov r25 =r16; \
42 br.sptk.many kvm_vps_sync_read; \
43}; \
44 mov b0 = r17; \
45 mov r24 = r18; \
46 mov r25 = r19; \
47 mov r31 = r20
48
81aec522
XZ
49ENTRY(kvm_vps_entry)
50 adds r29 = VMM_VCPU_VSA_BASE_OFFSET,r21
51 ;;
52 ld8 r29 = [r29]
53 ;;
54 add r29 = r29, r30
55 ;;
56 mov b0 = r29
57 br.sptk.many b0
58END(kvm_vps_entry)
59
60/*
61 * Inputs:
62 * r24 : return address
63 * r25 : vpd
64 * r29 : scratch
65 *
66 */
67GLOBAL_ENTRY(kvm_vps_sync_read)
68 movl r30 = PAL_VPS_SYNC_READ
69 ;;
70 br.sptk.many kvm_vps_entry
71END(kvm_vps_sync_read)
72
73/*
74 * Inputs:
75 * r24 : return address
76 * r25 : vpd
77 * r29 : scratch
78 *
79 */
80GLOBAL_ENTRY(kvm_vps_sync_write)
81 movl r30 = PAL_VPS_SYNC_WRITE
82 ;;
83 br.sptk.many kvm_vps_entry
84END(kvm_vps_sync_write)
85
86/*
87 * Inputs:
88 * r23 : pr
89 * r24 : guest b0
90 * r25 : vpd
91 *
92 */
93GLOBAL_ENTRY(kvm_vps_resume_normal)
94 movl r30 = PAL_VPS_RESUME_NORMAL
95 ;;
96 mov pr=r23,-2
97 br.sptk.many kvm_vps_entry
98END(kvm_vps_resume_normal)
99
100/*
101 * Inputs:
102 * r23 : pr
103 * r24 : guest b0
104 * r25 : vpd
105 * r17 : isr
106 */
107GLOBAL_ENTRY(kvm_vps_resume_handler)
108 movl r30 = PAL_VPS_RESUME_HANDLER
109 ;;
ce50b91d 110 ld8 r26=[r25]
81aec522
XZ
111 shr r17=r17,IA64_ISR_IR_BIT
112 ;;
ce50b91d 113 dep r26=r17,r26,63,1 // bit 63 of r26 indicate whether enable CFLE
81aec522
XZ
114 mov pr=r23,-2
115 br.sptk.many kvm_vps_entry
116END(kvm_vps_resume_handler)
117
7fc86bd9
XZ
118//mov r1=ar3
119GLOBAL_ENTRY(kvm_asm_mov_from_ar)
120#ifndef ACCE_MOV_FROM_AR
121 br.many kvm_virtualization_fault_back
122#endif
123 add r18=VMM_VCPU_ITC_OFS_OFFSET, r21
124 add r16=VMM_VCPU_LAST_ITC_OFFSET,r21
125 extr.u r17=r25,6,7
126 ;;
127 ld8 r18=[r18]
128 mov r19=ar.itc
129 mov r24=b0
130 ;;
131 add r19=r19,r18
132 addl r20=@gprel(asm_mov_to_reg),gp
133 ;;
134 st8 [r16] = r19
135 adds r30=kvm_resume_to_guest-asm_mov_to_reg,r20
136 shladd r17=r17,4,r20
137 ;;
138 mov b0=r17
139 br.sptk.few b0
140 ;;
141END(kvm_asm_mov_from_ar)
142
143
144// mov r1=rr[r3]
145GLOBAL_ENTRY(kvm_asm_mov_from_rr)
146#ifndef ACCE_MOV_FROM_RR
147 br.many kvm_virtualization_fault_back
148#endif
149 extr.u r16=r25,20,7
150 extr.u r17=r25,6,7
151 addl r20=@gprel(asm_mov_from_reg),gp
152 ;;
153 adds r30=kvm_asm_mov_from_rr_back_1-asm_mov_from_reg,r20
154 shladd r16=r16,4,r20
155 mov r24=b0
156 ;;
157 add r27=VMM_VCPU_VRR0_OFFSET,r21
158 mov b0=r16
159 br.many b0
160 ;;
161kvm_asm_mov_from_rr_back_1:
162 adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20
163 adds r22=asm_mov_to_reg-asm_mov_from_reg,r20
164 shr.u r26=r19,61
165 ;;
166 shladd r17=r17,4,r22
167 shladd r27=r26,3,r27
168 ;;
169 ld8 r19=[r27]
170 mov b0=r17
171 br.many b0
172END(kvm_asm_mov_from_rr)
173
174
175// mov rr[r3]=r2
176GLOBAL_ENTRY(kvm_asm_mov_to_rr)
177#ifndef ACCE_MOV_TO_RR
178 br.many kvm_virtualization_fault_back
179#endif
180 extr.u r16=r25,20,7
181 extr.u r17=r25,13,7
182 addl r20=@gprel(asm_mov_from_reg),gp
183 ;;
184 adds r30=kvm_asm_mov_to_rr_back_1-asm_mov_from_reg,r20
185 shladd r16=r16,4,r20
186 mov r22=b0
187 ;;
188 add r27=VMM_VCPU_VRR0_OFFSET,r21
189 mov b0=r16
190 br.many b0
191 ;;
192kvm_asm_mov_to_rr_back_1:
193 adds r30=kvm_asm_mov_to_rr_back_2-asm_mov_from_reg,r20
194 shr.u r23=r19,61
195 shladd r17=r17,4,r20
196 ;;
197 //if rr6, go back
198 cmp.eq p6,p0=6,r23
199 mov b0=r22
200 (p6) br.cond.dpnt.many kvm_virtualization_fault_back
201 ;;
202 mov r28=r19
203 mov b0=r17
204 br.many b0
205kvm_asm_mov_to_rr_back_2:
206 adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20
207 shladd r27=r23,3,r27
208 ;; // vrr.rid<<4 |0xe
209 st8 [r27]=r19
210 mov b0=r30
211 ;;
212 extr.u r16=r19,8,26
213 extr.u r18 =r19,2,6
214 mov r17 =0xe
215 ;;
216 shladd r16 = r16, 4, r17
217 extr.u r19 =r19,0,8
218 ;;
219 shl r16 = r16,8
220 ;;
221 add r19 = r19, r16
222 ;; //set ve 1
223 dep r19=-1,r19,0,1
224 cmp.lt p6,p0=14,r18
225 ;;
226 (p6) mov r18=14
227 ;;
228 (p6) dep r19=r18,r19,2,6
229 ;;
230 cmp.eq p6,p0=0,r23
231 ;;
232 cmp.eq.or p6,p0=4,r23
233 ;;
234 adds r16=VMM_VCPU_MODE_FLAGS_OFFSET,r21
235 (p6) adds r17=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
236 ;;
237 ld4 r16=[r16]
238 cmp.eq p7,p0=r0,r0
239 (p6) shladd r17=r23,1,r17
240 ;;
241 (p6) st8 [r17]=r19
242 (p6) tbit.nz p6,p7=r16,0
243 ;;
244 (p7) mov rr[r28]=r19
245 mov r24=r22
246 br.many b0
247END(kvm_asm_mov_to_rr)
248
249
250//rsm
251GLOBAL_ENTRY(kvm_asm_rsm)
252#ifndef ACCE_RSM
253 br.many kvm_virtualization_fault_back
254#endif
1f095610
XZ
255 VMX_VPS_SYNC_READ
256 ;;
7fc86bd9
XZ
257 extr.u r26=r25,6,21
258 extr.u r27=r25,31,2
259 ;;
7fc86bd9
XZ
260 extr.u r28=r25,36,1
261 dep r26=r27,r26,21,2
262 ;;
263 add r17=VPD_VPSR_START_OFFSET,r16
264 add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21
265 //r26 is imm24
266 dep r26=r28,r26,23,1
267 ;;
268 ld8 r18=[r17]
269 movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI
270 ld4 r23=[r22]
271 sub r27=-1,r26
272 mov r24=b0
273 ;;
274 mov r20=cr.ipsr
275 or r28=r27,r28
276 and r19=r18,r27
277 ;;
278 st8 [r17]=r19
279 and r20=r20,r28
280 /* Comment it out due to short of fp lazy alorgithm support
281 adds r27=IA64_VCPU_FP_PSR_OFFSET,r21
282 ;;
283 ld8 r27=[r27]
284 ;;
285 tbit.nz p8,p0= r27,IA64_PSR_DFH_BIT
286 ;;
287 (p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1
288 */
289 ;;
290 mov cr.ipsr=r20
291 tbit.nz p6,p0=r23,0
292 ;;
293 tbit.z.or p6,p0=r26,IA64_PSR_DT_BIT
1f095610 294 (p6) br.dptk kvm_resume_to_guest_with_sync
7fc86bd9
XZ
295 ;;
296 add r26=VMM_VCPU_META_RR0_OFFSET,r21
297 add r27=VMM_VCPU_META_RR0_OFFSET+8,r21
298 dep r23=-1,r23,0,1
299 ;;
300 ld8 r26=[r26]
301 ld8 r27=[r27]
302 st4 [r22]=r23
303 dep.z r28=4,61,3
304 ;;
305 mov rr[r0]=r26
306 ;;
307 mov rr[r28]=r27
308 ;;
309 srlz.d
1f095610 310 br.many kvm_resume_to_guest_with_sync
7fc86bd9
XZ
311END(kvm_asm_rsm)
312
313
314//ssm
315GLOBAL_ENTRY(kvm_asm_ssm)
316#ifndef ACCE_SSM
317 br.many kvm_virtualization_fault_back
318#endif
1f095610
XZ
319 VMX_VPS_SYNC_READ
320 ;;
7fc86bd9
XZ
321 extr.u r26=r25,6,21
322 extr.u r27=r25,31,2
323 ;;
7fc86bd9
XZ
324 extr.u r28=r25,36,1
325 dep r26=r27,r26,21,2
326 ;; //r26 is imm24
327 add r27=VPD_VPSR_START_OFFSET,r16
328 dep r26=r28,r26,23,1
329 ;; //r19 vpsr
330 ld8 r29=[r27]
331 mov r24=b0
332 ;;
333 add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21
334 mov r20=cr.ipsr
335 or r19=r29,r26
336 ;;
337 ld4 r23=[r22]
338 st8 [r27]=r19
339 or r20=r20,r26
340 ;;
341 mov cr.ipsr=r20
342 movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT
343 ;;
344 and r19=r28,r19
345 tbit.z p6,p0=r23,0
346 ;;
347 cmp.ne.or p6,p0=r28,r19
348 (p6) br.dptk kvm_asm_ssm_1
349 ;;
350 add r26=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
351 add r27=VMM_VCPU_META_SAVED_RR0_OFFSET+8,r21
352 dep r23=0,r23,0,1
353 ;;
354 ld8 r26=[r26]
355 ld8 r27=[r27]
356 st4 [r22]=r23
357 dep.z r28=4,61,3
358 ;;
359 mov rr[r0]=r26
360 ;;
361 mov rr[r28]=r27
362 ;;
363 srlz.d
364 ;;
365kvm_asm_ssm_1:
366 tbit.nz p6,p0=r29,IA64_PSR_I_BIT
367 ;;
368 tbit.z.or p6,p0=r19,IA64_PSR_I_BIT
1f095610 369 (p6) br.dptk kvm_resume_to_guest_with_sync
7fc86bd9
XZ
370 ;;
371 add r29=VPD_VTPR_START_OFFSET,r16
372 add r30=VPD_VHPI_START_OFFSET,r16
373 ;;
374 ld8 r29=[r29]
375 ld8 r30=[r30]
376 ;;
377 extr.u r17=r29,4,4
378 extr.u r18=r29,16,1
379 ;;
380 dep r17=r18,r17,4,1
381 ;;
382 cmp.gt p6,p0=r30,r17
383 (p6) br.dpnt.few kvm_asm_dispatch_vexirq
1f095610 384 br.many kvm_resume_to_guest_with_sync
7fc86bd9
XZ
385END(kvm_asm_ssm)
386
387
388//mov psr.l=r2
389GLOBAL_ENTRY(kvm_asm_mov_to_psr)
390#ifndef ACCE_MOV_TO_PSR
391 br.many kvm_virtualization_fault_back
392#endif
1f095610 393 VMX_VPS_SYNC_READ
7fc86bd9 394 ;;
1f095610 395 extr.u r26=r25,13,7 //r2
7fc86bd9
XZ
396 addl r20=@gprel(asm_mov_from_reg),gp
397 ;;
398 adds r30=kvm_asm_mov_to_psr_back-asm_mov_from_reg,r20
399 shladd r26=r26,4,r20
400 mov r24=b0
401 ;;
402 add r27=VPD_VPSR_START_OFFSET,r16
403 mov b0=r26
404 br.many b0
405 ;;
406kvm_asm_mov_to_psr_back:
407 ld8 r17=[r27]
408 add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21
409 dep r19=0,r19,32,32
410 ;;
411 ld4 r23=[r22]
412 dep r18=0,r17,0,32
413 ;;
414 add r30=r18,r19
415 movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT
416 ;;
417 st8 [r27]=r30
418 and r27=r28,r30
419 and r29=r28,r17
420 ;;
421 cmp.eq p5,p0=r29,r27
422 cmp.eq p6,p7=r28,r27
423 (p5) br.many kvm_asm_mov_to_psr_1
424 ;;
425 //virtual to physical
426 (p7) add r26=VMM_VCPU_META_RR0_OFFSET,r21
427 (p7) add r27=VMM_VCPU_META_RR0_OFFSET+8,r21
428 (p7) dep r23=-1,r23,0,1
429 ;;
430 //physical to virtual
431 (p6) add r26=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
432 (p6) add r27=VMM_VCPU_META_SAVED_RR0_OFFSET+8,r21
433 (p6) dep r23=0,r23,0,1
434 ;;
435 ld8 r26=[r26]
436 ld8 r27=[r27]
437 st4 [r22]=r23
438 dep.z r28=4,61,3
439 ;;
440 mov rr[r0]=r26
441 ;;
442 mov rr[r28]=r27
443 ;;
444 srlz.d
445 ;;
446kvm_asm_mov_to_psr_1:
447 mov r20=cr.ipsr
448 movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI+IA64_PSR_RT
449 ;;
450 or r19=r19,r28
451 dep r20=0,r20,0,32
452 ;;
453 add r20=r19,r20
454 mov b0=r24
455 ;;
456 /* Comment it out due to short of fp lazy algorithm support
457 adds r27=IA64_VCPU_FP_PSR_OFFSET,r21
458 ;;
459 ld8 r27=[r27]
460 ;;
461 tbit.nz p8,p0=r27,IA64_PSR_DFH_BIT
462 ;;
463 (p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1
464 ;;
465 */
466 mov cr.ipsr=r20
467 cmp.ne p6,p0=r0,r0
468 ;;
469 tbit.nz.or p6,p0=r17,IA64_PSR_I_BIT
470 tbit.z.or p6,p0=r30,IA64_PSR_I_BIT
1f095610 471 (p6) br.dpnt.few kvm_resume_to_guest_with_sync
7fc86bd9
XZ
472 ;;
473 add r29=VPD_VTPR_START_OFFSET,r16
474 add r30=VPD_VHPI_START_OFFSET,r16
475 ;;
476 ld8 r29=[r29]
477 ld8 r30=[r30]
478 ;;
479 extr.u r17=r29,4,4
480 extr.u r18=r29,16,1
481 ;;
482 dep r17=r18,r17,4,1
483 ;;
484 cmp.gt p6,p0=r30,r17
485 (p6) br.dpnt.few kvm_asm_dispatch_vexirq
1f095610 486 br.many kvm_resume_to_guest_with_sync
7fc86bd9
XZ
487END(kvm_asm_mov_to_psr)
488
489
490ENTRY(kvm_asm_dispatch_vexirq)
491//increment iip
1f095610
XZ
492 mov r17 = b0
493 mov r18 = r31
494{.mii
495 add r25=VMM_VPD_BASE_OFFSET,r21
496 nop 0x0
497 mov r24 = ip
498 ;;
499}
500{.mmb
501 add r24 = 0x20, r24
502 ld8 r25 = [r25]
503 br.sptk.many kvm_vps_sync_write
504}
505 mov b0 =r17
7fc86bd9 506 mov r16=cr.ipsr
1f095610
XZ
507 mov r31 = r18
508 mov r19 = 37
7fc86bd9
XZ
509 ;;
510 extr.u r17=r16,IA64_PSR_RI_BIT,2
511 tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1
512 ;;
513 (p6) mov r18=cr.iip
514 (p6) mov r17=r0
515 (p7) add r17=1,r17
516 ;;
517 (p6) add r18=0x10,r18
518 dep r16=r17,r16,IA64_PSR_RI_BIT,2
519 ;;
520 (p6) mov cr.iip=r18
521 mov cr.ipsr=r16
522 mov r30 =1
523 br.many kvm_dispatch_vexirq
524END(kvm_asm_dispatch_vexirq)
525
526// thash
527// TODO: add support when pta.vf = 1
528GLOBAL_ENTRY(kvm_asm_thash)
529#ifndef ACCE_THASH
530 br.many kvm_virtualization_fault_back
531#endif
532 extr.u r17=r25,20,7 // get r3 from opcode in r25
533 extr.u r18=r25,6,7 // get r1 from opcode in r25
534 addl r20=@gprel(asm_mov_from_reg),gp
535 ;;
536 adds r30=kvm_asm_thash_back1-asm_mov_from_reg,r20
537 shladd r17=r17,4,r20 // get addr of MOVE_FROM_REG(r17)
538 adds r16=VMM_VPD_BASE_OFFSET,r21 // get vcpu.arch.priveregs
539 ;;
540 mov r24=b0
541 ;;
542 ld8 r16=[r16] // get VPD addr
543 mov b0=r17
544 br.many b0 // r19 return value
545 ;;
546kvm_asm_thash_back1:
547 shr.u r23=r19,61 // get RR number
1f095610 548 adds r28=VMM_VCPU_VRR0_OFFSET,r21 // get vcpu->arch.vrr[0]'s addr
7fc86bd9
XZ
549 adds r16=VMM_VPD_VPTA_OFFSET,r16 // get vpta
550 ;;
1f095610 551 shladd r27=r23,3,r28 // get vcpu->arch.vrr[r23]'s addr
7fc86bd9
XZ
552 ld8 r17=[r16] // get PTA
553 mov r26=1
554 ;;
1f095610
XZ
555 extr.u r29=r17,2,6 // get pta.size
556 ld8 r28=[r27] // get vcpu->arch.vrr[r23]'s value
7fc86bd9 557 ;;
1f095610
XZ
558 mov b0=r24
559 //Fallback to C if pta.vf is set
560 tbit.nz p6,p0=r17, 8
561 ;;
562 (p6) mov r24=EVENT_THASH
563 (p6) br.cond.dpnt.many kvm_virtualization_fault_back
564 extr.u r28=r28,2,6 // get rr.ps
7fc86bd9
XZ
565 shl r22=r26,r29 // 1UL << pta.size
566 ;;
1f095610 567 shr.u r23=r19,r28 // vaddr >> rr.ps
7fc86bd9
XZ
568 adds r26=3,r29 // pta.size + 3
569 shl r27=r17,3 // pta << 3
570 ;;
571 shl r23=r23,3 // (vaddr >> rr.ps) << 3
1f095610 572 shr.u r27=r27,r26 // (pta << 3) >> (pta.size+3)
7fc86bd9
XZ
573 movl r16=7<<61
574 ;;
575 adds r22=-1,r22 // (1UL << pta.size) - 1
576 shl r27=r27,r29 // ((pta<<3)>>(pta.size+3))<<pta.size
577 and r19=r19,r16 // vaddr & VRN_MASK
578 ;;
579 and r22=r22,r23 // vhpt_offset
580 or r19=r19,r27 // (vadr&VRN_MASK)|(((pta<<3)>>(pta.size + 3))<<pta.size)
581 adds r26=asm_mov_to_reg-asm_mov_from_reg,r20
582 ;;
583 or r19=r19,r22 // calc pval
584 shladd r17=r18,4,r26
585 adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20
586 ;;
587 mov b0=r17
588 br.many b0
589END(kvm_asm_thash)
590
591#define MOV_TO_REG0 \
592{; \
593 nop.b 0x0; \
594 nop.b 0x0; \
595 nop.b 0x0; \
596 ;; \
597};
598
599
600#define MOV_TO_REG(n) \
601{; \
602 mov r##n##=r19; \
603 mov b0=r30; \
604 br.sptk.many b0; \
605 ;; \
606};
607
608
609#define MOV_FROM_REG(n) \
610{; \
611 mov r19=r##n##; \
612 mov b0=r30; \
613 br.sptk.many b0; \
614 ;; \
615};
616
617
618#define MOV_TO_BANK0_REG(n) \
619ENTRY_MIN_ALIGN(asm_mov_to_bank0_reg##n##); \
620{; \
621 mov r26=r2; \
622 mov r2=r19; \
623 bsw.1; \
624 ;; \
625}; \
626{; \
627 mov r##n##=r2; \
628 nop.b 0x0; \
629 bsw.0; \
630 ;; \
631}; \
632{; \
633 mov r2=r26; \
634 mov b0=r30; \
635 br.sptk.many b0; \
636 ;; \
637}; \
638END(asm_mov_to_bank0_reg##n##)
639
640
641#define MOV_FROM_BANK0_REG(n) \
642ENTRY_MIN_ALIGN(asm_mov_from_bank0_reg##n##); \
643{; \
644 mov r26=r2; \
645 nop.b 0x0; \
646 bsw.1; \
647 ;; \
648}; \
649{; \
650 mov r2=r##n##; \
651 nop.b 0x0; \
652 bsw.0; \
653 ;; \
654}; \
655{; \
656 mov r19=r2; \
657 mov r2=r26; \
658 mov b0=r30; \
659}; \
660{; \
661 nop.b 0x0; \
662 nop.b 0x0; \
663 br.sptk.many b0; \
664 ;; \
665}; \
666END(asm_mov_from_bank0_reg##n##)
667
668
669#define JMP_TO_MOV_TO_BANK0_REG(n) \
670{; \
671 nop.b 0x0; \
672 nop.b 0x0; \
673 br.sptk.many asm_mov_to_bank0_reg##n##; \
674 ;; \
675}
676
677
678#define JMP_TO_MOV_FROM_BANK0_REG(n) \
679{; \
680 nop.b 0x0; \
681 nop.b 0x0; \
682 br.sptk.many asm_mov_from_bank0_reg##n##; \
683 ;; \
684}
685
686
687MOV_FROM_BANK0_REG(16)
688MOV_FROM_BANK0_REG(17)
689MOV_FROM_BANK0_REG(18)
690MOV_FROM_BANK0_REG(19)
691MOV_FROM_BANK0_REG(20)
692MOV_FROM_BANK0_REG(21)
693MOV_FROM_BANK0_REG(22)
694MOV_FROM_BANK0_REG(23)
695MOV_FROM_BANK0_REG(24)
696MOV_FROM_BANK0_REG(25)
697MOV_FROM_BANK0_REG(26)
698MOV_FROM_BANK0_REG(27)
699MOV_FROM_BANK0_REG(28)
700MOV_FROM_BANK0_REG(29)
701MOV_FROM_BANK0_REG(30)
702MOV_FROM_BANK0_REG(31)
703
704
705// mov from reg table
706ENTRY(asm_mov_from_reg)
707 MOV_FROM_REG(0)
708 MOV_FROM_REG(1)
709 MOV_FROM_REG(2)
710 MOV_FROM_REG(3)
711 MOV_FROM_REG(4)
712 MOV_FROM_REG(5)
713 MOV_FROM_REG(6)
714 MOV_FROM_REG(7)
715 MOV_FROM_REG(8)
716 MOV_FROM_REG(9)
717 MOV_FROM_REG(10)
718 MOV_FROM_REG(11)
719 MOV_FROM_REG(12)
720 MOV_FROM_REG(13)
721 MOV_FROM_REG(14)
722 MOV_FROM_REG(15)
723 JMP_TO_MOV_FROM_BANK0_REG(16)
724 JMP_TO_MOV_FROM_BANK0_REG(17)
725 JMP_TO_MOV_FROM_BANK0_REG(18)
726 JMP_TO_MOV_FROM_BANK0_REG(19)
727 JMP_TO_MOV_FROM_BANK0_REG(20)
728 JMP_TO_MOV_FROM_BANK0_REG(21)
729 JMP_TO_MOV_FROM_BANK0_REG(22)
730 JMP_TO_MOV_FROM_BANK0_REG(23)
731 JMP_TO_MOV_FROM_BANK0_REG(24)
732 JMP_TO_MOV_FROM_BANK0_REG(25)
733 JMP_TO_MOV_FROM_BANK0_REG(26)
734 JMP_TO_MOV_FROM_BANK0_REG(27)
735 JMP_TO_MOV_FROM_BANK0_REG(28)
736 JMP_TO_MOV_FROM_BANK0_REG(29)
737 JMP_TO_MOV_FROM_BANK0_REG(30)
738 JMP_TO_MOV_FROM_BANK0_REG(31)
739 MOV_FROM_REG(32)
740 MOV_FROM_REG(33)
741 MOV_FROM_REG(34)
742 MOV_FROM_REG(35)
743 MOV_FROM_REG(36)
744 MOV_FROM_REG(37)
745 MOV_FROM_REG(38)
746 MOV_FROM_REG(39)
747 MOV_FROM_REG(40)
748 MOV_FROM_REG(41)
749 MOV_FROM_REG(42)
750 MOV_FROM_REG(43)
751 MOV_FROM_REG(44)
752 MOV_FROM_REG(45)
753 MOV_FROM_REG(46)
754 MOV_FROM_REG(47)
755 MOV_FROM_REG(48)
756 MOV_FROM_REG(49)
757 MOV_FROM_REG(50)
758 MOV_FROM_REG(51)
759 MOV_FROM_REG(52)
760 MOV_FROM_REG(53)
761 MOV_FROM_REG(54)
762 MOV_FROM_REG(55)
763 MOV_FROM_REG(56)
764 MOV_FROM_REG(57)
765 MOV_FROM_REG(58)
766 MOV_FROM_REG(59)
767 MOV_FROM_REG(60)
768 MOV_FROM_REG(61)
769 MOV_FROM_REG(62)
770 MOV_FROM_REG(63)
771 MOV_FROM_REG(64)
772 MOV_FROM_REG(65)
773 MOV_FROM_REG(66)
774 MOV_FROM_REG(67)
775 MOV_FROM_REG(68)
776 MOV_FROM_REG(69)
777 MOV_FROM_REG(70)
778 MOV_FROM_REG(71)
779 MOV_FROM_REG(72)
780 MOV_FROM_REG(73)
781 MOV_FROM_REG(74)
782 MOV_FROM_REG(75)
783 MOV_FROM_REG(76)
784 MOV_FROM_REG(77)
785 MOV_FROM_REG(78)
786 MOV_FROM_REG(79)
787 MOV_FROM_REG(80)
788 MOV_FROM_REG(81)
789 MOV_FROM_REG(82)
790 MOV_FROM_REG(83)
791 MOV_FROM_REG(84)
792 MOV_FROM_REG(85)
793 MOV_FROM_REG(86)
794 MOV_FROM_REG(87)
795 MOV_FROM_REG(88)
796 MOV_FROM_REG(89)
797 MOV_FROM_REG(90)
798 MOV_FROM_REG(91)
799 MOV_FROM_REG(92)
800 MOV_FROM_REG(93)
801 MOV_FROM_REG(94)
802 MOV_FROM_REG(95)
803 MOV_FROM_REG(96)
804 MOV_FROM_REG(97)
805 MOV_FROM_REG(98)
806 MOV_FROM_REG(99)
807 MOV_FROM_REG(100)
808 MOV_FROM_REG(101)
809 MOV_FROM_REG(102)
810 MOV_FROM_REG(103)
811 MOV_FROM_REG(104)
812 MOV_FROM_REG(105)
813 MOV_FROM_REG(106)
814 MOV_FROM_REG(107)
815 MOV_FROM_REG(108)
816 MOV_FROM_REG(109)
817 MOV_FROM_REG(110)
818 MOV_FROM_REG(111)
819 MOV_FROM_REG(112)
820 MOV_FROM_REG(113)
821 MOV_FROM_REG(114)
822 MOV_FROM_REG(115)
823 MOV_FROM_REG(116)
824 MOV_FROM_REG(117)
825 MOV_FROM_REG(118)
826 MOV_FROM_REG(119)
827 MOV_FROM_REG(120)
828 MOV_FROM_REG(121)
829 MOV_FROM_REG(122)
830 MOV_FROM_REG(123)
831 MOV_FROM_REG(124)
832 MOV_FROM_REG(125)
833 MOV_FROM_REG(126)
834 MOV_FROM_REG(127)
835END(asm_mov_from_reg)
836
837
838/* must be in bank 0
839 * parameter:
840 * r31: pr
841 * r24: b0
842 */
1f095610
XZ
843ENTRY(kvm_resume_to_guest_with_sync)
844 adds r19=VMM_VPD_BASE_OFFSET,r21
845 mov r16 = r31
846 mov r17 = r24
847 ;;
848{.mii
849 ld8 r25 =[r19]
850 nop 0x0
851 mov r24 = ip
852 ;;
853}
854{.mmb
855 add r24 =0x20, r24
856 nop 0x0
857 br.sptk.many kvm_vps_sync_write
858}
859
860 mov r31 = r16
861 mov r24 =r17
862 ;;
863 br.sptk.many kvm_resume_to_guest
864END(kvm_resume_to_guest_with_sync)
865
7fc86bd9
XZ
866ENTRY(kvm_resume_to_guest)
867 adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
868 ;;
869 ld8 r1 =[r16]
870 adds r20 = VMM_VCPU_VSA_BASE_OFFSET,r21
871 ;;
872 mov r16=cr.ipsr
873 ;;
874 ld8 r20 = [r20]
875 adds r19=VMM_VPD_BASE_OFFSET,r21
876 ;;
877 ld8 r25=[r19]
878 extr.u r17=r16,IA64_PSR_RI_BIT,2
879 tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1
880 ;;
881 (p6) mov r18=cr.iip
882 (p6) mov r17=r0
883 ;;
884 (p6) add r18=0x10,r18
885 (p7) add r17=1,r17
886 ;;
887 (p6) mov cr.iip=r18
888 dep r16=r17,r16,IA64_PSR_RI_BIT,2
889 ;;
890 mov cr.ipsr=r16
891 adds r19= VPD_VPSR_START_OFFSET,r25
892 add r28=PAL_VPS_RESUME_NORMAL,r20
893 add r29=PAL_VPS_RESUME_HANDLER,r20
894 ;;
895 ld8 r19=[r19]
896 mov b0=r29
ce50b91d 897 mov r27=cr.isr
7fc86bd9 898 ;;
ce50b91d
XZ
899 tbit.z p6,p7 = r19,IA64_PSR_IC_BIT // p7=vpsr.ic
900 shr r27=r27,IA64_ISR_IR_BIT
7fc86bd9
XZ
901 ;;
902 (p6) ld8 r26=[r25]
903 (p7) mov b0=r28
ce50b91d
XZ
904 ;;
905 (p6) dep r26=r27,r26,63,1
7fc86bd9
XZ
906 mov pr=r31,-2
907 br.sptk.many b0 // call pal service
908 ;;
909END(kvm_resume_to_guest)
910
911
912MOV_TO_BANK0_REG(16)
913MOV_TO_BANK0_REG(17)
914MOV_TO_BANK0_REG(18)
915MOV_TO_BANK0_REG(19)
916MOV_TO_BANK0_REG(20)
917MOV_TO_BANK0_REG(21)
918MOV_TO_BANK0_REG(22)
919MOV_TO_BANK0_REG(23)
920MOV_TO_BANK0_REG(24)
921MOV_TO_BANK0_REG(25)
922MOV_TO_BANK0_REG(26)
923MOV_TO_BANK0_REG(27)
924MOV_TO_BANK0_REG(28)
925MOV_TO_BANK0_REG(29)
926MOV_TO_BANK0_REG(30)
927MOV_TO_BANK0_REG(31)
928
929
930// mov to reg table
931ENTRY(asm_mov_to_reg)
932 MOV_TO_REG0
933 MOV_TO_REG(1)
934 MOV_TO_REG(2)
935 MOV_TO_REG(3)
936 MOV_TO_REG(4)
937 MOV_TO_REG(5)
938 MOV_TO_REG(6)
939 MOV_TO_REG(7)
940 MOV_TO_REG(8)
941 MOV_TO_REG(9)
942 MOV_TO_REG(10)
943 MOV_TO_REG(11)
944 MOV_TO_REG(12)
945 MOV_TO_REG(13)
946 MOV_TO_REG(14)
947 MOV_TO_REG(15)
948 JMP_TO_MOV_TO_BANK0_REG(16)
949 JMP_TO_MOV_TO_BANK0_REG(17)
950 JMP_TO_MOV_TO_BANK0_REG(18)
951 JMP_TO_MOV_TO_BANK0_REG(19)
952 JMP_TO_MOV_TO_BANK0_REG(20)
953 JMP_TO_MOV_TO_BANK0_REG(21)
954 JMP_TO_MOV_TO_BANK0_REG(22)
955 JMP_TO_MOV_TO_BANK0_REG(23)
956 JMP_TO_MOV_TO_BANK0_REG(24)
957 JMP_TO_MOV_TO_BANK0_REG(25)
958 JMP_TO_MOV_TO_BANK0_REG(26)
959 JMP_TO_MOV_TO_BANK0_REG(27)
960 JMP_TO_MOV_TO_BANK0_REG(28)
961 JMP_TO_MOV_TO_BANK0_REG(29)
962 JMP_TO_MOV_TO_BANK0_REG(30)
963 JMP_TO_MOV_TO_BANK0_REG(31)
964 MOV_TO_REG(32)
965 MOV_TO_REG(33)
966 MOV_TO_REG(34)
967 MOV_TO_REG(35)
968 MOV_TO_REG(36)
969 MOV_TO_REG(37)
970 MOV_TO_REG(38)
971 MOV_TO_REG(39)
972 MOV_TO_REG(40)
973 MOV_TO_REG(41)
974 MOV_TO_REG(42)
975 MOV_TO_REG(43)
976 MOV_TO_REG(44)
977 MOV_TO_REG(45)
978 MOV_TO_REG(46)
979 MOV_TO_REG(47)
980 MOV_TO_REG(48)
981 MOV_TO_REG(49)
982 MOV_TO_REG(50)
983 MOV_TO_REG(51)
984 MOV_TO_REG(52)
985 MOV_TO_REG(53)
986 MOV_TO_REG(54)
987 MOV_TO_REG(55)
988 MOV_TO_REG(56)
989 MOV_TO_REG(57)
990 MOV_TO_REG(58)
991 MOV_TO_REG(59)
992 MOV_TO_REG(60)
993 MOV_TO_REG(61)
994 MOV_TO_REG(62)
995 MOV_TO_REG(63)
996 MOV_TO_REG(64)
997 MOV_TO_REG(65)
998 MOV_TO_REG(66)
999 MOV_TO_REG(67)
1000 MOV_TO_REG(68)
1001 MOV_TO_REG(69)
1002 MOV_TO_REG(70)
1003 MOV_TO_REG(71)
1004 MOV_TO_REG(72)
1005 MOV_TO_REG(73)
1006 MOV_TO_REG(74)
1007 MOV_TO_REG(75)
1008 MOV_TO_REG(76)
1009 MOV_TO_REG(77)
1010 MOV_TO_REG(78)
1011 MOV_TO_REG(79)
1012 MOV_TO_REG(80)
1013 MOV_TO_REG(81)
1014 MOV_TO_REG(82)
1015 MOV_TO_REG(83)
1016 MOV_TO_REG(84)
1017 MOV_TO_REG(85)
1018 MOV_TO_REG(86)
1019 MOV_TO_REG(87)
1020 MOV_TO_REG(88)
1021 MOV_TO_REG(89)
1022 MOV_TO_REG(90)
1023 MOV_TO_REG(91)
1024 MOV_TO_REG(92)
1025 MOV_TO_REG(93)
1026 MOV_TO_REG(94)
1027 MOV_TO_REG(95)
1028 MOV_TO_REG(96)
1029 MOV_TO_REG(97)
1030 MOV_TO_REG(98)
1031 MOV_TO_REG(99)
1032 MOV_TO_REG(100)
1033 MOV_TO_REG(101)
1034 MOV_TO_REG(102)
1035 MOV_TO_REG(103)
1036 MOV_TO_REG(104)
1037 MOV_TO_REG(105)
1038 MOV_TO_REG(106)
1039 MOV_TO_REG(107)
1040 MOV_TO_REG(108)
1041 MOV_TO_REG(109)
1042 MOV_TO_REG(110)
1043 MOV_TO_REG(111)
1044 MOV_TO_REG(112)
1045 MOV_TO_REG(113)
1046 MOV_TO_REG(114)
1047 MOV_TO_REG(115)
1048 MOV_TO_REG(116)
1049 MOV_TO_REG(117)
1050 MOV_TO_REG(118)
1051 MOV_TO_REG(119)
1052 MOV_TO_REG(120)
1053 MOV_TO_REG(121)
1054 MOV_TO_REG(122)
1055 MOV_TO_REG(123)
1056 MOV_TO_REG(124)
1057 MOV_TO_REG(125)
1058 MOV_TO_REG(126)
1059 MOV_TO_REG(127)
1060END(asm_mov_to_reg)