/* arch/powerpc/kvm/book3s_hv_rmhandlers.S */

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/export.h>
#include <asm/tm.h>
#include <asm/opal.h>
#include <asm/xive-regs.h>
#include <asm/thread_info.h>
#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>

/* Sign-extend HDEC if not on POWER9 */
#define EXTEND_HDEC(reg)			\
BEGIN_FTR_SECTION;				\
	extsw	reg, reg;			\
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

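/*
 * Before ISA v3.00 the HDEC is a 32-bit register, so a value read with
 * mfspr must be sign-extended before 64-bit signed comparisons; on
 * POWER9 the hypervisor decrementer is wider, so no extension is done.
 */
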
/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2

/* Stack frame offsets for kvmppc_hv_entry */
#define SFS			208
#define STACK_SLOT_TRAP		(SFS-4)
#define STACK_SLOT_SHORT_PATH	(SFS-8)
#define STACK_SLOT_TID		(SFS-16)
#define STACK_SLOT_PSSCR	(SFS-24)
#define STACK_SLOT_PID		(SFS-32)
#define STACK_SLOT_IAMR		(SFS-40)
#define STACK_SLOT_CIABR	(SFS-48)
#define STACK_SLOT_DAWR		(SFS-56)
#define STACK_SLOT_DAWRX	(SFS-64)
#define STACK_SLOT_HFSCR	(SFS-72)
/* the following is used by the P9 short path */
#define STACK_SLOT_NVGPRS	(SFS-152)	/* 18 gprs */

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)
	mfmsr	r10
	std	r10, HSTATE_HOST_MSR(r13)
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI_TO_KERNEL

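/*
 * We arrive here from the rfid above with instruction and data
 * relocation (MSR_IR/MSR_DR) off, i.e. in real mode; the host MSR has
 * been saved in HSTATE_HOST_MSR and LR is preserved on the stack for
 * the return path.
 */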
kvmppc_call_hv_entry:
BEGIN_FTR_SECTION
	/* On P9, do LPCR setting, if necessary */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	cmpdi	r3, 0
	beq	46f
	lwz	r4, KVM_SPLIT_DO_SET(r3)
	cmpwi	r4, 0
	beq	46f
	bl	kvmhv_p9_set_lpcr
	nop
46:
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from guest - restore host state and return to caller */

BEGIN_FTR_SECTION
	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Restore SPRG3 */
	ld	r3,PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE,r3

	/* Reload the host's PMU registers */
	bl	kvmhv_load_host_pmu

	/*
	 * Reload DEC. HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4
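	/*
	 * HSTATE_DECEXP holds the host decrementer expiry time as a
	 * timebase value, so DEC is reloaded above with (expiry - current TB).
	 */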

	/* hwthread_req may have got set by cede or no vcpu, so clear it */
	li	r0, 0
	stb	r0, HSTATE_HWTHREAD_REQ(r13)

	/*
	 * For external interrupts we need to call the Linux
	 * handler to process the interrupt. We do that by jumping
	 * to absolute address 0x500 for external interrupts.
	 * The [h]rfid at the end of the handler will return to
	 * the book3s_hv_interrupts.S code. For other interrupts
	 * we do the rfid to get back to the book3s_hv_interrupts.S
	 * code here.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	ld	r7, HSTATE_HOST_MSR(r13)

	/* Return the trap number on this thread as the return value */
	mr	r3, r12

	/*
	 * If we came back from the guest via a relocation-on interrupt,
	 * we will be in virtual mode at this point, which makes it a
	 * little easier to get back to the caller.
	 */
	mfmsr	r0
	andi.	r0, r0, MSR_IR		/* in real mode? */
	bne	.Lvirt_return

	/* RFI into the highmem handler */
	mfmsr	r6
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	RFI_TO_KERNEL

	/* Virtual-mode return */
.Lvirt_return:
	mtlr	r8
	blr

kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
	/* HDEC may be larger than DEC for arch >= v3.00, but since the */
	/* HDEC value came from DEC in the first place, it will fit */
	mfspr	r3, SPRN_HDEC
	mtspr	SPRN_DEC, r3
	/*
	 * Make sure the primary has finished the MMU switch.
	 * We should never get here on a secondary thread, but
	 * check it for robustness' sake.
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)
65:	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	65b
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync
	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
1:	lwarx	r3, 0, r6
	or	r3, r3, r0
	stwcx.	r3, 0, r6
	bne	1b
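	/*
	 * The lwarx/stwcx. pair above sets this thread's bit in
	 * napping_threads atomically; stwcx. fails and the loop retries
	 * if another thread updated the word in the meantime.
	 */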
	/* order napping_threads update vs testing entry_exit_map */
	isync
	li	r12, 0
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	cmpwi	r7, 0x100
	bge	kvm_novcpu_exit	/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)

	li	r3, 0		/* Don't wake on privileged (OS) doorbell */
	b	kvm_do_nap

/*
 * kvm_novcpu_wakeup
 *	Entered from kvm_start_guest if kvm_hstate.napping is set
 *	to NAPPING_NOVCPU
 *		r2 = kernel TOC
 *		r13 = paca
 */
kvm_novcpu_wakeup:
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	li	r0, 0
	stb	r0, HSTATE_NAPPING(r13)

	/* check the wake reason */
	bl	kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called
	 * a C routine in kvmppc_check_wake_reason.
	 *	r5 = VCORE
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	bge	kvm_novcpu_exit

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
4:	lwarx	r7, 0, r6
	andc	r7, r7, r0
	stwcx.	r7, 0, r6
	bne	4b

	/* See if the wake reason means we need to exit */
	cmpdi	r3, 0
	bge	kvm_novcpu_exit

	/* See if our timeslice has expired (HDEC is negative) */
	mfspr	r0, SPRN_HDEC
	EXTEND_HDEC(r0)
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
	cmpdi	r0, 0
	blt	kvm_novcpu_exit

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
#endif
	b	kvmppc_got_guest

kvm_novcpu_exit:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	13f
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
13:	mr	r3, r12
	stw	r12, STACK_SLOT_TRAP(r1)
	bl	kvmhv_commence_exit
	nop
	b	kvmhv_switch_to_host

371fefd6 290/*
e0b7ec05 291 * We come in here when wakened from nap mode.
371fefd6
PM
292 * Relocation is off and most register values are lost.
293 * r13 points to the PACA.
9d292501 294 * r3 contains the SRR1 wakeup value, SRR1 is trashed.
371fefd6
PM
295 */
296 .globl kvm_start_guest
297kvm_start_guest:
fd17dc7b 298 /* Set runlatch bit the minute you wake up from nap */
1f09c3ed
PM
299 mfspr r0, SPRN_CTRLF
300 ori r0, r0, 1
301 mtspr SPRN_CTRLT, r0
fd17dc7b 302
9d292501
NP
303 /*
304 * Could avoid this and pass it through in r3. For now,
305 * code expects it to be in SRR1.
306 */
307 mtspr SPRN_SRR1,r3
308
19ccb76a
PM
309 ld r2,PACATOC(r13)
310
a4bc64d3
NR
311 li r0,0
312 stb r0,PACA_FTRACE_ENABLED(r13)
313
f0888f70
PM
314 li r0,KVM_HWTHREAD_IN_KVM
315 stb r0,HSTATE_HWTHREAD_STATE(r13)
371fefd6 316
f0888f70
PM
317 /* NV GPR values from power7_idle() will no longer be valid */
318 li r0,1
319 stb r0,PACA_NAPSTATELOST(r13)
371fefd6 320
4619ac88
PM
321 /* were we napping due to cede? */
322 lbz r0,HSTATE_NAPPING(r13)
e0b7ec05
PM
323 cmpwi r0,NAPPING_CEDE
324 beq kvm_end_cede
325 cmpwi r0,NAPPING_NOVCPU
326 beq kvm_novcpu_wakeup
327
328 ld r1,PACAEMERGSP(r13)
329 subi r1,r1,STACK_FRAME_OVERHEAD
4619ac88
PM
330
331 /*
332 * We weren't napping due to cede, so this must be a secondary
333 * thread being woken up to run a guest, or being woken up due
334 * to a stray IPI. (Or due to some machine check or hypervisor
335 * maintenance interrupt while the core is in KVM.)
336 */
f0888f70
PM
337
338 /* Check the wake reason in SRR1 to see why we got here */
e3bbbbfa 339 bl kvmppc_check_wake_reason
37f55d30
SW
340 /*
341 * kvmppc_check_wake_reason could invoke a C routine, but we
342 * have no volatile registers to restore when we return.
343 */
344
e3bbbbfa
PM
345 cmpdi r3, 0
346 bge kvm_no_guest
371fefd6 347
b4deba5c
PM
348 /* get vcore pointer, NULL if we have nothing to run */
349 ld r5,HSTATE_KVM_VCORE(r13)
350 cmpdi r5,0
351 /* if we have no vcore to run, go back to sleep */
7b444c67 352 beq kvm_no_guest
f0888f70 353
56548fc0
PM
354kvm_secondary_got_guest:
355
e0b7ec05 356 /* Set HSTATE_DSCR(r13) to something sensible */
1db36525 357 ld r6, PACA_DSCR_DEFAULT(r13)
e0b7ec05 358 std r6, HSTATE_DSCR(r13)
2fde6d20 359
b4deba5c
PM
360 /* On thread 0 of a subcore, set HDEC to max */
361 lbz r4, HSTATE_PTID(r13)
362 cmpwi r4, 0
363 bne 63f
2f272463
PM
364 LOAD_REG_ADDR(r6, decrementer_max)
365 ld r6, 0(r6)
b4deba5c
PM
366 mtspr SPRN_HDEC, r6
367 /* and set per-LPAR registers, if doing dynamic micro-threading */
368 ld r6, HSTATE_SPLIT_MODE(r13)
369 cmpdi r6, 0
370 beq 63f
c0101509 371BEGIN_FTR_SECTION
b4deba5c
PM
372 ld r0, KVM_SPLIT_RPR(r6)
373 mtspr SPRN_RPR, r0
374 ld r0, KVM_SPLIT_PMMAR(r6)
375 mtspr SPRN_PMMAR, r0
376 ld r0, KVM_SPLIT_LDBAR(r6)
377 mtspr SPRN_LDBAR, r0
378 isync
c0101509
PM
379FTR_SECTION_ELSE
380 /* On P9 we use the split_info for coordinating LPCR changes */
381 lwz r4, KVM_SPLIT_DO_SET(r6)
382 cmpwi r4, 0
d20fe50a 383 beq 1f
c0101509
PM
384 mr r3, r6
385 bl kvmhv_p9_set_lpcr
386 nop
1:
c0101509 388ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
b4deba5c
PM
63:
390 /* Order load of vcpu after load of vcore */
5d5b99cd 391 lwsync
b4deba5c 392 ld r4, HSTATE_KVM_VCPU(r13)
e0b7ec05 393 bl kvmppc_hv_entry
218309b7
PM
394
395 /* Back from the guest, go back to nap */
b4deba5c 396 /* Clear our vcpu and vcore pointers so we don't come back in early */
218309b7 397 li r0, 0
b4deba5c 398 std r0, HSTATE_KVM_VCPU(r13)
f019b7ad 399 /*
b4deba5c 400 * Once we clear HSTATE_KVM_VCORE(r13), the code in
5d5b99cd
PM
401 * kvmppc_run_core() is going to assume that all our vcpu
402 * state is visible in memory. This lwsync makes sure
403 * that that is true.
f019b7ad 404 */
218309b7 405 lwsync
b4deba5c 406 std r0, HSTATE_KVM_VCORE(r13)
218309b7 407
fd7bacbc
MS
408 /*
409 * All secondaries exiting guest will fall through this path.
410 * Before proceeding, just check for HMI interrupt and
411 * invoke opal hmi handler. By now we are sure that the
412 * primary thread on this core/subcore has already made partition
413 * switch/TB resync and we are good to call opal hmi handler.
414 */
415 cmpwi r12, BOOK3S_INTERRUPT_HMI
416 bne kvm_no_guest
417
418 li r3,0 /* NULL argument */
419 bl hmi_exception_realmode
56548fc0
PM
420/*
421 * At this point we have finished executing in the guest.
422 * We need to wait for hwthread_req to become zero, since
423 * we may not turn on the MMU while hwthread_req is non-zero.
424 * While waiting we also need to check if we get given a vcpu to run.
425 */
218309b7 426kvm_no_guest:
56548fc0
PM
427 lbz r3, HSTATE_HWTHREAD_REQ(r13)
428 cmpwi r3, 0
429 bne 53f
430 HMT_MEDIUM
431 li r0, KVM_HWTHREAD_IN_KERNEL
218309b7 432 stb r0, HSTATE_HWTHREAD_STATE(r13)
56548fc0
PM
433 /* need to recheck hwthread_req after a barrier, to avoid race */
434 sync
435 lbz r3, HSTATE_HWTHREAD_REQ(r13)
436 cmpwi r3, 0
437 bne 54f
438/*
5fa6b6bd 439 * We jump to pnv_wakeup_loss, which will return to the caller
56548fc0 440 * of power7_nap in the powernv cpu offline loop. The value we
9d292501
NP
441 * put in r3 becomes the return value for power7_nap. pnv_wakeup_loss
442 * requires SRR1 in r12.
56548fc0 443 */
218309b7
PM
444 li r3, LPCR_PECE0
445 mfspr r4, SPRN_LPCR
446 rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
447 mtspr SPRN_LPCR, r4
56548fc0 448 li r3, 0
9d292501 449 mfspr r12,SPRN_SRR1
5fa6b6bd 450 b pnv_wakeup_loss
56548fc0
PM
451
53:	HMT_LOW
b4deba5c
PM
453 ld r5, HSTATE_KVM_VCORE(r13)
454 cmpdi r5, 0
455 bne 60f
456 ld r3, HSTATE_SPLIT_MODE(r13)
457 cmpdi r3, 0
458 beq kvm_no_guest
c0101509
PM
459 lwz r0, KVM_SPLIT_DO_SET(r3)
460 cmpwi r0, 0
461 bne kvmhv_do_set
462 lwz r0, KVM_SPLIT_DO_RESTORE(r3)
463 cmpwi r0, 0
464 bne kvmhv_do_restore
b4deba5c
PM
465 lbz r0, KVM_SPLIT_DO_NAP(r3)
466 cmpwi r0, 0
56548fc0
PM
467 beq kvm_no_guest
468 HMT_MEDIUM
b4deba5c
PM
469 b kvm_unsplit_nap
60:	HMT_MEDIUM
56548fc0
PM
471 b kvm_secondary_got_guest
472
54:	li	r0, KVM_HWTHREAD_IN_KVM
474 stb r0, HSTATE_HWTHREAD_STATE(r13)
475 b kvm_no_guest
218309b7 476
c0101509
PM
477kvmhv_do_set:
478 /* Set LPCR, LPIDR etc. on P9 */
479 HMT_MEDIUM
480 bl kvmhv_p9_set_lpcr
481 nop
482 b kvm_no_guest
483
484kvmhv_do_restore:
485 HMT_MEDIUM
486 bl kvmhv_p9_restore_lpcr
487 nop
488 b kvm_no_guest
489
b4deba5c
PM
490/*
491 * Here the primary thread is trying to return the core to
492 * whole-core mode, so we need to nap.
493 */
494kvm_unsplit_nap:
	/*
	 * When secondaries are napping in kvm_unsplit_nap() with
	 * hwthread_req = 1, an HMI is ignored even though the subcores
	 * have already exited the guest.  The HMI then keeps waking the
	 * secondaries from nap in a loop, and they always go back to nap
	 * since no vcore is assigned to them.  This makes it impossible
	 * for the primary thread to get hold of the secondary threads,
	 * resulting in a soft lockup in the KVM path.
	 *
	 * So check whether an HMI is pending and handle it before we go
	 * to nap.
	 */
506 cmpwi r12, BOOK3S_INTERRUPT_HMI
507 bne 55f
508 li r3, 0 /* NULL argument */
509 bl hmi_exception_realmode
55:
7f235328
GS
511 /*
512 * Ensure that secondary doesn't nap when it has
513 * its vcore pointer set.
514 */
515 sync /* matches smp_mb() before setting split_info.do_nap */
516 ld r0, HSTATE_KVM_VCORE(r13)
517 cmpdi r0, 0
518 bne kvm_no_guest
b4deba5c
PM
519 /* clear any pending message */
520BEGIN_FTR_SECTION
521 lis r6, (PPC_DBELL_SERVER << (63-36))@h
522 PPC_MSGCLR(6)
523END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
524 /* Set kvm_split_mode.napped[tid] = 1 */
525 ld r3, HSTATE_SPLIT_MODE(r13)
526 li r0, 1
c0101509 527 lbz r4, HSTATE_TID(r13)
b4deba5c
PM
528 addi r4, r4, KVM_SPLIT_NAPPED
529 stbx r0, r3, r4
530 /* Check the do_nap flag again after setting napped[] */
531 sync
532 lbz r0, KVM_SPLIT_DO_NAP(r3)
533 cmpwi r0, 0
534 beq 57f
535 li r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
bf53c88e
PM
536 mfspr r5, SPRN_LPCR
537 rlwimi r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
538 b kvm_nap_sequence
b4deba5c
PM
539
57:	li	r0, 0
541 stbx r0, r3, r4
542 b kvm_no_guest
543
218309b7
PM
544/******************************************************************************
545 * *
546 * Entry code *
547 * *
548 *****************************************************************************/
549
de56a948
PM
550.global kvmppc_hv_entry
551kvmppc_hv_entry:
552
553 /* Required state:
554 *
e0b7ec05 555 * R4 = vcpu pointer (or NULL)
de56a948
PM
556 * MSR = ~IR|DR
557 * R13 = PACA
558 * R1 = host R1
06a29e42 559 * R2 = TOC
de56a948 560 * all other volatile GPRS = free
f4c51f84 561 * Does not preserve non-volatile GPRs or CR fields
de56a948
PM
562 */
563 mflr r0
218309b7 564 std r0, PPC_LR_STKOFF(r1)
7ceaa6dc 565 stdu r1, -SFS(r1)
de56a948 566
de56a948
PM
567 /* Save R1 in the PACA */
568 std r1, HSTATE_HOST_R1(r13)
569
44a3add8
PM
570 li r6, KVM_GUEST_MODE_HOST_HV
571 stb r6, HSTATE_IN_GUEST(r13)
572
b6c295df
PM
573#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
574 /* Store initial timestamp */
575 cmpdi r4, 0
576 beq 1f
577 addi r3, r4, VCPU_TB_RMENTRY
578 bl kvmhv_start_timing
1:
580#endif
f4c51f84
PM
581
582 /* Use cr7 as an indication of radix mode */
583 ld r5, HSTATE_KVM_VCORE(r13)
584 ld r9, VCORE_KVM(r5) /* pointer to struct kvm */
585 lbz r0, KVM_RADIX(r9)
586 cmpwi cr7, r0, 0
587
9e368f29 588 /*
c17b98cf 589 * POWER7/POWER8 host -> guest partition switch code.
9e368f29
PM
590 * We don't have to lock against concurrent tlbies,
591 * but we do have to coordinate across hardware threads.
592 */
7d6c40da 593 /* Set bit in entry map iff exit map is zero. */
7d6c40da
PM
594 li r7, 1
595 lbz r6, HSTATE_PTID(r13)
596 sld r7, r7, r6
f4c51f84
PM
597 addi r8, r5, VCORE_ENTRY_EXIT
21:	lwarx	r3, 0, r8
7d6c40da 599 cmpwi r3, 0x100 /* any threads starting to exit? */
371fefd6 600 bge secondary_too_late /* if so we're too late to the party */
7d6c40da 601 or r3, r3, r7
f4c51f84 602 stwcx. r3, 0, r8
371fefd6
PM
603 bne 21b
604
605 /* Primary thread switches to guest partition. */
371fefd6 606 cmpwi r6,0
6af27c84 607 bne 10f
9a4506e1
NP
608
609 /* Radix has already switched LPID and flushed core TLB */
610 bne cr7, 22f
611
de56a948 612 lwz r7,KVM_LPID(r9)
7a84084c
PM
613BEGIN_FTR_SECTION
614 ld r6,KVM_SDR1(r9)
de56a948
PM
615 li r0,LPID_RSVD /* switch to reserved LPID */
616 mtspr SPRN_LPID,r0
617 ptesync
618 mtspr SPRN_SDR1,r6 /* switch to partition page table */
7a84084c 619END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
de56a948
PM
620 mtspr SPRN_LPID,r7
621 isync
1b400ba0 622
9a4506e1 623 /* See if we need to flush the TLB. Hash has to be done in RM */
1b400ba0 624 lhz r6,PACAPACAINDEX(r13) /* test_bit(cpu, need_tlb_flush) */
a29ebeaf
PM
625BEGIN_FTR_SECTION
626 /*
627 * On POWER9, individual threads can come in here, but the
628 * TLB is shared between the 4 threads in a core, hence
629 * invalidating on one thread invalidates for all.
630 * Thus we make all 4 threads use the same bit here.
631 */
632 clrrdi r6,r6,2
633END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1b400ba0
PM
634 clrldi r7,r6,64-6 /* extract bit number (6 bits) */
635 srdi r6,r6,6 /* doubleword number */
636 sldi r6,r6,3 /* address offset */
637 add r6,r6,r9
638 addi r6,r6,KVM_NEED_FLUSH /* dword in kvm->arch.need_tlb_flush */
a29ebeaf
PM
639 li r8,1
640 sld r8,r8,r7
1b400ba0 641 ld r7,0(r6)
a29ebeaf 642 and. r7,r7,r8
1b400ba0 643 beq 22f
ca252055 644 /* Flush the TLB of any entries for this LPID */
a29ebeaf
PM
645 lwz r0,KVM_TLB_SETS(r9)
646 mtctr r0
1b400ba0
PM
647 li r7,0x800 /* IS field = 0b10 */
648 ptesync
a29ebeaf 649 li r0,0 /* RS for P9 version of tlbiel */
28:	tlbiel	r7		/* On P9, rs=0, RIC=0, PRS=0, R=0 */
651 addi r7,r7,0x1000
652 bdnz 28b
9a4506e1 653 ptesync
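	/*
	 * The loop above runs KVM_TLB_SETS iterations of tlbiel,
	 * stepping the set-selection field in r7 by 0x1000 each time,
	 * so every TLB congruence class is invalidated for the LPID we
	 * are about to run.
	 */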
23:	ldarx	r7,0,r6		/* clear the bit after TLB flushed */
655 andc r7,r7,r8
656 stdcx. r7,0,r6
657 bne 23b
1b400ba0 658
659 /* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
661 cmpdi r8,0
662 beq 37f
57b8daa7 663 std r8, VCORE_TB_OFFSET_APPL(r5)
93b0f4dc
PM
664 mftb r6 /* current host timebase */
665 add r8,r8,r6
666 mtspr SPRN_TBU40,r8 /* update upper 40 bits */
667 mftb r7 /* check if lower 24 bits overflowed */
668 clrldi r6,r6,40
669 clrldi r7,r7,40
670 cmpld r7,r6
671 bge 37f
672 addis r8,r8,0x100 /* if so, increment upper 40 bits */
673 mtspr SPRN_TBU40,r8
674
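	/*
	 * mtspr TBU40 only writes the upper 40 bits of the timebase, so
	 * if the lower 24 bits carried between the mftb and the mtspr,
	 * the code above adds 1 to the upper-40-bit field (addis by
	 * 0x100 is 2^24) to compensate.
	 */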
675 /* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)
677 cmpdi r7, 0
678 beq 38f
679 mtspr SPRN_PCR, r7
38:
b005255e
MN
681
682BEGIN_FTR_SECTION
88b02cf9 683 /* DPDES and VTB are shared between threads */
b005255e 684 ld r8, VCORE_DPDES(r5)
88b02cf9 685 ld r7, VCORE_VTB(r5)
b005255e 686 mtspr SPRN_DPDES, r8
88b02cf9 687 mtspr SPRN_VTB, r7
b005255e
MN
688END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
689
fd7bacbc
MS
690 /* Mark the subcore state as inside guest */
691 bl kvmppc_subcore_enter_guest
692 nop
693 ld r5, HSTATE_KVM_VCORE(r13)
694 ld r4, HSTATE_KVM_VCPU(r13)
388cc6e1 695 li r0,1
371fefd6 696 stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */
9e368f29 697
e0b7ec05 698 /* Do we have a guest vcpu to run? */
6af27c84 69910: cmpdi r4, 0
e0b7ec05
PM
700 beq kvmppc_primary_no_guest
701kvmppc_got_guest:
e0b7ec05
PM
702 /* Increment yield count if they have a VPA */
703 ld r3, VCPU_VPA(r4)
704 cmpdi r3, 0
705 beq 25f
0865a583
AG
706 li r6, LPPACA_YIELDCOUNT
707 LWZX_BE r5, r3, r6
e0b7ec05 708 addi r5, r5, 1
0865a583 709 STWX_BE r5, r3, r6
e0b7ec05
PM
710 li r6, 1
711 stb r6, VCPU_VPA_DIRTY(r4)
25:
713
e0b7ec05
PM
714 /* Save purr/spurr */
715 mfspr r5,SPRN_PURR
716 mfspr r6,SPRN_SPURR
717 std r5,HSTATE_PURR(r13)
718 std r6,HSTATE_SPURR(r13)
719 ld r7,VCPU_PURR(r4)
720 ld r8,VCPU_SPURR(r4)
721 mtspr SPRN_PURR,r7
722 mtspr SPRN_SPURR,r8
e0b7ec05 723
e9cf1e08
PM
724 /* Save host values of some registers */
725BEGIN_FTR_SECTION
726 mfspr r5, SPRN_TIDR
727 mfspr r6, SPRN_PSSCR
f4c51f84 728 mfspr r7, SPRN_PID
4c3bb4cc 729 mfspr r8, SPRN_IAMR
e9cf1e08
PM
730 std r5, STACK_SLOT_TID(r1)
731 std r6, STACK_SLOT_PSSCR(r1)
f4c51f84 732 std r7, STACK_SLOT_PID(r1)
4c3bb4cc 733 std r8, STACK_SLOT_IAMR(r1)
769377f7
PM
734 mfspr r5, SPRN_HFSCR
735 std r5, STACK_SLOT_HFSCR(r1)
e9cf1e08 736END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
7ceaa6dc
PM
737BEGIN_FTR_SECTION
738 mfspr r5, SPRN_CIABR
739 mfspr r6, SPRN_DAWR
740 mfspr r7, SPRN_DAWRX
741 std r5, STACK_SLOT_CIABR(r1)
742 std r6, STACK_SLOT_DAWR(r1)
743 std r7, STACK_SLOT_DAWRX(r1)
744END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
e9cf1e08 745
e0b7ec05
PM
746BEGIN_FTR_SECTION
747 /* Set partition DABR */
748 /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
8563bf52 749 lwz r5,VCPU_DABRX(r4)
e0b7ec05
PM
750 ld r6,VCPU_DABR(r4)
751 mtspr SPRN_DABRX,r5
752 mtspr SPRN_DABR,r6
e0b7ec05 753 isync
e0b7ec05
PM
754END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
755
e4e38121 756#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
4bb3c7a0
PM
757/*
758 * Branch around the call if both CPU_FTR_TM and
759 * CPU_FTR_P9_TM_HV_ASSIST are off.
760 */
e4e38121 761BEGIN_FTR_SECTION
4bb3c7a0
PM
762 b 91f
763END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
67f8a8c1 764 /*
7854f754 765 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
67f8a8c1 766 */
6f597c6b
SG
767 mr r3, r4
768 ld r4, VCPU_MSR(r3)
7854f754 769 li r5, 0 /* don't preserve non-vol regs */
7b0e827c 770 bl kvmppc_restore_tm_hv
7854f754 771 nop
6f597c6b 772 ld r4, HSTATE_KVM_VCPU(r13)
91:
e4e38121
MN
774#endif
775
41f4e631
PM
776 /* Load guest PMU registers; r4 = vcpu pointer here */
777 mr r3, r4
778 bl kvmhv_load_guest_pmu
e0b7ec05
PM
779
780 /* Load up FP, VMX and VSX registers */
41f4e631 781 ld r4, HSTATE_KVM_VCPU(r13)
e0b7ec05
PM
782 bl kvmppc_load_fp
783
784 ld r14, VCPU_GPR(R14)(r4)
785 ld r15, VCPU_GPR(R15)(r4)
786 ld r16, VCPU_GPR(R16)(r4)
787 ld r17, VCPU_GPR(R17)(r4)
788 ld r18, VCPU_GPR(R18)(r4)
789 ld r19, VCPU_GPR(R19)(r4)
790 ld r20, VCPU_GPR(R20)(r4)
791 ld r21, VCPU_GPR(R21)(r4)
792 ld r22, VCPU_GPR(R22)(r4)
793 ld r23, VCPU_GPR(R23)(r4)
794 ld r24, VCPU_GPR(R24)(r4)
795 ld r25, VCPU_GPR(R25)(r4)
796 ld r26, VCPU_GPR(R26)(r4)
797 ld r27, VCPU_GPR(R27)(r4)
798 ld r28, VCPU_GPR(R28)(r4)
799 ld r29, VCPU_GPR(R29)(r4)
800 ld r30, VCPU_GPR(R30)(r4)
801 ld r31, VCPU_GPR(R31)(r4)
802
e0b7ec05
PM
803 /* Switch DSCR to guest value */
804 ld r5, VCPU_DSCR(r4)
805 mtspr SPRN_DSCR, r5
e0b7ec05 806
b005255e 807BEGIN_FTR_SECTION
c17b98cf 808 /* Skip next section on POWER7 */
b005255e
MN
809 b 8f
810END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
b005255e
MN
811 /* Load up POWER8-specific registers */
812 ld r5, VCPU_IAMR(r4)
813 lwz r6, VCPU_PSPB(r4)
814 ld r7, VCPU_FSCR(r4)
815 mtspr SPRN_IAMR, r5
816 mtspr SPRN_PSPB, r6
817 mtspr SPRN_FSCR, r7
818 ld r5, VCPU_DAWR(r4)
819 ld r6, VCPU_DAWRX(r4)
820 ld r7, VCPU_CIABR(r4)
821 ld r8, VCPU_TAR(r4)
b53221e7
MN
822 /*
823 * Handle broken DAWR case by not writing it. This means we
824 * can still store the DAWR register for migration.
825 */
826BEGIN_FTR_SECTION
b005255e
MN
827 mtspr SPRN_DAWR, r5
828 mtspr SPRN_DAWRX, r6
b53221e7 829END_FTR_SECTION_IFSET(CPU_FTR_DAWR)
b005255e
MN
830 mtspr SPRN_CIABR, r7
831 mtspr SPRN_TAR, r8
832 ld r5, VCPU_IC(r4)
7b490411 833 ld r8, VCPU_EBBHR(r4)
88b02cf9 834 mtspr SPRN_IC, r5
b005255e
MN
835 mtspr SPRN_EBBHR, r8
836 ld r5, VCPU_EBBRR(r4)
837 ld r6, VCPU_BESCR(r4)
83677f55
PM
838 lwz r7, VCPU_GUEST_PID(r4)
839 ld r8, VCPU_WORT(r4)
b005255e
MN
840 mtspr SPRN_EBBRR, r5
841 mtspr SPRN_BESCR, r6
83677f55
PM
842 mtspr SPRN_PID, r7
843 mtspr SPRN_WORT, r8
844BEGIN_FTR_SECTION
e9cf1e08 845 /* POWER8-only registers */
b005255e
MN
846 ld r5, VCPU_TCSCR(r4)
847 ld r6, VCPU_ACOP(r4)
83677f55
PM
848 ld r7, VCPU_CSIGR(r4)
849 ld r8, VCPU_TACR(r4)
b005255e
MN
850 mtspr SPRN_TCSCR, r5
851 mtspr SPRN_ACOP, r6
83677f55
PM
852 mtspr SPRN_CSIGR, r7
853 mtspr SPRN_TACR, r8
4bb3c7a0 854 nop
e9cf1e08
PM
855FTR_SECTION_ELSE
856 /* POWER9-only registers */
857 ld r5, VCPU_TID(r4)
858 ld r6, VCPU_PSSCR(r4)
4bb3c7a0 859 lbz r8, HSTATE_FAKE_SUSPEND(r13)
e9cf1e08 860 oris r6, r6, PSSCR_EC@h /* This makes stop trap to HV */
4bb3c7a0 861 rldimi r6, r8, PSSCR_FAKE_SUSPEND_LG, 63 - PSSCR_FAKE_SUSPEND_LG
769377f7 862 ld r7, VCPU_HFSCR(r4)
e9cf1e08
PM
863 mtspr SPRN_TIDR, r5
864 mtspr SPRN_PSSCR, r6
769377f7 865 mtspr SPRN_HFSCR, r7
e9cf1e08 866ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
b005255e
MN
8:
868
e0b7ec05
PM
869 ld r5, VCPU_SPRG0(r4)
870 ld r6, VCPU_SPRG1(r4)
871 ld r7, VCPU_SPRG2(r4)
872 ld r8, VCPU_SPRG3(r4)
873 mtspr SPRN_SPRG0, r5
874 mtspr SPRN_SPRG1, r6
875 mtspr SPRN_SPRG2, r7
876 mtspr SPRN_SPRG3, r8
877
878 /* Load up DAR and DSISR */
879 ld r5, VCPU_DAR(r4)
880 lwz r6, VCPU_DSISR(r4)
881 mtspr SPRN_DAR, r5
882 mtspr SPRN_DSISR, r6
883
e0b7ec05
PM
884 /* Restore AMR and UAMOR, set AMOR to all 1s */
885 ld r5,VCPU_AMR(r4)
886 ld r6,VCPU_UAMOR(r4)
887 li r7,-1
888 mtspr SPRN_AMR,r5
889 mtspr SPRN_UAMOR,r6
890 mtspr SPRN_AMOR,r7
de56a948
PM
891
892 /* Restore state of CTRL run bit; assume 1 on entry */
893 lwz r5,VCPU_CTRL(r4)
894 andi. r5,r5,1
895 bne 4f
896 mfspr r6,SPRN_CTRLF
897 clrrdi r6,r6,1
898 mtspr SPRN_CTRLT,r6
4:
6af27c84
PM
900 /* Secondary threads wait for primary to have done partition switch */
901 ld r5, HSTATE_KVM_VCORE(r13)
902 lbz r6, HSTATE_PTID(r13)
903 cmpwi r6, 0
904 beq 21f
905 lbz r0, VCORE_IN_GUEST(r5)
906 cmpwi r0, 0
907 bne 21f
908 HMT_LOW
b4deba5c
PM
20:	lwz	r3, VCORE_ENTRY_EXIT(r5)
910 cmpwi r3, 0x100
911 bge no_switch_exit
912 lbz r0, VCORE_IN_GUEST(r5)
6af27c84
PM
913 cmpwi r0, 0
914 beq 20b
915 HMT_MEDIUM
21:
917 /* Set LPCR. */
918 ld r8,VCORE_LPCR(r5)
919 mtspr SPRN_LPCR,r8
920 isync
921
57b8daa7
PM
922 /*
923 * Set the decrementer to the guest decrementer.
924 */
925 ld r8,VCPU_DEC_EXPIRES(r4)
926 /* r8 is a host timebase value here, convert to guest TB */
927 ld r5,HSTATE_KVM_VCORE(r13)
928 ld r6,VCORE_TB_OFFSET_APPL(r5)
929 add r8,r8,r6
930 mftb r7
931 subf r3,r7,r8
932 mtspr SPRN_DEC,r3
933
6af27c84
PM
934 /* Check if HDEC expires soon */
935 mfspr r3, SPRN_HDEC
2f272463
PM
936 EXTEND_HDEC(r3)
937 cmpdi r3, 512 /* 1 microsecond */
6af27c84
PM
938 blt hdec_soon
939
6964e6a4
PM
940 /* For hash guest, clear out and reload the SLB */
941 ld r6, VCPU_KVM(r4)
942 lbz r0, KVM_RADIX(r6)
943 cmpwi r0, 0
944 bne 9f
945 li r6, 0
946 slbmte r6, r6
947 slbia
948 ptesync
949
950 /* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */
951 lwz r5,VCPU_SLB_MAX(r4)
952 cmpwi r5,0
953 beq 9f
954 mtctr r5
955 addi r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
957 ld r9,VCPU_SLB_V(r6)
958 slbmte r9,r8
959 addi r6,r6,VCPU_SLB_SIZE
960 bdnz 1b
9:
962
5af50993
BH
963#ifdef CONFIG_KVM_XICS
964 /* We are entering the guest on that thread, push VCPU to XIVE */
965 ld r10, HSTATE_XIVE_TIMA_PHYS(r13)
0bfa33c7 966 cmpldi cr0, r10, 0
5af50993
BH
967 beq no_xive
968 ld r11, VCPU_XIVE_SAVED_STATE(r4)
969 li r9, TM_QW1_OS
5af50993 970 eieio
ad98dd1a 971 stdcix r11,r9,r10
972 lwz r11, VCPU_XIVE_CAM_WORD(r4)
973 li r9, TM_QW1_OS + TM_WORD2
974 stwcix r11,r9,r10
975 li r9, 1
35c2405e 976 stb r9, VCPU_XIVE_PUSHED(r4)
ad98dd1a 977 eieio
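	/*
	 * The stores above use the cache-inhibited forms (stdcix/stwcix)
	 * because r10 holds the real-mode (physical) address of the XIVE
	 * thread interrupt management area (HSTATE_XIVE_TIMA_PHYS).
	 */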
978
979 /*
980 * We clear the irq_pending flag. There is a small chance of a
981 * race vs. the escalation interrupt happening on another
982 * processor setting it again, but the only consequence is to
	 * cause a spurious wakeup on the next H_CEDE which is not an
984 * issue.
985 */
986 li r0,0
987 stb r0, VCPU_IRQ_PENDING(r4)
988
989 /*
990 * In single escalation mode, if the escalation interrupt is
991 * on, we mask it.
992 */
993 lbz r0, VCPU_XIVE_ESC_ON(r4)
994 cmpwi r0,0
995 beq 1f
996 ld r10, VCPU_XIVE_ESC_RADDR(r4)
997 li r9, XIVE_ESB_SET_PQ_01
998 ldcix r0, r10, r9
999 sync
1000
1001 /* We have a possible subtle race here: The escalation interrupt might
1002 * have fired and be on its way to the host queue while we mask it,
1003 * and if we unmask it early enough (re-cede right away), there is
	 * a theoretical possibility that it fires again, thus landing in the
1005 * target queue more than once which is a big no-no.
1006 *
1007 * Fortunately, solving this is rather easy. If the above load setting
1008 * PQ to 01 returns a previous value where P is set, then we know the
1009 * escalation interrupt is somewhere on its way to the host. In that
1010 * case we simply don't clear the xive_esc_on flag below. It will be
1011 * eventually cleared by the handler for the escalation interrupt.
1012 *
1013 * Then, when doing a cede, we check that flag again before re-enabling
1014 * the escalation interrupt, and if set, we abort the cede.
1015 */
1016 andi. r0, r0, XIVE_ESB_VAL_P
1017 bne- 1f
1018
1019 /* Now P is 0, we can clear the flag */
1020 li r0, 0
1021 stb r0, VCPU_XIVE_ESC_ON(r4)
1:
5af50993
BH
1023no_xive:
1024#endif /* CONFIG_KVM_XICS */
1025
95a6432c
PM
1026 li r0, 0
1027 stw r0, STACK_SLOT_SHORT_PATH(r1)
1028
df709a29 1029deliver_guest_interrupt: /* r4 = vcpu, r13 = paca */
f7035ce9
PM
1030 /* Check if we can deliver an external or decrementer interrupt now */
1031 ld r0, VCPU_PENDING_EXC(r4)
1032BEGIN_FTR_SECTION
1033 /* On POWER9, also check for emulated doorbell interrupt */
1034 lbz r3, VCPU_DBELL_REQ(r4)
1035 or r0, r0, r3
1036END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1037 cmpdi r0, 0
1038 beq 71f
1039 mr r3, r4
1040 bl kvmppc_guest_entry_inject_int
1041 ld r4, HSTATE_KVM_VCPU(r13)
71:
de56a948
PM
1043 ld r6, VCPU_SRR0(r4)
1044 ld r7, VCPU_SRR1(r4)
e3bbbbfa
PM
1045 mtspr SPRN_SRR0, r6
1046 mtspr SPRN_SRR1, r7
de56a948 1047
95a6432c
PM
1048fast_guest_entry_c:
1049 ld r10, VCPU_PC(r4)
1050 ld r11, VCPU_MSR(r4)
4619ac88 1051 /* r11 = vcpu->arch.msr & ~MSR_HV */
1052 rldicl r11, r11, 63 - MSR_HV_LG, 1
1053 rotldi r11, r11, 1 + MSR_HV_LG
1054 ori r11, r11, MSR_ME
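	/*
	 * The rotate/insert sequence above clears MSR_HV and MSR_ME is
	 * forced on, so the guest never runs in hypervisor state and
	 * always has machine check interrupts enabled.
	 */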
1055
1056 ld r6, VCPU_CTR(r4)
1057 ld r7, VCPU_XER(r4)
1058 mtctr r6
1059 mtxer r7
19ccb76a 1060
27025a60
LPF
1061/*
1062 * Required state:
1063 * R4 = vcpu
1064 * R10: value for HSRR0
1065 * R11: value for HSRR1
1066 * R13 = PACA
1067 */
de56a948 1068fast_guest_return:
4619ac88
PM
1069 li r0,0
1070 stb r0,VCPU_CEDED(r4) /* cancel cede */
de56a948
PM
1071 mtspr SPRN_HSRR0,r10
1072 mtspr SPRN_HSRR1,r11
1073
1074 /* Activate guest mode, so faults get handled by KVM */
44a3add8 1075 li r9, KVM_GUEST_MODE_GUEST_HV
de56a948
PM
1076 stb r9, HSTATE_IN_GUEST(r13)
1077
b6c295df
PM
1078#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1079 /* Accumulate timing */
1080 addi r3, r4, VCPU_TB_GUEST
1081 bl kvmhv_accumulate_time
1082#endif
1083
de56a948
PM
1084 /* Enter guest */
1085
0acb9111
PM
1086BEGIN_FTR_SECTION
1087 ld r5, VCPU_CFAR(r4)
1088 mtspr SPRN_CFAR, r5
1089END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
4b8473c9
PM
1090BEGIN_FTR_SECTION
1091 ld r0, VCPU_PPR(r4)
1092END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
0acb9111 1093
de56a948 1094 ld r5, VCPU_LR(r4)
fd0944ba 1095 ld r6, VCPU_CR(r4)
de56a948
PM
1096 mtlr r5
1097 mtcr r6
1098
c75df6f9
MN
1099 ld r1, VCPU_GPR(R1)(r4)
1100 ld r2, VCPU_GPR(R2)(r4)
1101 ld r3, VCPU_GPR(R3)(r4)
1102 ld r5, VCPU_GPR(R5)(r4)
1103 ld r6, VCPU_GPR(R6)(r4)
1104 ld r7, VCPU_GPR(R7)(r4)
1105 ld r8, VCPU_GPR(R8)(r4)
1106 ld r9, VCPU_GPR(R9)(r4)
1107 ld r10, VCPU_GPR(R10)(r4)
1108 ld r11, VCPU_GPR(R11)(r4)
1109 ld r12, VCPU_GPR(R12)(r4)
1110 ld r13, VCPU_GPR(R13)(r4)
1111
4b8473c9
PM
1112BEGIN_FTR_SECTION
1113 mtspr SPRN_PPR, r0
1114END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
1115
1116/* Move canary into DSISR to check for later */
1117BEGIN_FTR_SECTION
1118 li r0, 0x7fff
1119 mtspr SPRN_HDSISR, r0
1120END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
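/*
 * The 0x7fff canary lets the HDSI path tell, later on, whether
 * hardware actually wrote a fresh value into HDSISR for the fault.
 */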
1121
4b8473c9 1122 ld r0, VCPU_GPR(R0)(r4)
c75df6f9 1123 ld r4, VCPU_GPR(R4)(r4)
222f20f1 1124 HRFI_TO_GUEST
1125 b .
1126
1127/*
1128 * Enter the guest on a P9 or later system where we have exactly
1129 * one vcpu per vcore and we don't need to go to real mode
1130 * (which implies that host and guest are both using radix MMU mode).
1131 * r3 = vcpu pointer
1132 * Most SPRs and all the VSRs have been loaded already.
1133 */
1134_GLOBAL(__kvmhv_vcpu_entry_p9)
1135EXPORT_SYMBOL_GPL(__kvmhv_vcpu_entry_p9)
1136 mflr r0
1137 std r0, PPC_LR_STKOFF(r1)
1138 stdu r1, -SFS(r1)
1139
1140 li r0, 1
1141 stw r0, STACK_SLOT_SHORT_PATH(r1)
1142
1143 std r3, HSTATE_KVM_VCPU(r13)
1144 mfcr r4
1145 stw r4, SFS+8(r1)
1146
1147 std r1, HSTATE_HOST_R1(r13)
1148
1149 reg = 14
1150 .rept 18
1151 std reg, STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
1152 reg = reg + 1
1153 .endr
1154
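	/* Now load the guest's non-volatile GPRs from the vcpu (r3) */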
1155 reg = 14
1156 .rept 18
1157 ld reg, __VCPU_GPR(reg)(r3)
1158 reg = reg + 1
1159 .endr
1160
1161 mfmsr r10
1162 std r10, HSTATE_HOST_MSR(r13)
1163
1164 mr r4, r3
1165 b fast_guest_entry_c
1166guest_exit_short_path:
1167
1168 li r0, KVM_GUEST_MODE_NONE
1169 stb r0, HSTATE_IN_GUEST(r13)
1170
1171 reg = 14
1172 .rept 18
1173 std reg, __VCPU_GPR(reg)(r9)
1174 reg = reg + 1
1175 .endr
1176
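	/* Restore the host's non-volatile GPRs saved on the stack at entry */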
1177 reg = 14
1178 .rept 18
1179 ld reg, STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
1180 reg = reg + 1
1181 .endr
1182
1183 lwz r4, SFS+8(r1)
1184 mtcr r4
1185
1186 mr r3, r12 /* trap number */
1187
1188 addi r1, r1, SFS
1189 ld r0, PPC_LR_STKOFF(r1)
1190 mtlr r0
1191
1192 /* If we are in real mode, do a rfid to get back to the caller */
1193 mfmsr r4
1194 andi. r5, r4, MSR_IR
1195 bnelr
1196 rldicl r5, r4, 64 - MSR_TS_S_LG, 62 /* extract TS field */
1197 mtspr SPRN_SRR0, r0
1198 ld r10, HSTATE_HOST_MSR(r13)
1199 rldimi r10, r5, MSR_TS_S_LG, 63 - MSR_TS_T_LG
1200 mtspr SPRN_SRR1, r10
1201 RFI_TO_KERNEL
1202 b .
1203
b6c295df 1204secondary_too_late:
6af27c84 1205 li r12, 0
a8b48a4d 1206 stw r12, STACK_SLOT_TRAP(r1)
b6c295df
PM
1207 cmpdi r4, 0
1208 beq 11f
6af27c84
PM
1209 stw r12, VCPU_TRAP(r4)
1210#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
b6c295df
PM
1211 addi r3, r4, VCPU_TB_RMEXIT
1212 bl kvmhv_accumulate_time
6af27c84 1213#endif
b6c295df
PM
121411: b kvmhv_switch_to_host
1215
b4deba5c
PM
1216no_switch_exit:
1217 HMT_MEDIUM
1218 li r12, 0
1219 b 12f
b6c295df 1220hdec_soon:
6af27c84 1221 li r12, BOOK3S_INTERRUPT_HV_DECREMENTER
b4deba5c 122212: stw r12, VCPU_TRAP(r4)
6af27c84
PM
1223 mr r9, r4
1224#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
b6c295df
PM
1225 addi r3, r4, VCPU_TB_RMEXIT
1226 bl kvmhv_accumulate_time
b6c295df 1227#endif
6964e6a4 1228 b guest_bypass
b6c295df 1229
de56a948
PM
1230/******************************************************************************
1231 * *
1232 * Exit code *
1233 * *
1234 *****************************************************************************/
1235
1236/*
1237 * We come here from the first-level interrupt handlers.
1238 */
dd96b2c2
AK
1239 .globl kvmppc_interrupt_hv
1240kvmppc_interrupt_hv:
de56a948
PM
1241 /*
1242 * Register contents:
d3918e7f 1243 * R12 = (guest CR << 32) | interrupt vector
de56a948 1244 * R13 = PACA
d3918e7f 1245 * guest R12 saved in shadow VCPU SCRATCH0
a97a65d5 1246 * guest CTR saved in shadow VCPU SCRATCH1 if RELOCATABLE
de56a948
PM
1247 * guest R13 saved in SPRN_SCRATCH0
1248 */
a97a65d5 1249 std r9, HSTATE_SCRATCH2(r13)
44a3add8
PM
1250 lbz r9, HSTATE_IN_GUEST(r13)
1251 cmpwi r9, KVM_GUEST_MODE_HOST_HV
1252 beq kvmppc_bad_host_intr
dd96b2c2
AK
1253#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
1254 cmpwi r9, KVM_GUEST_MODE_GUEST
a97a65d5 1255 ld r9, HSTATE_SCRATCH2(r13)
dd96b2c2
AK
1256 beq kvmppc_interrupt_pr
1257#endif
44a3add8
PM
1258 /* We're now back in the host but in guest MMU context */
1259 li r9, KVM_GUEST_MODE_HOST_HV
1260 stb r9, HSTATE_IN_GUEST(r13)
1261
de56a948
PM
1262 ld r9, HSTATE_KVM_VCPU(r13)
1263
1264 /* Save registers */
1265
c75df6f9
MN
1266 std r0, VCPU_GPR(R0)(r9)
1267 std r1, VCPU_GPR(R1)(r9)
1268 std r2, VCPU_GPR(R2)(r9)
1269 std r3, VCPU_GPR(R3)(r9)
1270 std r4, VCPU_GPR(R4)(r9)
1271 std r5, VCPU_GPR(R5)(r9)
1272 std r6, VCPU_GPR(R6)(r9)
1273 std r7, VCPU_GPR(R7)(r9)
1274 std r8, VCPU_GPR(R8)(r9)
a97a65d5 1275 ld r0, HSTATE_SCRATCH2(r13)
c75df6f9
MN
1276 std r0, VCPU_GPR(R9)(r9)
1277 std r10, VCPU_GPR(R10)(r9)
1278 std r11, VCPU_GPR(R11)(r9)
de56a948 1279 ld r3, HSTATE_SCRATCH0(r13)
c75df6f9 1280 std r3, VCPU_GPR(R12)(r9)
d3918e7f
NP
1281 /* CR is in the high half of r12 */
1282 srdi r4, r12, 32
fd0944ba 1283 std r4, VCPU_CR(r9)
0acb9111
PM
1284BEGIN_FTR_SECTION
1285 ld r3, HSTATE_CFAR(r13)
1286 std r3, VCPU_CFAR(r9)
1287END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
4b8473c9
PM
1288BEGIN_FTR_SECTION
1289 ld r4, HSTATE_PPR(r13)
1290 std r4, VCPU_PPR(r9)
1291END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
de56a948
PM
1292
1293 /* Restore R1/R2 so we can handle faults */
1294 ld r1, HSTATE_HOST_R1(r13)
1295 ld r2, PACATOC(r13)
1296
1297 mfspr r10, SPRN_SRR0
1298 mfspr r11, SPRN_SRR1
1299 std r10, VCPU_SRR0(r9)
1300 std r11, VCPU_SRR1(r9)
d3918e7f
NP
1301 /* trap is in the low half of r12, clear CR from the high half */
1302 clrldi r12, r12, 32
de56a948
PM
1303 andi. r0, r12, 2 /* need to read HSRR0/1? */
1304 beq 1f
1305 mfspr r10, SPRN_HSRR0
1306 mfspr r11, SPRN_HSRR1
1307 clrrdi r12, r12, 2
1:	std	r10, VCPU_PC(r9)
1309 std r11, VCPU_MSR(r9)
1310
1311 GET_SCRATCH0(r3)
1312 mflr r4
c75df6f9 1313 std r3, VCPU_GPR(R13)(r9)
de56a948
PM
1314 std r4, VCPU_LR(r9)
1315
de56a948
PM
1316 stw r12,VCPU_TRAP(r9)
1317
8b24e69f
PM
1318 /*
1319 * Now that we have saved away SRR0/1 and HSRR0/1,
1320 * interrupts are recoverable in principle, so set MSR_RI.
1321 * This becomes important for relocation-on interrupts from
1322 * the guest, which we can get in radix mode on POWER9.
1323 */
1324 li r0, MSR_RI
1325 mtmsrd r0, 1
1326
b6c295df
PM
1327#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1328 addi r3, r9, VCPU_TB_RMINTR
1329 mr r4, r9
1330 bl kvmhv_accumulate_time
1331 ld r5, VCPU_GPR(R5)(r9)
1332 ld r6, VCPU_GPR(R6)(r9)
1333 ld r7, VCPU_GPR(R7)(r9)
1334 ld r8, VCPU_GPR(R8)(r9)
1335#endif
1336
4a157d61 1337 /* Save HEIR (HV emulation assist reg) in emul_inst
697d3899
PM
1338 if this is an HEI (HV emulation interrupt, e40) */
1339 li r3,KVM_INST_FETCH_FAILED
2bf27601 1340 stw r3,VCPU_LAST_INST(r9)
697d3899
PM
1341 cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
1342 bne 11f
1343 mfspr r3,SPRN_HEIR
11:	stw	r3,VCPU_HEIR(r9)
697d3899
PM
1345
1346 /* these are volatile across C function calls */
a97a65d5
NP
1347#ifdef CONFIG_RELOCATABLE
1348 ld r3, HSTATE_SCRATCH1(r13)
1349 mtctr r3
1350#else
697d3899 1351 mfctr r3
a97a65d5 1352#endif
697d3899
PM
1353 mfxer r4
1354 std r3, VCPU_CTR(r9)
c63517c2 1355 std r4, VCPU_XER(r9)
697d3899 1356
df709a29
PM
1357 /* Save more register state */
1358 mfdar r3
1359 mfdsisr r4
1360 std r3, VCPU_DAR(r9)
1361 stw r4, VCPU_DSISR(r9)
4bb3c7a0 1362
697d3899
PM
1363 /* If this is a page table miss then see if it's theirs or ours */
1364 cmpwi r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
1365 beq kvmppc_hdsi
df709a29
PM
1366 std r3, VCPU_FAULT_DAR(r9)
1367 stw r4, VCPU_FAULT_DSISR(r9)
342d3db7
PM
1368 cmpwi r12, BOOK3S_INTERRUPT_H_INST_STORAGE
1369 beq kvmppc_hisi
697d3899 1370
df709a29
PM
1371#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1372 /* For softpatch interrupt, go off and do TM instruction emulation */
1373 cmpwi r12, BOOK3S_INTERRUPT_HV_SOFTPATCH
1374 beq kvmppc_tm_emul
1375#endif
1376
de56a948
PM
1377 /* See if this is a leftover HDEC interrupt */
1378 cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
1379 bne 2f
1380 mfspr r3,SPRN_HDEC
a4faf2e7
PM
1381 EXTEND_HDEC(r3)
1382 cmpdi r3,0
1f09c3ed
PM
1383 mr r4,r9
1384 bge fast_guest_return
2:
697d3899 1386 /* See if this is an hcall we can handle in real mode */
a8606e20
PM
1387 cmpwi r12,BOOK3S_INTERRUPT_SYSCALL
1388 beq hcall_try_real_mode
de56a948 1389
66feed61
PM
1390 /* Hypervisor doorbell - exit only if host IPI flag set */
1391 cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL
1392 bne 3f
bd0fdb19
NP
1393BEGIN_FTR_SECTION
1394 PPC_MSGSYNC
2cde3716 1395 lwsync
bd0fdb19 1396END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
66feed61 1397 lbz r0, HSTATE_HOST_IPI(r13)
06554d9f 1398 cmpwi r0, 0
df709a29 1399 beq maybe_reenter_guest
66feed61
PM
1400 b guest_exit_cont
3:
769377f7
PM
1402 /* If it's a hypervisor facility unavailable interrupt, save HFSCR */
1403 cmpwi r12, BOOK3S_INTERRUPT_H_FAC_UNAVAIL
1404 bne 14f
1405 mfspr r3, SPRN_HFSCR
1406 std r3, VCPU_HFSCR(r9)
1407 b guest_exit_cont
14:
54695c30
BH
1409 /* External interrupt ? */
1410 cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
df709a29 1411 beq kvmppc_guest_external
43ff3f65
PM
1412 /* See if it is a machine check */
1413 cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK
1414 beq machine_check_realmode
df709a29
PM
1415 /* Or a hypervisor maintenance interrupt */
1416 cmpwi r12, BOOK3S_INTERRUPT_HMI
1417 beq hmi_realmode
1418
1419guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
1420
43ff3f65
PM
1421#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1422 addi r3, r9, VCPU_TB_RMEXIT
1423 mr r4, r9
1424 bl kvmhv_accumulate_time
1425#endif
5af50993
BH
1426#ifdef CONFIG_KVM_XICS
1427 /* We are exiting, pull the VP from the XIVE */
35c2405e 1428 lbz r0, VCPU_XIVE_PUSHED(r9)
5af50993
BH
1429 cmpwi cr0, r0, 0
1430 beq 1f
1431 li r7, TM_SPC_PULL_OS_CTX
1432 li r6, TM_QW1_OS
1433 mfmsr r0
2662efd0 1434 andi. r0, r0, MSR_DR /* in real mode? */
5af50993
BH
1435 beq 2f
1436 ld r10, HSTATE_XIVE_TIMA_VIRT(r13)
1437 cmpldi cr0, r10, 0
1438 beq 1f
1439 /* First load to pull the context, we ignore the value */
5af50993 1440 eieio
ad98dd1a 1441 lwzx r11, r7, r10
5af50993
BH
1442 /* Second load to recover the context state (Words 0 and 1) */
1443 ldx r11, r6, r10
1444 b 3f
14452: ld r10, HSTATE_XIVE_TIMA_PHYS(r13)
1446 cmpldi cr0, r10, 0
1447 beq 1f
1448 /* First load to pull the context, we ignore the value */
5af50993 1449 eieio
ad98dd1a 1450 lwzcix r11, r7, r10
5af50993
BH
1451 /* Second load to recover the context state (Words 0 and 1) */
1452 ldcix r11, r6, r10
3:	std	r11, VCPU_XIVE_SAVED_STATE(r9)
1454 /* Fixup some of the state for the next load */
1455 li r10, 0
1456 li r0, 0xff
35c2405e 1457 stb r10, VCPU_XIVE_PUSHED(r9)
5af50993
BH
1458 stb r10, (VCPU_XIVE_SAVED_STATE+3)(r9)
1459 stb r0, (VCPU_XIVE_SAVED_STATE+4)(r9)
ad98dd1a 1460 eieio
5af50993
BH
1:
1462#endif /* CONFIG_KVM_XICS */
de56a948 1463
95a6432c
PM
1464 /* If we came in through the P9 short path, go back out to C now */
1465 lwz r0, STACK_SLOT_SHORT_PATH(r1)
1466 cmpwi r0, 0
1467 bne guest_exit_short_path
1468
6964e6a4 1469 /* For hash guest, read the guest SLB and save it away */
f4c51f84
PM
1470 ld r5, VCPU_KVM(r9)
1471 lbz r0, KVM_RADIX(r5)
f4c51f84 1472 li r5, 0
6964e6a4
PM
1473 cmpwi r0, 0
1474 bne 3f /* for radix, save 0 entries */
de56a948
PM
1475 lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */
1476 mtctr r0
1477 li r6,0
1478 addi r7,r9,VCPU_SLB
de56a948
PM
1:	slbmfee	r8,r6
1480 andis. r0,r8,SLB_ESID_V@h
1481 beq 2f
1482 add r8,r8,r6 /* put index in */
1483 slbmfev r3,r6
1484 std r8,VCPU_SLB_E(r7)
1485 std r3,VCPU_SLB_V(r7)
1486 addi r7,r7,VCPU_SLB_SIZE
1487 addi r5,r5,1
2:	addi	r6,r6,1
1489 bdnz 1b
6964e6a4
PM
1490 /* Finally clear out the SLB */
1491 li r0,0
1492 slbmte r0,r0
1493 slbia
1494 ptesync
3:	stw	r5,VCPU_SLB_MAX(r9)
b4072df4 1496
1497 /* load host SLB entries */
1498BEGIN_MMU_FTR_SECTION
1499 b 0f
1500END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
1501 ld r8,PACA_SLBSHADOWPTR(r13)
1502
1503 .rept SLB_NUM_BOLTED
1504 li r3, SLBSHADOW_SAVEAREA
1505 LDX_BE r5, r8, r3
1506 addi r3, r3, 8
1507 LDX_BE r6, r8, r3
1508 andis. r7,r5,SLB_ESID_V@h
1509 beq 1f
1510 slbmte r6,r5
1:	addi	r8,r8,16
1512 .endr
0:
1514
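	/*
	 * Only the bolted host SLB entries are reinstalled above, from
	 * the SLB shadow buffer; a radix host branched around this block.
	 */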
6964e6a4 1515guest_bypass:
a8b48a4d 1516 stw r12, STACK_SLOT_TRAP(r1)
1517
1518 /* Save DEC */
1519 /* Do this before kvmhv_commence_exit so we know TB is guest TB */
1520 ld r3, HSTATE_KVM_VCORE(r13)
1521 mfspr r5,SPRN_DEC
1522 mftb r6
1523 /* On P9, if the guest has large decr enabled, don't sign extend */
1524BEGIN_FTR_SECTION
1525 ld r4, VCORE_LPCR(r3)
1526 andis. r4, r4, LPCR_LD@h
1527 bne 16f
1528END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1529 extsw r5,r5
16:	add	r5,r5,r6
1531 /* r5 is a guest timebase value here, convert to host TB */
1532 ld r4,VCORE_TB_OFFSET_APPL(r3)
1533 subf r5,r4,r5
1534 std r5,VCPU_DEC_EXPIRES(r9)
1535
6af27c84 1536 /* Increment exit count, poke other threads to exit */
57b8daa7 1537 mr r3, r12
6af27c84 1538 bl kvmhv_commence_exit
eddb60fb
PM
1539 nop
1540 ld r9, HSTATE_KVM_VCPU(r13)
6af27c84 1541
ec257165
PM
1542 /* Stop others sending VCPU interrupts to this physical CPU */
1543 li r0, -1
1544 stw r0, VCPU_CPU(r9)
1545 stw r0, VCPU_THREAD_CPU(r9)
1546
de56a948 1547 /* Save guest CTRL register, set runlatch to 1 */
6af27c84 1548 mfspr r6,SPRN_CTRLF
de56a948
PM
1549 stw r6,VCPU_CTRL(r9)
1550 andi. r0,r6,1
1551 bne 4f
1552 ori r6,r6,1
1553 mtspr SPRN_CTRLT,r6
4:
de56a948
PM
1555 /*
1556 * Save the guest PURR/SPURR
1557 */
1558 mfspr r5,SPRN_PURR
1559 mfspr r6,SPRN_SPURR
1560 ld r7,VCPU_PURR(r9)
1561 ld r8,VCPU_SPURR(r9)
1562 std r5,VCPU_PURR(r9)
1563 std r6,VCPU_SPURR(r9)
1564 subf r5,r7,r5
1565 subf r6,r8,r6
1566
1567 /*
1568 * Restore host PURR/SPURR and add guest times
1569 * so that the time in the guest gets accounted.
1570 */
1571 ld r3,HSTATE_PURR(r13)
1572 ld r4,HSTATE_SPURR(r13)
1573 add r3,r3,r5
1574 add r4,r4,r6
1575 mtspr SPRN_PURR,r3
1576 mtspr SPRN_SPURR,r4
1577
b005255e
MN
1578BEGIN_FTR_SECTION
1579 b 8f
1580END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
b005255e
MN
1581 /* Save POWER8-specific registers */
1582 mfspr r5, SPRN_IAMR
1583 mfspr r6, SPRN_PSPB
1584 mfspr r7, SPRN_FSCR
1585 std r5, VCPU_IAMR(r9)
1586 stw r6, VCPU_PSPB(r9)
1587 std r7, VCPU_FSCR(r9)
1588 mfspr r5, SPRN_IC
b005255e
MN
1589 mfspr r7, SPRN_TAR
1590 std r5, VCPU_IC(r9)
b005255e 1591 std r7, VCPU_TAR(r9)
7b490411 1592 mfspr r8, SPRN_EBBHR
b005255e
MN
1593 std r8, VCPU_EBBHR(r9)
1594 mfspr r5, SPRN_EBBRR
1595 mfspr r6, SPRN_BESCR
83677f55
PM
1596 mfspr r7, SPRN_PID
1597 mfspr r8, SPRN_WORT
b005255e
MN
1598 std r5, VCPU_EBBRR(r9)
1599 std r6, VCPU_BESCR(r9)
83677f55
PM
1600 stw r7, VCPU_GUEST_PID(r9)
1601 std r8, VCPU_WORT(r9)
1602BEGIN_FTR_SECTION
b005255e
MN
1603 mfspr r5, SPRN_TCSCR
1604 mfspr r6, SPRN_ACOP
83677f55
PM
1605 mfspr r7, SPRN_CSIGR
1606 mfspr r8, SPRN_TACR
b005255e
MN
1607 std r5, VCPU_TCSCR(r9)
1608 std r6, VCPU_ACOP(r9)
83677f55
PM
1609 std r7, VCPU_CSIGR(r9)
1610 std r8, VCPU_TACR(r9)
e9cf1e08
PM
1611FTR_SECTION_ELSE
1612 mfspr r5, SPRN_TIDR
1613 mfspr r6, SPRN_PSSCR
1614 std r5, VCPU_TID(r9)
1615 rldicl r6, r6, 4, 50 /* r6 &= PSSCR_GUEST_VIS */
1616 rotldi r6, r6, 60
1617 std r6, VCPU_PSSCR(r9)
769377f7
PM
1618 /* Restore host HFSCR value */
1619 ld r7, STACK_SLOT_HFSCR(r1)
1620 mtspr SPRN_HFSCR, r7
e9cf1e08 1621ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
ccec4456
PM
1622 /*
1623 * Restore various registers to 0, where non-zero values
1624 * set by the guest could disrupt the host.
1625 */
1626 li r0, 0
4c3bb4cc 1627 mtspr SPRN_PSPB, r0
ccec4456 1628 mtspr SPRN_WORT, r0
83677f55 1629BEGIN_FTR_SECTION
4c3bb4cc 1630 mtspr SPRN_IAMR, r0
83677f55 1631 mtspr SPRN_TCSCR, r0
ccec4456
PM
1632 /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
1633 li r0, 1
1634 sldi r0, r0, 31
1635 mtspr SPRN_MMCRS, r0
83677f55 1636END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
b005255e
MN
8:
1638
e0b7ec05 1639 /* Save and reset AMR and UAMOR before turning on the MMU */
e0b7ec05
PM
1640 mfspr r5,SPRN_AMR
1641 mfspr r6,SPRN_UAMOR
1642 std r5,VCPU_AMR(r9)
1643 std r6,VCPU_UAMOR(r9)
1644 li r6,0
1645 mtspr SPRN_AMR,r6
4c3bb4cc 1646 mtspr SPRN_UAMOR, r6
e0b7ec05
PM
1647
1648 /* Switch DSCR back to host value */
e0b7ec05
PM
1649 mfspr r8, SPRN_DSCR
1650 ld r7, HSTATE_DSCR(r13)
1651 std r8, VCPU_DSCR(r9)
1652 mtspr SPRN_DSCR, r7
e0b7ec05
PM
1653
1654 /* Save non-volatile GPRs */
1655 std r14, VCPU_GPR(R14)(r9)
1656 std r15, VCPU_GPR(R15)(r9)
1657 std r16, VCPU_GPR(R16)(r9)
1658 std r17, VCPU_GPR(R17)(r9)
1659 std r18, VCPU_GPR(R18)(r9)
1660 std r19, VCPU_GPR(R19)(r9)
1661 std r20, VCPU_GPR(R20)(r9)
1662 std r21, VCPU_GPR(R21)(r9)
1663 std r22, VCPU_GPR(R22)(r9)
1664 std r23, VCPU_GPR(R23)(r9)
1665 std r24, VCPU_GPR(R24)(r9)
1666 std r25, VCPU_GPR(R25)(r9)
1667 std r26, VCPU_GPR(R26)(r9)
1668 std r27, VCPU_GPR(R27)(r9)
1669 std r28, VCPU_GPR(R28)(r9)
1670 std r29, VCPU_GPR(R29)(r9)
1671 std r30, VCPU_GPR(R30)(r9)
1672 std r31, VCPU_GPR(R31)(r9)
1673
1674 /* Save SPRGs */
1675 mfspr r3, SPRN_SPRG0
1676 mfspr r4, SPRN_SPRG1
1677 mfspr r5, SPRN_SPRG2
1678 mfspr r6, SPRN_SPRG3
1679 std r3, VCPU_SPRG0(r9)
1680 std r4, VCPU_SPRG1(r9)
1681 std r5, VCPU_SPRG2(r9)
1682 std r6, VCPU_SPRG3(r9)
1683
1684 /* save FP state */
1685 mr r3, r9
1686 bl kvmppc_save_fp
de56a948 1687
0a8eccef 1688#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
4bb3c7a0
PM
1689/*
1690 * Branch around the call if both CPU_FTR_TM and
1691 * CPU_FTR_P9_TM_HV_ASSIST are off.
1692 */
0a8eccef 1693BEGIN_FTR_SECTION
4bb3c7a0
PM
1694 b 91f
1695END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
67f8a8c1 1696 /*
7854f754 1697 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
67f8a8c1 1698 */
6f597c6b
SG
1699 mr r3, r9
1700 ld r4, VCPU_MSR(r3)
7854f754 1701 li r5, 0 /* don't preserve non-vol regs */
7b0e827c 1702 bl kvmppc_save_tm_hv
7854f754 1703 nop
6f597c6b 1704 ld r9, HSTATE_KVM_VCPU(r13)
4bb3c7a0 170591:
0a8eccef
PM
1706#endif
1707
e0b7ec05
PM
1708 /* Increment yield count if they have a VPA */
1709 ld r8, VCPU_VPA(r9) /* do they have a VPA? */
1710 cmpdi r8, 0
1711 beq 25f
0865a583
AG
1712 li r4, LPPACA_YIELDCOUNT
1713 LWZX_BE r3, r8, r4
e0b7ec05 1714 addi r3, r3, 1
0865a583 1715 STWX_BE r3, r8, r4
e0b7ec05
PM
1716 li r3, 1
1717 stb r3, VCPU_VPA_DIRTY(r9)
25:
1719 /* Save PMU registers if requested */
1720 /* r8 and cr0.eq are live here */
1721 mr r3, r9
1722 li r4, 1
e0b7ec05 1723 beq 21f /* if no VPA, save PMU stuff anyway */
1724 lbz r4, LPPACA_PMCINUSE(r8)
21:	bl	kvmhv_save_guest_pmu
1726 ld r9, HSTATE_KVM_VCPU(r13)
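	/*
	 * r4 tells kvmhv_save_guest_pmu whether the guest PMU registers
	 * are in use: it is forced to 1 when there is no VPA, otherwise
	 * it is taken from the VPA's LPPACA_PMCINUSE byte.
	 */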
de56a948 1727
e9cf1e08 1728 /* Restore host values of some registers */
7ceaa6dc
PM
1729BEGIN_FTR_SECTION
1730 ld r5, STACK_SLOT_CIABR(r1)
1731 ld r6, STACK_SLOT_DAWR(r1)
1732 ld r7, STACK_SLOT_DAWRX(r1)
1733 mtspr SPRN_CIABR, r5
b53221e7
MN
1734 /*
1735 * If the DAWR doesn't work, it's ok to write these here as
1736 * this value should always be zero
1737 */
7ceaa6dc
PM
1738 mtspr SPRN_DAWR, r6
1739 mtspr SPRN_DAWRX, r7
1740END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
e9cf1e08
PM
1741BEGIN_FTR_SECTION
1742 ld r5, STACK_SLOT_TID(r1)
1743 ld r6, STACK_SLOT_PSSCR(r1)
f4c51f84 1744 ld r7, STACK_SLOT_PID(r1)
4c3bb4cc 1745 ld r8, STACK_SLOT_IAMR(r1)
e9cf1e08
PM
1746 mtspr SPRN_TIDR, r5
1747 mtspr SPRN_PSSCR, r6
f4c51f84 1748 mtspr SPRN_PID, r7
4c3bb4cc 1749 mtspr SPRN_IAMR, r8
e9cf1e08 1750END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
a25bd72b
BH
1751
1752#ifdef CONFIG_PPC_RADIX_MMU
1753 /*
1754 * Are we running hash or radix ?
1755 */
67f8a8c1
PM
1756 ld r5, VCPU_KVM(r9)
1757 lbz r0, KVM_RADIX(r5)
1758 cmpwi cr2, r0, 0
2bf1071a 1759 beq cr2, 2f
a25bd72b 1760
df158189
PM
1761 /*
1762 * Radix: do eieio; tlbsync; ptesync sequence in case we
1763 * interrupted the guest between a tlbie and a ptesync.
1764 */
1765 eieio
1766 tlbsync
1767 ptesync
1768
a25bd72b
BH
1769 /* Radix: Handle the case where the guest used an illegal PID */
1770 LOAD_REG_ADDR(r4, mmu_base_pid)
1771 lwz r3, VCPU_GUEST_PID(r9)
1772 lwz r5, 0(r4)
1773 cmpw cr0,r3,r5
1774 blt 2f
1775
1776 /*
1777 * Illegal PID, the HW might have prefetched and cached in the TLB
1778 * some translations for the LPID 0 / guest PID combination which
1779 * Linux doesn't know about, so we need to flush that PID out of
1780 * the TLB. First we need to set LPIDR to 0 so tlbiel applies to
1781 * the right context.
1782 */
1783 li r0,0
1784 mtspr SPRN_LPID,r0
1785 isync
1786
1787 /* Then do a congruence class local flush */
1788 ld r6,VCPU_KVM(r9)
1789 lwz r0,KVM_TLB_SETS(r6)
1790 mtctr r0
1791 li r7,0x400 /* IS field = 0b01 */
1792 ptesync
1793 sldi r0,r3,32 /* RS has PID */
17941: PPC_TLBIEL(7,0,2,1,1) /* RIC=2, PRS=1, R=1 */
1795 addi r7,r7,0x1000
1796 bdnz 1b
1797 ptesync
1798
2:
a25bd72b 1800#endif /* CONFIG_PPC_RADIX_MMU */
e9cf1e08 1801
9e368f29 1802 /*
c17b98cf 1803 * POWER7/POWER8 guest -> host partition switch code.
9e368f29
PM
1804 * We don't have to lock against tlbies but we do
1805 * have to coordinate the hardware threads.
a8b48a4d 1806 * Here STACK_SLOT_TRAP(r1) contains the trap number.
9e368f29 1807 */
b6c295df 1808kvmhv_switch_to_host:
371fefd6 1809 /* Secondary threads wait for primary to do partition switch */
6af27c84 1810 ld r5,HSTATE_KVM_VCORE(r13)
e0b7ec05
PM
1811 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
1812 lbz r3,HSTATE_PTID(r13)
371fefd6
PM
1813 cmpwi r3,0
1814 beq 15f
1815 HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
1817 cmpwi r3,0
1818 bne 13b
1819 HMT_MEDIUM
1820 b 16f
1821
1822 /* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)
b4deba5c 1824 rlwinm r0,r3,32-8,0xff
371fefd6
PM
1825 clrldi r3,r3,56
1826 cmpw r3,r0
1827 bne 15b
1828 isync
1829
b4deba5c
PM
1830 /* Did we actually switch to the guest at all? */
1831 lbz r6, VCORE_IN_GUEST(r5)
1832 cmpwi r6, 0
1833 beq 19f
1834
371fefd6 1835 /* Primary thread switches back to host partition */
de56a948 1836 lwz r7,KVM_HOST_LPID(r4)
7a84084c
PM
1837BEGIN_FTR_SECTION
1838 ld r6,KVM_HOST_SDR1(r4)
de56a948
PM
1839 li r8,LPID_RSVD /* switch to reserved LPID */
1840 mtspr SPRN_LPID,r8
1841 ptesync
7a84084c
PM
1842 mtspr SPRN_SDR1,r6 /* switch to host page table */
1843END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
de56a948
PM
1844 mtspr SPRN_LPID,r7
1845 isync
93b0f4dc 1846
b005255e 1847BEGIN_FTR_SECTION
88b02cf9 1848 /* DPDES and VTB are shared between threads */
b005255e 1849 mfspr r7, SPRN_DPDES
88b02cf9 1850 mfspr r8, SPRN_VTB
b005255e 1851 std r7, VCORE_DPDES(r5)
88b02cf9 1852 std r8, VCORE_VTB(r5)
b005255e
MN
1853 /* clear DPDES so we don't get guest doorbells in the host */
1854 li r8, 0
1855 mtspr SPRN_DPDES, r8
1856END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1857
93b0f4dc 1858 /* Subtract timebase offset from timebase */
57b8daa7 1859 ld r8, VCORE_TB_OFFSET_APPL(r5)
93b0f4dc
PM
1860 cmpdi r8,0
1861 beq 17f
57b8daa7
PM
1862 li r0, 0
1863 std r0, VCORE_TB_OFFSET_APPL(r5)
c5fb80d3 1864 mftb r6 /* current guest timebase */
93b0f4dc
PM
1865 subf r8,r8,r6
1866 mtspr SPRN_TBU40,r8 /* update upper 40 bits */
1867 mftb r7 /* check if lower 24 bits overflowed */
1868 clrldi r6,r6,40
1869 clrldi r7,r7,40
1870 cmpld r7,r6
1871 bge 17f
1872 addis r8,r8,0x100 /* if so, increment upper 40 bits */
1873 mtspr SPRN_TBU40,r8
1874
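
Because TBU40 can only set the upper 40 bits of the timebase, the sequence above
re-checks the low 24 bits after the write. A hedged C sketch of the same logic
(write_tbu40() is a made-up stand-in for mtspr SPRN_TBU40, mftb() for the timebase
read):

	static void kvmhv_subtract_tb_offset(unsigned long tb_offset)
	{
		unsigned long tb = mftb();		/* current guest timebase */
		unsigned long new_tb = tb - tb_offset;	/* desired host timebase */

		write_tbu40(new_tb);			/* sets only the upper 40 bits */
		/* The low 24 bits keep counting across the write; if they wrapped
		 * meanwhile, the upper 40 bits are now one unit (1 << 24) short. */
		if ((mftb() & 0xffffff) < (tb & 0xffffff))
			write_tbu40(new_tb + 0x1000000);
	}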
df709a29
PM
187517:
1876 /*
1877 * If this is an HMI, we called kvmppc_realmode_hmi_handler
1878 * above, which may or may not have already called
1879 * kvmppc_subcore_exit_guest. Fortunately, all that
1880 * kvmppc_subcore_exit_guest does is clear a flag, so calling
1881 * it again here is benign even if kvmppc_realmode_hmi_handler
1882 * has already called it.
1883 */
1884 bl kvmppc_subcore_exit_guest
fd7bacbc
MS
1885 nop
188630: ld r5,HSTATE_KVM_VCORE(r13)
1887 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
1888
388cc6e1 1889 /* Reset PCR */
fd7bacbc 1890 ld r0, VCORE_PCR(r5)
388cc6e1
PM
1891 cmpdi r0, 0
1892 beq 18f
1893 li r0, 0
1894 mtspr SPRN_PCR, r0
189518:
93b0f4dc 1896 /* Signal secondary CPUs to continue */
371fefd6 1897 stb r0,VCORE_IN_GUEST(r5)
b4deba5c 189819: lis r8,0x7fff /* MAX_INT@h */
de56a948
PM
1899 mtspr SPRN_HDEC,r8
1900
c0101509
PM
190116:
1902BEGIN_FTR_SECTION
1903 /* On POWER9 with HPT-on-radix we need to wait for all other threads */
1904 ld r3, HSTATE_SPLIT_MODE(r13)
1905 cmpdi r3, 0
1906 beq 47f
1907 lwz r8, KVM_SPLIT_DO_RESTORE(r3)
1908 cmpwi r8, 0
1909 beq 47f
c0101509
PM
1910 bl kvmhv_p9_restore_lpcr
1911 nop
c0101509
PM
1912 b 48f
191347:
1914END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1915 ld r8,KVM_HOST_LPCR(r4)
de56a948
PM
1916 mtspr SPRN_LPCR,r8
1917 isync
c0101509 191848:
b6c295df
PM
1919#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1920 /* Finish timing, if we have a vcpu */
1921 ld r4, HSTATE_KVM_VCPU(r13)
1922 cmpdi r4, 0
1923 li r3, 0
1924 beq 2f
1925 bl kvmhv_accumulate_time
19262:
1927#endif
44a3add8
PM
1928 /* Unset guest mode */
1929 li r0, KVM_GUEST_MODE_NONE
1930 stb r0, HSTATE_IN_GUEST(r13)
1931
a8b48a4d 1932 lwz r12, STACK_SLOT_TRAP(r1) /* return trap # in r12 */
7ceaa6dc
PM
1933 ld r0, SFS+PPC_LR_STKOFF(r1)
1934 addi r1, r1, SFS
218309b7
PM
1935 mtlr r0
1936 blr
b4072df4 1937
df709a29
PM
1938kvmppc_guest_external:
 1939	/* External interrupt; first check for host_ipi. If this is
 1940	 * set, we know the host wants us out, so let's do it now.
1941 */
1942 bl kvmppc_read_intr
1943
1944 /*
1945 * Restore the active volatile registers after returning from
1946 * a C function.
1947 */
1948 ld r9, HSTATE_KVM_VCPU(r13)
1949 li r12, BOOK3S_INTERRUPT_EXTERNAL
1950
1951 /*
1952 * kvmppc_read_intr return codes:
1953 *
1954 * Exit to host (r3 > 0)
1955 * 1 An interrupt is pending that needs to be handled by the host
1956 * Exit guest and return to host by branching to guest_exit_cont
1957 *
1958 * 2 Passthrough that needs completion in the host
1959 * Exit guest and return to host by branching to guest_exit_cont
1960 * However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD
1961 * to indicate to the host to complete handling the interrupt
1962 *
 1963	 * Before returning to the guest, we check if any CPU is heading out
 1964	 * to the host; if so, we head out also. If none are, the return
 1965	 * values <= 0 below apply.
1966 *
1967 * Return to guest (r3 <= 0)
1968 * 0 No external interrupt is pending
1969 * -1 A guest wakeup IPI (which has now been cleared)
1970 * In either case, we return to guest to deliver any pending
1971 * guest interrupts.
1972 *
1973 * -2 A PCI passthrough external interrupt was handled
1974 * (interrupt was delivered directly to guest)
1975 * Return to guest to deliver any pending guest interrupts.
1976 */
1977
1978 cmpdi r3, 1
1979 ble 1f
1980
1981 /* Return code = 2 */
1982 li r12, BOOK3S_INTERRUPT_HV_RM_HARD
1983 stw r12, VCPU_TRAP(r9)
1984 b guest_exit_cont
1985
19861: /* Return code <= 1 */
1987 cmpdi r3, 0
1988 bgt guest_exit_cont
1989
1990 /* Return code <= 0 */
1991maybe_reenter_guest:
1992 ld r5, HSTATE_KVM_VCORE(r13)
1993 lwz r0, VCORE_ENTRY_EXIT(r5)
1994 cmpwi r0, 0x100
1995 mr r4, r9
1996 blt deliver_guest_interrupt
1997 b guest_exit_cont
1998
4bb3c7a0
PM
1999#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2000/*
2001 * Softpatch interrupt for transactional memory emulation cases
2002 * on POWER9 DD2.2. This is early in the guest exit path - we
2003 * haven't saved registers or done a treclaim yet.
2004 */
2005kvmppc_tm_emul:
2006 /* Save instruction image in HEIR */
2007 mfspr r3, SPRN_HEIR
2008 stw r3, VCPU_HEIR(r9)
2009
2010 /*
2011 * The cases we want to handle here are those where the guest
2012 * is in real suspend mode and is trying to transition to
2013 * transactional mode.
2014 */
2015 lbz r0, HSTATE_FAKE_SUSPEND(r13)
2016 cmpwi r0, 0 /* keep exiting guest if in fake suspend */
2017 bne guest_exit_cont
2018 rldicl r3, r11, 64 - MSR_TS_S_LG, 62
2019 cmpwi r3, 1 /* or if not in suspend state */
2020 bne guest_exit_cont
2021
2022 /* Call C code to do the emulation */
2023 mr r3, r9
2024 bl kvmhv_p9_tm_emulation_early
2025 nop
2026 ld r9, HSTATE_KVM_VCPU(r13)
2027 li r12, BOOK3S_INTERRUPT_HV_SOFTPATCH
2028 cmpwi r3, 0
2029 beq guest_exit_cont /* continue exiting if not handled */
2030 ld r10, VCPU_PC(r9)
2031 ld r11, VCPU_MSR(r9)
2032 b fast_interrupt_c_return /* go back to guest if handled */
2033#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
2034
697d3899
PM
2035/*
2036 * Check whether an HDSI is an HPTE not found fault or something else.
2037 * If it is an HPTE not found fault that is due to the guest accessing
 2038	 * a page that it has mapped but which we have paged out, then
2039 * we continue on with the guest exit path. In all other cases,
2040 * reflect the HDSI to the guest as a DSI.
2041 */
2042kvmppc_hdsi:
f4c51f84
PM
2043 ld r3, VCPU_KVM(r9)
2044 lbz r0, KVM_RADIX(r3)
697d3899
PM
2045 mfspr r4, SPRN_HDAR
2046 mfspr r6, SPRN_HDSISR
e001fa78
MN
2047BEGIN_FTR_SECTION
2048 /* Look for DSISR canary. If we find it, retry instruction */
2049 cmpdi r6, 0x7fff
2050 beq 6f
2051END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
2052 cmpwi r0, 0
f4c51f84 2053 bne .Lradix_hdsi /* on radix, just save DAR/DSISR/ASDR */
4cf302bc
PM
2054 /* HPTE not found fault or protection fault? */
2055 andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
697d3899 2056 beq 1f /* if not, send it to the guest */
4e5acdc2
PM
2057 andi. r0, r11, MSR_DR /* data relocation enabled? */
2058 beq 3f
ef8c640c
PM
2059BEGIN_FTR_SECTION
2060 mfspr r5, SPRN_ASDR /* on POWER9, use ASDR to get VSID */
2061 b 4f
2062END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
697d3899 2063 clrrdi r0, r4, 28
c75df6f9 2064 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
cf29b215
PM
2065 li r0, BOOK3S_INTERRUPT_DATA_SEGMENT
2066 bne 7f /* if no SLB entry found */
697d3899
PM
20674: std r4, VCPU_FAULT_DAR(r9)
2068 stw r6, VCPU_FAULT_DSISR(r9)
2069
2070 /* Search the hash table. */
2071 mr r3, r9 /* vcpu pointer */
342d3db7 2072 li r7, 1 /* data fault */
b1576fec 2073 bl kvmppc_hpte_hv_fault
697d3899
PM
2074 ld r9, HSTATE_KVM_VCPU(r13)
2075 ld r10, VCPU_PC(r9)
2076 ld r11, VCPU_MSR(r9)
2077 li r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
2078 cmpdi r3, 0 /* retry the instruction */
2079 beq 6f
2080 cmpdi r3, -1 /* handle in kernel mode */
b4072df4 2081 beq guest_exit_cont
697d3899
PM
2082 cmpdi r3, -2 /* MMIO emulation; need instr word */
2083 beq 2f
2084
cf29b215 2085 /* Synthesize a DSI (or DSegI) for the guest */
697d3899
PM
2086 ld r4, VCPU_FAULT_DAR(r9)
2087 mr r6, r3
cf29b215 20881: li r0, BOOK3S_INTERRUPT_DATA_STORAGE
697d3899 2089 mtspr SPRN_DSISR, r6
cf29b215 20907: mtspr SPRN_DAR, r4
697d3899
PM
2091 mtspr SPRN_SRR0, r10
2092 mtspr SPRN_SRR1, r11
cf29b215 2093 mr r10, r0
e4e38121 2094 bl kvmppc_msr_interrupt
b4072df4 2095fast_interrupt_c_return:
697d3899 20966: ld r7, VCPU_CTR(r9)
c63517c2 2097 ld r8, VCPU_XER(r9)
697d3899
PM
2098 mtctr r7
2099 mtxer r8
2100 mr r4, r9
2101 b fast_guest_return
2102
21033: ld r5, VCPU_KVM(r9) /* not relocated, use VRMA */
2104 ld r5, KVM_VRMA_SLB_V(r5)
2105 b 4b
2106
2107 /* If this is for emulated MMIO, load the instruction word */
21082: li r8, KVM_INST_FETCH_FAILED /* In case lwz faults */
2109
2110 /* Set guest mode to 'jump over instruction' so if lwz faults
2111 * we'll just continue at the next IP. */
2112 li r0, KVM_GUEST_MODE_SKIP
2113 stb r0, HSTATE_IN_GUEST(r13)
2114
2115 /* Do the access with MSR:DR enabled */
2116 mfmsr r3
2117 ori r4, r3, MSR_DR /* Enable paging for data */
2118 mtmsrd r4
2119 lwz r8, 0(r10)
2120 mtmsrd r3
2121
2122 /* Store the result */
2123 stw r8, VCPU_LAST_INST(r9)
2124
2125 /* Unset guest mode. */
44a3add8 2126 li r0, KVM_GUEST_MODE_HOST_HV
697d3899 2127 stb r0, HSTATE_IN_GUEST(r13)
b4072df4 2128 b guest_exit_cont
de56a948 2129
f4c51f84
PM
2130.Lradix_hdsi:
2131 std r4, VCPU_FAULT_DAR(r9)
2132 stw r6, VCPU_FAULT_DSISR(r9)
2133.Lradix_hisi:
2134 mfspr r5, SPRN_ASDR
2135 std r5, VCPU_FAULT_GPA(r9)
2136 b guest_exit_cont
2137
342d3db7
PM
2138/*
2139 * Similarly for an HISI, reflect it to the guest as an ISI unless
2140 * it is an HPTE not found fault for a page that we have paged out.
2141 */
2142kvmppc_hisi:
f4c51f84
PM
2143 ld r3, VCPU_KVM(r9)
2144 lbz r0, KVM_RADIX(r3)
2145 cmpwi r0, 0
2146 bne .Lradix_hisi /* for radix, just save ASDR */
342d3db7
PM
2147 andis. r0, r11, SRR1_ISI_NOPT@h
2148 beq 1f
4e5acdc2
PM
2149 andi. r0, r11, MSR_IR /* instruction relocation enabled? */
2150 beq 3f
ef8c640c
PM
2151BEGIN_FTR_SECTION
2152 mfspr r5, SPRN_ASDR /* on POWER9, use ASDR to get VSID */
2153 b 4f
2154END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
342d3db7 2155 clrrdi r0, r10, 28
c75df6f9 2156 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
cf29b215
PM
2157 li r0, BOOK3S_INTERRUPT_INST_SEGMENT
2158 bne 7f /* if no SLB entry found */
342d3db7
PM
21594:
2160 /* Search the hash table. */
2161 mr r3, r9 /* vcpu pointer */
2162 mr r4, r10
2163 mr r6, r11
2164 li r7, 0 /* instruction fault */
b1576fec 2165 bl kvmppc_hpte_hv_fault
342d3db7
PM
2166 ld r9, HSTATE_KVM_VCPU(r13)
2167 ld r10, VCPU_PC(r9)
2168 ld r11, VCPU_MSR(r9)
2169 li r12, BOOK3S_INTERRUPT_H_INST_STORAGE
2170 cmpdi r3, 0 /* retry the instruction */
b4072df4 2171 beq fast_interrupt_c_return
342d3db7 2172 cmpdi r3, -1 /* handle in kernel mode */
b4072df4 2173 beq guest_exit_cont
342d3db7 2174
cf29b215 2175 /* Synthesize an ISI (or ISegI) for the guest */
342d3db7 2176 mr r11, r3
cf29b215
PM
21771: li r0, BOOK3S_INTERRUPT_INST_STORAGE
21787: mtspr SPRN_SRR0, r10
342d3db7 2179 mtspr SPRN_SRR1, r11
cf29b215 2180 mr r10, r0
e4e38121 2181 bl kvmppc_msr_interrupt
b4072df4 2182 b fast_interrupt_c_return
342d3db7
PM
2183
21843: ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */
2185 ld r5, KVM_VRMA_SLB_V(r6)
2186 b 4b
2187
a8606e20
PM
2188/*
2189 * Try to handle an hcall in real mode.
2190 * Returns to the guest if we handle it, or continues on up to
2191 * the kernel if we can't (i.e. if we don't have a handler for
2192 * it, or if the handler returns H_TOO_HARD).
1f09c3ed
PM
2193 *
2194 * r5 - r8 contain hcall args,
2195 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
a8606e20 2196 */
a8606e20 2197hcall_try_real_mode:
c75df6f9 2198 ld r3,VCPU_GPR(R3)(r9)
a8606e20 2199 andi. r0,r11,MSR_PR
27025a60
LPF
2200 /* sc 1 from userspace - reflect to guest syscall */
2201 bne sc_1_fast_return
a8606e20
PM
2202 clrrdi r3,r3,2
2203 cmpldi r3,hcall_real_table_end - hcall_real_table
b4072df4 2204 bge guest_exit_cont
699a0ea0
PM
2205 /* See if this hcall is enabled for in-kernel handling */
2206 ld r4, VCPU_KVM(r9)
2207 srdi r0, r3, 8 /* r0 = (r3 / 4) >> 6 */
2208 sldi r0, r0, 3 /* index into kvm->arch.enabled_hcalls[] */
2209 add r4, r4, r0
2210 ld r0, KVM_ENABLED_HCALLS(r4)
2211 rlwinm r4, r3, 32-2, 0x3f /* r4 = (r3 / 4) & 0x3f */
2212 srd r0, r0, r4
2213 andi. r0, r0, 1
2214 beq guest_exit_cont
2215 /* Get pointer to handler, if any, and call it */
a8606e20 2216 LOAD_REG_ADDR(r4, hcall_real_table)
4baa1d87 2217 lwax r3,r3,r4
a8606e20 2218 cmpwi r3,0
b4072df4 2219 beq guest_exit_cont
05a308c7
AB
2220 add r12,r3,r4
2221 mtctr r12
a8606e20 2222 mr r3,r9 /* get vcpu pointer */
c75df6f9 2223 ld r4,VCPU_GPR(R4)(r9)
a8606e20
PM
2224 bctrl
2225 cmpdi r3,H_TOO_HARD
2226 beq hcall_real_fallback
2227 ld r4,HSTATE_KVM_VCPU(r13)
c75df6f9 2228 std r3,VCPU_GPR(R3)(r4)
a8606e20
PM
2229 ld r10,VCPU_PC(r4)
2230 ld r11,VCPU_MSR(r4)
2231 b fast_guest_return
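
Putting the bounds check, the enabled_hcalls bitmap test and the table dispatch above
into C gives roughly the following. This is a hedged sketch assuming the usual KVM
headers (the function and parameter names are invented and the argument passing is
simplified); the real table lives in assembly as 32-bit offsets from hcall_real_table,
with 0 meaning "no real-mode handler":

	typedef long (*hcall_fn_t)(struct kvm_vcpu *vcpu);

	static long try_real_mode_hcall(struct kvm_vcpu *vcpu, unsigned long nr,
					const unsigned long *enabled_hcalls,
					const s32 *table, unsigned long nentries)
	{
		unsigned long idx = nr / 4;	/* hcall numbers are multiples of 4 */
		hcall_fn_t fn;

		if (idx >= nentries)
			return H_TOO_HARD;	/* not in the table: let the host handle it */
		if (!((enabled_hcalls[idx / 64] >> (idx % 64)) & 1))
			return H_TOO_HARD;	/* not enabled for in-kernel handling */
		if (!table[idx])
			return H_TOO_HARD;	/* no real-mode handler registered */
		/* Entries are 32-bit offsets from the start of the table. */
		fn = (hcall_fn_t)((unsigned long)table + table[idx]);
		return fn(vcpu);		/* H_TOO_HARD from the handler also falls back */
	}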
2232
27025a60
LPF
2233sc_1_fast_return:
2234 mtspr SPRN_SRR0,r10
2235 mtspr SPRN_SRR1,r11
2236 li r10, BOOK3S_INTERRUPT_SYSCALL
e4e38121 2237 bl kvmppc_msr_interrupt
27025a60
LPF
2238 mr r4,r9
2239 b fast_guest_return
2240
a8606e20
PM
 2241	/* We've attempted a real mode hcall, but it has been punted back
 2242	 * to userspace. We need to restore some clobbered volatiles
2243 * before resuming the pass-it-to-qemu path */
2244hcall_real_fallback:
2245 li r12,BOOK3S_INTERRUPT_SYSCALL
2246 ld r9, HSTATE_KVM_VCPU(r13)
a8606e20 2247
b4072df4 2248 b guest_exit_cont
a8606e20
PM
2249
2250 .globl hcall_real_table
2251hcall_real_table:
2252 .long 0 /* 0 - unused */
c1fb0194
AB
2253 .long DOTSYM(kvmppc_h_remove) - hcall_real_table
2254 .long DOTSYM(kvmppc_h_enter) - hcall_real_table
2255 .long DOTSYM(kvmppc_h_read) - hcall_real_table
cdeee518
PM
2256 .long DOTSYM(kvmppc_h_clear_mod) - hcall_real_table
2257 .long DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
c1fb0194
AB
2258 .long DOTSYM(kvmppc_h_protect) - hcall_real_table
2259 .long DOTSYM(kvmppc_h_get_tce) - hcall_real_table
31217db7 2260 .long DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table
a8606e20 2261 .long 0 /* 0x24 - H_SET_SPRG0 */
c1fb0194 2262 .long DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
a8606e20
PM
2263 .long 0 /* 0x2c */
2264 .long 0 /* 0x30 */
2265 .long 0 /* 0x34 */
2266 .long 0 /* 0x38 */
2267 .long 0 /* 0x3c */
2268 .long 0 /* 0x40 */
2269 .long 0 /* 0x44 */
2270 .long 0 /* 0x48 */
2271 .long 0 /* 0x4c */
2272 .long 0 /* 0x50 */
2273 .long 0 /* 0x54 */
2274 .long 0 /* 0x58 */
2275 .long 0 /* 0x5c */
2276 .long 0 /* 0x60 */
e7d26f28 2277#ifdef CONFIG_KVM_XICS
c1fb0194
AB
2278 .long DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
2279 .long DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
2280 .long DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
5af50993 2281 .long DOTSYM(kvmppc_rm_h_ipoll) - hcall_real_table
c1fb0194 2282 .long DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
e7d26f28
BH
2283#else
2284 .long 0 /* 0x64 - H_EOI */
2285 .long 0 /* 0x68 - H_CPPR */
2286 .long 0 /* 0x6c - H_IPI */
2287 .long 0 /* 0x70 - H_IPOLL */
2288 .long 0 /* 0x74 - H_XIRR */
2289#endif
a8606e20
PM
2290 .long 0 /* 0x78 */
2291 .long 0 /* 0x7c */
2292 .long 0 /* 0x80 */
2293 .long 0 /* 0x84 */
2294 .long 0 /* 0x88 */
2295 .long 0 /* 0x8c */
2296 .long 0 /* 0x90 */
2297 .long 0 /* 0x94 */
2298 .long 0 /* 0x98 */
2299 .long 0 /* 0x9c */
2300 .long 0 /* 0xa0 */
2301 .long 0 /* 0xa4 */
2302 .long 0 /* 0xa8 */
2303 .long 0 /* 0xac */
2304 .long 0 /* 0xb0 */
2305 .long 0 /* 0xb4 */
2306 .long 0 /* 0xb8 */
2307 .long 0 /* 0xbc */
2308 .long 0 /* 0xc0 */
2309 .long 0 /* 0xc4 */
2310 .long 0 /* 0xc8 */
2311 .long 0 /* 0xcc */
2312 .long 0 /* 0xd0 */
2313 .long 0 /* 0xd4 */
2314 .long 0 /* 0xd8 */
2315 .long 0 /* 0xdc */
c1fb0194 2316 .long DOTSYM(kvmppc_h_cede) - hcall_real_table
90fd09f8 2317 .long DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
a8606e20
PM
2318 .long 0 /* 0xe8 */
2319 .long 0 /* 0xec */
2320 .long 0 /* 0xf0 */
2321 .long 0 /* 0xf4 */
2322 .long 0 /* 0xf8 */
2323 .long 0 /* 0xfc */
2324 .long 0 /* 0x100 */
2325 .long 0 /* 0x104 */
2326 .long 0 /* 0x108 */
2327 .long 0 /* 0x10c */
2328 .long 0 /* 0x110 */
2329 .long 0 /* 0x114 */
2330 .long 0 /* 0x118 */
2331 .long 0 /* 0x11c */
2332 .long 0 /* 0x120 */
c1fb0194 2333 .long DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
8563bf52
PM
2334 .long 0 /* 0x128 */
2335 .long 0 /* 0x12c */
2336 .long 0 /* 0x130 */
c1fb0194 2337 .long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
31217db7 2338 .long DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table
d3695aa4 2339 .long DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table
e928e9cb
ME
2340 .long 0 /* 0x140 */
2341 .long 0 /* 0x144 */
2342 .long 0 /* 0x148 */
2343 .long 0 /* 0x14c */
2344 .long 0 /* 0x150 */
2345 .long 0 /* 0x154 */
2346 .long 0 /* 0x158 */
2347 .long 0 /* 0x15c */
2348 .long 0 /* 0x160 */
2349 .long 0 /* 0x164 */
2350 .long 0 /* 0x168 */
2351 .long 0 /* 0x16c */
2352 .long 0 /* 0x170 */
2353 .long 0 /* 0x174 */
2354 .long 0 /* 0x178 */
2355 .long 0 /* 0x17c */
2356 .long 0 /* 0x180 */
2357 .long 0 /* 0x184 */
2358 .long 0 /* 0x188 */
2359 .long 0 /* 0x18c */
2360 .long 0 /* 0x190 */
2361 .long 0 /* 0x194 */
2362 .long 0 /* 0x198 */
2363 .long 0 /* 0x19c */
2364 .long 0 /* 0x1a0 */
2365 .long 0 /* 0x1a4 */
2366 .long 0 /* 0x1a8 */
2367 .long 0 /* 0x1ac */
2368 .long 0 /* 0x1b0 */
2369 .long 0 /* 0x1b4 */
2370 .long 0 /* 0x1b8 */
2371 .long 0 /* 0x1bc */
2372 .long 0 /* 0x1c0 */
2373 .long 0 /* 0x1c4 */
2374 .long 0 /* 0x1c8 */
2375 .long 0 /* 0x1cc */
2376 .long 0 /* 0x1d0 */
2377 .long 0 /* 0x1d4 */
2378 .long 0 /* 0x1d8 */
2379 .long 0 /* 0x1dc */
2380 .long 0 /* 0x1e0 */
2381 .long 0 /* 0x1e4 */
2382 .long 0 /* 0x1e8 */
2383 .long 0 /* 0x1ec */
2384 .long 0 /* 0x1f0 */
2385 .long 0 /* 0x1f4 */
2386 .long 0 /* 0x1f8 */
2387 .long 0 /* 0x1fc */
2388 .long 0 /* 0x200 */
2389 .long 0 /* 0x204 */
2390 .long 0 /* 0x208 */
2391 .long 0 /* 0x20c */
2392 .long 0 /* 0x210 */
2393 .long 0 /* 0x214 */
2394 .long 0 /* 0x218 */
2395 .long 0 /* 0x21c */
2396 .long 0 /* 0x220 */
2397 .long 0 /* 0x224 */
2398 .long 0 /* 0x228 */
2399 .long 0 /* 0x22c */
2400 .long 0 /* 0x230 */
2401 .long 0 /* 0x234 */
2402 .long 0 /* 0x238 */
2403 .long 0 /* 0x23c */
2404 .long 0 /* 0x240 */
2405 .long 0 /* 0x244 */
2406 .long 0 /* 0x248 */
2407 .long 0 /* 0x24c */
2408 .long 0 /* 0x250 */
2409 .long 0 /* 0x254 */
2410 .long 0 /* 0x258 */
2411 .long 0 /* 0x25c */
2412 .long 0 /* 0x260 */
2413 .long 0 /* 0x264 */
2414 .long 0 /* 0x268 */
2415 .long 0 /* 0x26c */
2416 .long 0 /* 0x270 */
2417 .long 0 /* 0x274 */
2418 .long 0 /* 0x278 */
2419 .long 0 /* 0x27c */
2420 .long 0 /* 0x280 */
2421 .long 0 /* 0x284 */
2422 .long 0 /* 0x288 */
2423 .long 0 /* 0x28c */
2424 .long 0 /* 0x290 */
2425 .long 0 /* 0x294 */
2426 .long 0 /* 0x298 */
2427 .long 0 /* 0x29c */
2428 .long 0 /* 0x2a0 */
2429 .long 0 /* 0x2a4 */
2430 .long 0 /* 0x2a8 */
2431 .long 0 /* 0x2ac */
2432 .long 0 /* 0x2b0 */
2433 .long 0 /* 0x2b4 */
2434 .long 0 /* 0x2b8 */
2435 .long 0 /* 0x2bc */
2436 .long 0 /* 0x2c0 */
2437 .long 0 /* 0x2c4 */
2438 .long 0 /* 0x2c8 */
2439 .long 0 /* 0x2cc */
2440 .long 0 /* 0x2d0 */
2441 .long 0 /* 0x2d4 */
2442 .long 0 /* 0x2d8 */
2443 .long 0 /* 0x2dc */
2444 .long 0 /* 0x2e0 */
2445 .long 0 /* 0x2e4 */
2446 .long 0 /* 0x2e8 */
2447 .long 0 /* 0x2ec */
2448 .long 0 /* 0x2f0 */
2449 .long 0 /* 0x2f4 */
2450 .long 0 /* 0x2f8 */
5af50993
BH
2451#ifdef CONFIG_KVM_XICS
2452 .long DOTSYM(kvmppc_rm_h_xirr_x) - hcall_real_table
2453#else
2454 .long 0 /* 0x2fc - H_XIRR_X*/
2455#endif
e928e9cb 2456 .long DOTSYM(kvmppc_h_random) - hcall_real_table
ae2113a4 2457 .globl hcall_real_table_end
a8606e20
PM
2458hcall_real_table_end:
2459
8563bf52
PM
2460_GLOBAL(kvmppc_h_set_xdabr)
2461 andi. r0, r5, DABRX_USER | DABRX_KERNEL
2462 beq 6f
2463 li r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
2464 andc. r0, r5, r0
2465 beq 3f
24666: li r3, H_PARAMETER
2467 blr
2468
a8606e20 2469_GLOBAL(kvmppc_h_set_dabr)
8563bf52
PM
2470 li r5, DABRX_USER | DABRX_KERNEL
24713:
eee7ff9d
MN
2472BEGIN_FTR_SECTION
2473 b 2f
2474END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
a8606e20 2475 std r4,VCPU_DABR(r3)
8563bf52
PM
2476 stw r5, VCPU_DABRX(r3)
2477 mtspr SPRN_DABRX, r5
8943633c
PM
2478 /* Work around P7 bug where DABR can get corrupted on mtspr */
24791: mtspr SPRN_DABR,r4
2480 mfspr r5, SPRN_DABR
2481 cmpd r4, r5
2482 bne 1b
2483 isync
a8606e20
PM
2484 li r3,0
2485 blr
2486
e8ebedbf
MN
24872:
2488BEGIN_FTR_SECTION
2489 /* POWER9 with disabled DAWR */
ca9a16c3 2490 li r3, H_HARDWARE
e8ebedbf
MN
2491 blr
2492END_FTR_SECTION_IFCLR(CPU_FTR_DAWR)
8563bf52 2493 /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
e8ebedbf 2494 rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW
760a7364 2495 rlwimi r5, r4, 2, DAWRX_WT
8563bf52
PM
2496 clrrdi r4, r4, 3
2497 std r4, VCPU_DAWR(r3)
2498 std r5, VCPU_DAWRX(r3)
2499 mtspr SPRN_DAWR, r4
2500 mtspr SPRN_DAWRX, r5
2501 li r3, 0
a8606e20
PM
2502 blr
2503
1f09c3ed 2504_GLOBAL(kvmppc_h_cede) /* r3 = vcpu pointer, r11 = msr, r13 = paca */
19ccb76a
PM
2505 ori r11,r11,MSR_EE
2506 std r11,VCPU_MSR(r3)
2507 li r0,1
2508 stb r0,VCPU_CEDED(r3)
2509 sync /* order setting ceded vs. testing prodded */
2510 lbz r5,VCPU_PRODDED(r3)
2511 cmpwi r5,0
04f995a5 2512 bne kvm_cede_prodded
6af27c84
PM
2513 li r12,0 /* set trap to 0 to say hcall is handled */
2514 stw r12,VCPU_TRAP(r3)
19ccb76a 2515 li r0,H_SUCCESS
c75df6f9 2516 std r0,VCPU_GPR(R3)(r3)
19ccb76a
PM
2517
2518 /*
2519 * Set our bit in the bitmask of napping threads unless all the
2520 * other threads are already napping, in which case we send this
2521 * up to the host.
2522 */
2523 ld r5,HSTATE_KVM_VCORE(r13)
e0b7ec05 2524 lbz r6,HSTATE_PTID(r13)
19ccb76a
PM
2525 lwz r8,VCORE_ENTRY_EXIT(r5)
2526 clrldi r8,r8,56
2527 li r0,1
2528 sld r0,r0,r6
2529 addi r6,r5,VCORE_NAPPING_THREADS
253031: lwarx r4,0,r6
2531 or r4,r4,r0
7d6c40da
PM
2532 cmpw r4,r8
2533 beq kvm_cede_exit
19ccb76a
PM
2534 stwcx. r4,0,r6
2535 bne 31b
7d6c40da 2536 /* order napping_threads update vs testing entry_exit_map */
f019b7ad 2537 isync
e0b7ec05 2538 li r0,NAPPING_CEDE
19ccb76a 2539 stb r0,HSTATE_NAPPING(r13)
19ccb76a
PM
2540 lwz r7,VCORE_ENTRY_EXIT(r5)
2541 cmpwi r7,0x100
2542 bge 33f /* another thread already exiting */
2543
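
The napping_threads update above is a lwarx/stwcx. loop; in C it is essentially an
atomic compare-and-swap, sketched below. Hedged: the field names follow the VCORE_*
offsets used here, but the exact types and helper name are assumptions:

	/* Returns false if setting our bit would mean every thread that entered
	 * the guest is now napping, in which case the caller exits to the host. */
	static bool kvmhv_try_mark_napping(struct kvmppc_vcore *vc, int ptid)
	{
		u32 entered = vc->entry_exit_map & 0xff;  /* low byte: threads that entered */
		u32 old, new;

		do {
			old = vc->napping_threads;
			new = old | (1u << ptid);
			if (new == entered)
				return false;
		} while (cmpxchg(&vc->napping_threads, old, new) != old);
		return true;
	}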
2544/*
2545 * Although not specifically required by the architecture, POWER7
2546 * preserves the following registers in nap mode, even if an SMT mode
2547 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
2548 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
2549 */
2550 /* Save non-volatile GPRs */
c75df6f9
MN
2551 std r14, VCPU_GPR(R14)(r3)
2552 std r15, VCPU_GPR(R15)(r3)
2553 std r16, VCPU_GPR(R16)(r3)
2554 std r17, VCPU_GPR(R17)(r3)
2555 std r18, VCPU_GPR(R18)(r3)
2556 std r19, VCPU_GPR(R19)(r3)
2557 std r20, VCPU_GPR(R20)(r3)
2558 std r21, VCPU_GPR(R21)(r3)
2559 std r22, VCPU_GPR(R22)(r3)
2560 std r23, VCPU_GPR(R23)(r3)
2561 std r24, VCPU_GPR(R24)(r3)
2562 std r25, VCPU_GPR(R25)(r3)
2563 std r26, VCPU_GPR(R26)(r3)
2564 std r27, VCPU_GPR(R27)(r3)
2565 std r28, VCPU_GPR(R28)(r3)
2566 std r29, VCPU_GPR(R29)(r3)
2567 std r30, VCPU_GPR(R30)(r3)
2568 std r31, VCPU_GPR(R31)(r3)
19ccb76a
PM
2569
2570 /* save FP state */
595e4f7e 2571 bl kvmppc_save_fp
19ccb76a 2572
93d17397 2573#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
4bb3c7a0
PM
2574/*
2575 * Branch around the call if both CPU_FTR_TM and
2576 * CPU_FTR_P9_TM_HV_ASSIST are off.
2577 */
93d17397 2578BEGIN_FTR_SECTION
4bb3c7a0
PM
2579 b 91f
2580END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
67f8a8c1 2581 /*
7854f754 2582 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
67f8a8c1 2583 */
6f597c6b
SG
2584 ld r3, HSTATE_KVM_VCPU(r13)
2585 ld r4, VCPU_MSR(r3)
7854f754 2586 li r5, 0 /* don't preserve non-vol regs */
7b0e827c 2587 bl kvmppc_save_tm_hv
7854f754 2588 nop
4bb3c7a0 258991:
93d17397
PM
2590#endif
2591
fd6d53b1
PM
2592 /*
2593 * Set DEC to the smaller of DEC and HDEC, so that we wake
2594 * no later than the end of our timeslice (HDEC interrupts
2595 * don't wake us from nap).
2596 */
2597 mfspr r3, SPRN_DEC
2598 mfspr r4, SPRN_HDEC
2599 mftb r5
1bc3fe81
PM
2600BEGIN_FTR_SECTION
2601 /* On P9 check whether the guest has large decrementer mode enabled */
2602 ld r6, HSTATE_KVM_VCORE(r13)
2603 ld r6, VCORE_LPCR(r6)
2604 andis. r6, r6, LPCR_LD@h
2605 bne 68f
2606END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
2f272463 2607 extsw r3, r3
1bc3fe81 260868: EXTEND_HDEC(r4)
2f272463 2609 cmpd r3, r4
fd6d53b1
PM
2610 ble 67f
2611 mtspr SPRN_DEC, r4
261267:
2613 /* save expiry time of guest decrementer */
fd6d53b1
PM
2614 add r3, r3, r5
2615 ld r4, HSTATE_KVM_VCPU(r13)
2616 ld r5, HSTATE_KVM_VCORE(r13)
57b8daa7 2617 ld r6, VCORE_TB_OFFSET_APPL(r5)
fd6d53b1
PM
2618 subf r3, r6, r3 /* convert to host TB value */
2619 std r3, VCPU_DEC_EXPIRES(r4)
2620
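
In C terms, the DEC/HDEC handling above is roughly the following hedged sketch (the
function name is invented; dec_expires corresponds to VCPU_DEC_EXPIRES and mtspr() is
the usual accessor):

	static void kvmhv_cede_set_dec(struct kvm_vcpu *vcpu, long dec, long hdec,
				       unsigned long tb, unsigned long tb_offset)
	{
		if (hdec < dec)
			mtspr(SPRN_DEC, hdec);	/* wake no later than the end of our timeslice */
		/* Save the guest decrementer expiry time, converted to host timebase. */
		vcpu->arch.dec_expires = dec + tb - tb_offset;
	}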
b6c295df
PM
2621#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
2622 ld r4, HSTATE_KVM_VCPU(r13)
2623 addi r3, r4, VCPU_TB_CEDE
2624 bl kvmhv_accumulate_time
2625#endif
2626
ccc07772
PM
2627 lis r3, LPCR_PECEDP@h /* Do wake on privileged doorbell */
2628
19ccb76a 2629 /*
aa31e843 2630	 * Take a nap until a decrementer, external or doorbell interrupt
ccc07772 2631 * occurs, with PECE1 and PECE0 set in LPCR.
66feed61 2632 * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
ccc07772 2633 * Also clear the runlatch bit before napping.
19ccb76a 2634 */
56548fc0 2635kvm_do_nap:
1f09c3ed
PM
2636 mfspr r0, SPRN_CTRLF
2637 clrrdi r0, r0, 1
2638 mtspr SPRN_CTRLT, r0
582b910e 2639
f0888f70
PM
2640 li r0,1
2641 stb r0,HSTATE_HWTHREAD_REQ(r13)
19ccb76a
PM
2642 mfspr r5,SPRN_LPCR
2643 ori r5,r5,LPCR_PECE0 | LPCR_PECE1
aa31e843 2644BEGIN_FTR_SECTION
66feed61 2645 ori r5, r5, LPCR_PECEDH
ccc07772 2646 rlwimi r5, r3, 0, LPCR_PECEDP
aa31e843 2647END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
bf53c88e
PM
2648
2649kvm_nap_sequence: /* desired LPCR value in r5 */
2650BEGIN_FTR_SECTION
2651 /*
2652 * PSSCR bits: exit criterion = 1 (wakeup based on LPCR at sreset)
2653 * enable state loss = 1 (allow SMT mode switch)
2654 * requested level = 0 (just stop dispatching)
2655 */
2656 lis r3, (PSSCR_EC | PSSCR_ESL)@h
2657 mtspr SPRN_PSSCR, r3
2658 /* Set LPCR_PECE_HVEE bit to enable wakeup by HV interrupts */
2659 li r4, LPCR_PECE_HVEE@higher
2660 sldi r4, r4, 32
2661 or r5, r5, r4
2662END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
19ccb76a
PM
2663 mtspr SPRN_LPCR,r5
2664 isync
2665 li r0, 0
2666 std r0, HSTATE_SCRATCH0(r13)
2667 ptesync
2668 ld r0, HSTATE_SCRATCH0(r13)
26691: cmpd r0, r0
2670 bne 1b
bf53c88e 2671BEGIN_FTR_SECTION
19ccb76a 2672 nap
bf53c88e
PM
2673FTR_SECTION_ELSE
2674 PPC_STOP
2675ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
19ccb76a
PM
2676 b .
2677
e3bbbbfa
PM
267833: mr r4, r3
2679 li r3, 0
2680 li r12, 0
2681 b 34f
2682
19ccb76a 2683kvm_end_cede:
4619ac88
PM
2684 /* get vcpu pointer */
2685 ld r4, HSTATE_KVM_VCPU(r13)
2686
19ccb76a
PM
2687 /* Woken by external or decrementer interrupt */
2688 ld r1, HSTATE_HOST_R1(r13)
19ccb76a 2689
b6c295df
PM
2690#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
2691 addi r3, r4, VCPU_TB_RMINTR
2692 bl kvmhv_accumulate_time
2693#endif
2694
93d17397 2695#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
4bb3c7a0
PM
2696/*
2697 * Branch around the call if both CPU_FTR_TM and
2698 * CPU_FTR_P9_TM_HV_ASSIST are off.
2699 */
93d17397 2700BEGIN_FTR_SECTION
4bb3c7a0
PM
2701 b 91f
2702END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
67f8a8c1 2703 /*
7854f754 2704 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
67f8a8c1 2705 */
6f597c6b
SG
2706 mr r3, r4
2707 ld r4, VCPU_MSR(r3)
7854f754 2708 li r5, 0 /* don't preserve non-vol regs */
7b0e827c 2709 bl kvmppc_restore_tm_hv
7854f754 2710 nop
6f597c6b 2711 ld r4, HSTATE_KVM_VCPU(r13)
4bb3c7a0 271291:
93d17397
PM
2713#endif
2714
19ccb76a
PM
2715 /* load up FP state */
2716 bl kvmppc_load_fp
2717
fd6d53b1
PM
2718 /* Restore guest decrementer */
2719 ld r3, VCPU_DEC_EXPIRES(r4)
2720 ld r5, HSTATE_KVM_VCORE(r13)
57b8daa7 2721 ld r6, VCORE_TB_OFFSET_APPL(r5)
fd6d53b1
PM
2722 add r3, r3, r6 /* convert host TB to guest TB value */
2723 mftb r7
2724 subf r3, r7, r3
2725 mtspr SPRN_DEC, r3
2726
19ccb76a 2727 /* Load NV GPRS */
c75df6f9
MN
2728 ld r14, VCPU_GPR(R14)(r4)
2729 ld r15, VCPU_GPR(R15)(r4)
2730 ld r16, VCPU_GPR(R16)(r4)
2731 ld r17, VCPU_GPR(R17)(r4)
2732 ld r18, VCPU_GPR(R18)(r4)
2733 ld r19, VCPU_GPR(R19)(r4)
2734 ld r20, VCPU_GPR(R20)(r4)
2735 ld r21, VCPU_GPR(R21)(r4)
2736 ld r22, VCPU_GPR(R22)(r4)
2737 ld r23, VCPU_GPR(R23)(r4)
2738 ld r24, VCPU_GPR(R24)(r4)
2739 ld r25, VCPU_GPR(R25)(r4)
2740 ld r26, VCPU_GPR(R26)(r4)
2741 ld r27, VCPU_GPR(R27)(r4)
2742 ld r28, VCPU_GPR(R28)(r4)
2743 ld r29, VCPU_GPR(R29)(r4)
2744 ld r30, VCPU_GPR(R30)(r4)
2745 ld r31, VCPU_GPR(R31)(r4)
37f55d30 2746
e3bbbbfa
PM
2747 /* Check the wake reason in SRR1 to see why we got here */
2748 bl kvmppc_check_wake_reason
19ccb76a 2749
37f55d30
SW
2750 /*
2751 * Restore volatile registers since we could have called a
2752 * C routine in kvmppc_check_wake_reason
2753 * r4 = VCPU
2754 * r3 tells us whether we need to return to host or not
 2755	 * WARNING: r3 is checked further down;
 2756	 * do not modify it until that check is done.
2757 */
2758 ld r4, HSTATE_KVM_VCPU(r13)
2759
19ccb76a 2760 /* clear our bit in vcore->napping_threads */
e3bbbbfa
PM
276134: ld r5,HSTATE_KVM_VCORE(r13)
2762 lbz r7,HSTATE_PTID(r13)
19ccb76a 2763 li r0,1
e3bbbbfa 2764 sld r0,r0,r7
19ccb76a
PM
2765 addi r6,r5,VCORE_NAPPING_THREADS
276632: lwarx r7,0,r6
2767 andc r7,r7,r0
2768 stwcx. r7,0,r6
2769 bne 32b
2770 li r0,0
2771 stb r0,HSTATE_NAPPING(r13)
2772
37f55d30 2773 /* See if the wake reason saved in r3 means we need to exit */
e3bbbbfa 2774 stw r12, VCPU_TRAP(r4)
4619ac88 2775 mr r9, r4
e3bbbbfa
PM
2776 cmpdi r3, 0
2777 bgt guest_exit_cont
df709a29 2778 b maybe_reenter_guest
19ccb76a
PM
2779
2780 /* cede when already previously prodded case */
04f995a5
PM
2781kvm_cede_prodded:
2782 li r0,0
19ccb76a
PM
2783 stb r0,VCPU_PRODDED(r3)
2784 sync /* order testing prodded vs. clearing ceded */
2785 stb r0,VCPU_CEDED(r3)
2786 li r3,H_SUCCESS
2787 blr
2788
2789 /* we've ceded but we want to give control to the host */
04f995a5 2790kvm_cede_exit:
6af27c84 2791 ld r9, HSTATE_KVM_VCPU(r13)
9b9b13a6
BH
2792#ifdef CONFIG_KVM_XICS
2793 /* Abort if we still have a pending escalation */
2794 lbz r5, VCPU_XIVE_ESC_ON(r9)
2795 cmpwi r5, 0
2796 beq 1f
2797 li r0, 0
2798 stb r0, VCPU_CEDED(r9)
27991: /* Enable XIVE escalation */
2800 li r5, XIVE_ESB_SET_PQ_00
2801 mfmsr r0
2802 andi. r0, r0, MSR_DR /* in real mode? */
2803 beq 1f
2804 ld r10, VCPU_XIVE_ESC_VADDR(r9)
2805 cmpdi r10, 0
2806 beq 3f
2807 ldx r0, r10, r5
2808 b 2f
28091: ld r10, VCPU_XIVE_ESC_RADDR(r9)
2810 cmpdi r10, 0
2811 beq 3f
2812 ldcix r0, r10, r5
28132: sync
2814 li r0, 1
2815 stb r0, VCPU_XIVE_ESC_ON(r9)
2816#endif /* CONFIG_KVM_XICS */
28173: b guest_exit_cont
19ccb76a 2818
b4072df4
PM
2819 /* Try to handle a machine check in real mode */
2820machine_check_realmode:
2821 mr r3, r9 /* get vcpu pointer */
b1576fec 2822 bl kvmppc_realmode_machine_check
b4072df4 2823 nop
b4072df4
PM
2824 ld r9, HSTATE_KVM_VCPU(r13)
2825 li r12, BOOK3S_INTERRUPT_MACHINE_CHECK
74845bc2 2826 /*
e20bbd3d
AP
 2827	 * For a guest that is FWNMI capable, deliver all MCE errors
 2828	 * (handled or unhandled) by exiting the guest with a KVM_EXIT_NMI
 2829	 * exit reason. This approach injects machine check errors into the
 2830	 * guest's address space together with additional information in the
 2831	 * form of an RTAS event, enabling the guest kernel to handle such
 2832	 * errors suitably.
966d713e 2833 *
e20bbd3d
AP
 2834	 * For a guest that is not FWNMI capable (old QEMU), fall back
 2835	 * to the old behaviour for backward compatibility:
 2836	 * deliver unhandled/fatal (e.g. UE) MCE errors to the guest
 2837	 * through a machine check interrupt (set HSRR0 to 0x200).
 2838	 * For handled (non-fatal) errors, just go back to guest execution
 2839	 * with the current HSRR0.
966d713e
MS
 2840	 * If we receive a machine check with MSR(RI)=0 then deliver it to
 2841	 * the guest as a machine check, causing the guest to crash.
74845bc2 2842 */
74845bc2 2843 ld r11, VCPU_MSR(r9)
1c9e3d51 2844 rldicl. r0, r11, 64-MSR_HV_LG, 63 /* check if it happened in HV mode */
df709a29 2845 bne guest_exit_cont /* if so, exit to host */
e20bbd3d
AP
2846 /* Check if guest is capable of handling NMI exit */
2847 ld r10, VCPU_KVM(r9)
2848 lbz r10, KVM_FWNMI(r10)
2849 cmpdi r10, 1 /* FWNMI capable? */
df709a29 2850 beq guest_exit_cont /* if so, exit with KVM_EXIT_NMI. */
e20bbd3d
AP
2851
2852 /* if not, fall through for backward compatibility. */
966d713e
MS
2853 andi. r10, r11, MSR_RI /* check for unrecoverable exception */
2854 beq 1f /* Deliver a machine check to guest */
2855 ld r10, VCPU_PC(r9)
2856 cmpdi r3, 0 /* Did we handle MCE ? */
74845bc2 2857 bne 2f /* Continue guest execution. */
b4072df4 2858 /* If not, deliver a machine check. SRR0/1 are already set */
966d713e 28591: li r10, BOOK3S_INTERRUPT_MACHINE_CHECK
e4e38121 2860 bl kvmppc_msr_interrupt
74845bc2 28612: b fast_interrupt_c_return
b4072df4 2862
df709a29
PM
2863/*
 2864 * Call C code to handle an HMI in real mode.
 2865 * Only the primary thread does the call; secondary threads are handled
2866 * by calling hmi_exception_realmode() after kvmppc_hv_entry returns.
2867 * r9 points to the vcpu on entry
2868 */
2869hmi_realmode:
2870 lbz r0, HSTATE_PTID(r13)
2871 cmpwi r0, 0
2872 bne guest_exit_cont
2873 bl kvmppc_realmode_hmi_handler
2874 ld r9, HSTATE_KVM_VCPU(r13)
2875 li r12, BOOK3S_INTERRUPT_HMI
2876 b guest_exit_cont
2877
e3bbbbfa
PM
2878/*
2879 * Check the reason we woke from nap, and take appropriate action.
1f09c3ed 2880 * Returns (in r3):
e3bbbbfa
PM
2881 * 0 if nothing needs to be done
2882 * 1 if something happened that needs to be handled by the host
66feed61 2883 * -1 if there was a guest wakeup (IPI or msgsnd)
e3c13e56
SW
2884 * -2 if we handled a PCI passthrough interrupt (returned by
2885 * kvmppc_read_intr only)
e3bbbbfa
PM
2886 *
2887 * Also sets r12 to the interrupt vector for any interrupt that needs
2888 * to be handled now by the host (0x500 for external interrupt), or zero.
37f55d30
SW
2889 * Modifies all volatile registers (since it may call a C function).
2890 * This routine calls kvmppc_read_intr, a C function, if an external
2891 * interrupt is pending.
e3bbbbfa
PM
2892 */
2893kvmppc_check_wake_reason:
2894 mfspr r6, SPRN_SRR1
aa31e843
PM
2895BEGIN_FTR_SECTION
2896 rlwinm r6, r6, 45-31, 0xf /* extract wake reason field (P8) */
2897FTR_SECTION_ELSE
2898 rlwinm r6, r6, 45-31, 0xe /* P7 wake reason field is 3 bits */
2899ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
2900 cmpwi r6, 8 /* was it an external interrupt? */
37f55d30 2901 beq 7f /* if so, see what it was */
e3bbbbfa
PM
2902 li r3, 0
2903 li r12, 0
2904 cmpwi r6, 6 /* was it the decrementer? */
2905 beq 0f
aa31e843
PM
2906BEGIN_FTR_SECTION
2907 cmpwi r6, 5 /* privileged doorbell? */
2908 beq 0f
5d00f66b
PM
2909 cmpwi r6, 3 /* hypervisor doorbell? */
2910 beq 3f
aa31e843 2911END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
fd7bacbc
MS
2912 cmpwi r6, 0xa /* Hypervisor maintenance ? */
2913 beq 4f
e3bbbbfa
PM
2914 li r3, 1 /* anything else, return 1 */
29150: blr
2916
5d00f66b
PM
2917 /* hypervisor doorbell */
29183: li r12, BOOK3S_INTERRUPT_H_DOORBELL
70aa3961
GS
2919
2920 /*
2921 * Clear the doorbell as we will invoke the handler
2922 * explicitly in the guest exit path.
2923 */
2924 lis r6, (PPC_DBELL_SERVER << (63-36))@h
2925 PPC_MSGCLR(6)
66feed61 2926 /* see if it's a host IPI */
5d00f66b 2927 li r3, 1
2cde3716
NP
2928BEGIN_FTR_SECTION
2929 PPC_MSGSYNC
2930 lwsync
2931END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
66feed61
PM
2932 lbz r0, HSTATE_HOST_IPI(r13)
2933 cmpwi r0, 0
2934 bnelr
70aa3961 2935 /* if not, return -1 */
66feed61 2936 li r3, -1
5d00f66b
PM
2937 blr
2938
fd7bacbc
MS
2939 /* Woken up due to Hypervisor maintenance interrupt */
29404: li r12, BOOK3S_INTERRUPT_HMI
2941 li r3, 1
2942 blr
2943
37f55d30
SW
2944 /* external interrupt - create a stack frame so we can call C */
29457: mflr r0
2946 std r0, PPC_LR_STKOFF(r1)
2947 stdu r1, -PPC_MIN_STKFRM(r1)
2948 bl kvmppc_read_intr
2949 nop
2950 li r12, BOOK3S_INTERRUPT_EXTERNAL
f7af5209
SW
2951 cmpdi r3, 1
2952 ble 1f
2953
2954 /*
2955 * Return code of 2 means PCI passthrough interrupt, but
 2956	 * we need to return to the host to complete handling the
2957 * interrupt. Trap reason is expected in r12 by guest
2958 * exit code.
2959 */
2960 li r12, BOOK3S_INTERRUPT_HV_RM_HARD
29611:
37f55d30
SW
2962 ld r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1)
2963 addi r1, r1, PPC_MIN_STKFRM
2964 mtlr r0
2965 blr
371fefd6 2966
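
For reference, the SRR1 decode at the top of this routine extracts a wake-reason field
from bits 18..21 (least-significant-bit numbering). A hedged C sketch with an invented
helper name:

	/* 8 = external, 6 = decrementer, 5 = privileged doorbell,
	 * 3 = hypervisor doorbell, 0xa = HMI; P7 only implements 3 of the 4 bits. */
	static unsigned int srr1_wake_reason(unsigned long srr1, bool arch_207s)
	{
		return (srr1 >> 18) & (arch_207s ? 0xf : 0xe);
	}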
de56a948
PM
2967/*
2968 * Save away FP, VMX and VSX registers.
2969 * r3 = vcpu pointer
595e4f7e
PM
2970 * N.B. r30 and r31 are volatile across this function,
2971 * thus it is not callable from C.
a8606e20 2972 */
595e4f7e
PM
2973kvmppc_save_fp:
2974 mflr r30
2975 mr r31,r3
8943633c
PM
2976 mfmsr r5
2977 ori r8,r5,MSR_FP
de56a948
PM
2978#ifdef CONFIG_ALTIVEC
2979BEGIN_FTR_SECTION
2980 oris r8,r8,MSR_VEC@h
2981END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2982#endif
2983#ifdef CONFIG_VSX
2984BEGIN_FTR_SECTION
2985 oris r8,r8,MSR_VSX@h
2986END_FTR_SECTION_IFSET(CPU_FTR_VSX)
2987#endif
2988 mtmsrd r8
595e4f7e 2989 addi r3,r3,VCPU_FPRS
9bf163f8 2990 bl store_fp_state
de56a948
PM
2991#ifdef CONFIG_ALTIVEC
2992BEGIN_FTR_SECTION
595e4f7e 2993 addi r3,r31,VCPU_VRS
9bf163f8 2994 bl store_vr_state
de56a948
PM
2995END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2996#endif
2997 mfspr r6,SPRN_VRSAVE
e724f080 2998 stw r6,VCPU_VRSAVE(r31)
595e4f7e 2999 mtlr r30
de56a948
PM
3000 blr
3001
3002/*
3003 * Load up FP, VMX and VSX registers
3004 * r4 = vcpu pointer
595e4f7e
PM
3005 * N.B. r30 and r31 are volatile across this function,
3006 * thus it is not callable from C.
de56a948 3007 */
de56a948 3008kvmppc_load_fp:
595e4f7e
PM
3009 mflr r30
3010 mr r31,r4
de56a948
PM
3011 mfmsr r9
3012 ori r8,r9,MSR_FP
3013#ifdef CONFIG_ALTIVEC
3014BEGIN_FTR_SECTION
3015 oris r8,r8,MSR_VEC@h
3016END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
3017#endif
3018#ifdef CONFIG_VSX
3019BEGIN_FTR_SECTION
3020 oris r8,r8,MSR_VSX@h
3021END_FTR_SECTION_IFSET(CPU_FTR_VSX)
3022#endif
3023 mtmsrd r8
595e4f7e 3024 addi r3,r4,VCPU_FPRS
9bf163f8 3025 bl load_fp_state
de56a948
PM
3026#ifdef CONFIG_ALTIVEC
3027BEGIN_FTR_SECTION
595e4f7e 3028 addi r3,r31,VCPU_VRS
9bf163f8 3029 bl load_vr_state
de56a948
PM
3030END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
3031#endif
e724f080 3032 lwz r7,VCPU_VRSAVE(r31)
de56a948 3033 mtspr SPRN_VRSAVE,r7
595e4f7e
PM
3034 mtlr r30
3035 mr r4,r31
de56a948 3036 blr
44a3add8 3037
f024ee09
PM
3038#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
3039/*
3040 * Save transactional state and TM-related registers.
6f597c6b
SG
3041 * Called with r3 pointing to the vcpu struct and r4 containing
3042 * the guest MSR value.
7854f754
PM
3043 * r5 is non-zero iff non-volatile register state needs to be maintained.
3044 * If r5 == 0, this can modify all checkpointed registers, but
6f597c6b 3045 * restores r1 and r2 before exit.
f024ee09 3046 */
7854f754
PM
3047_GLOBAL_TOC(kvmppc_save_tm_hv)
3048EXPORT_SYMBOL_GPL(kvmppc_save_tm_hv)
7b0e827c
PM
3049 /* See if we need to handle fake suspend mode */
3050BEGIN_FTR_SECTION
caa3be92 3051 b __kvmppc_save_tm
7b0e827c
PM
3052END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
3053
3054 lbz r0, HSTATE_FAKE_SUSPEND(r13) /* Were we fake suspended? */
3055 cmpwi r0, 0
caa3be92 3056 beq __kvmppc_save_tm
7b0e827c
PM
3057
3058 /* The following code handles the fake_suspend = 1 case */
f024ee09
PM
3059 mflr r0
3060 std r0, PPC_LR_STKOFF(r1)
87a11bb6 3061 stdu r1, -PPC_MIN_STKFRM(r1)
f024ee09
PM
3062
3063 /* Turn on TM. */
3064 mfmsr r8
3065 li r0, 1
3066 rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
3067 mtmsrd r8
3068
87a11bb6
SJS
3069 rldicl. r8, r8, 64 - MSR_TS_S_LG, 62 /* Did we actually hrfid? */
3070 beq 4f
7b0e827c 3071BEGIN_FTR_SECTION
87a11bb6 3072 bl pnv_power9_force_smt4_catch
7b0e827c 3073END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
87a11bb6 3074 nop
4bb3c7a0 3075
7b0e827c
PM
3076 /* We have to treclaim here because that's the only way to do S->N */
3077 li r3, TM_CAUSE_KVM_RESCHED
f024ee09
PM
3078 TRECLAIM(R3)
3079
4bb3c7a0
PM
3080 /*
3081 * We were in fake suspend, so we are not going to save the
3082 * register state as the guest checkpointed state (since
3083 * we already have it), therefore we can now use any volatile GPR.
7854f754
PM
3084 * In fact treclaim in fake suspend state doesn't modify
3085 * any registers.
4bb3c7a0 3086 */
7b0e827c 3087
7854f754 3088BEGIN_FTR_SECTION
87a11bb6 3089 bl pnv_power9_force_smt4_release
7854f754 3090END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
87a11bb6
SJS
3091 nop
3092
30934:
4bb3c7a0
PM
3094 mfspr r3, SPRN_PSSCR
3095 /* PSSCR_FAKE_SUSPEND is a write-only bit, but clear it anyway */
3096 li r0, PSSCR_FAKE_SUSPEND
3097 andc r3, r3, r0
3098 mtspr SPRN_PSSCR, r3
4bb3c7a0 3099
681c617b 3100 /* Don't save TEXASR, use value from last exit in real suspend state */
f024ee09 3101 ld r9, HSTATE_KVM_VCPU(r13)
f024ee09
PM
3102 mfspr r5, SPRN_TFHAR
3103 mfspr r6, SPRN_TFIAR
f024ee09
PM
3104 std r5, VCPU_TFHAR(r9)
3105 std r6, VCPU_TFIAR(r9)
f024ee09 3106
87a11bb6 3107 addi r1, r1, PPC_MIN_STKFRM
f024ee09
PM
3108 ld r0, PPC_LR_STKOFF(r1)
3109 mtlr r0
3110 blr
3111
3112/*
3113 * Restore transactional state and TM-related registers.
6f597c6b
SG
3114 * Called with r3 pointing to the vcpu struct
3115 * and r4 containing the guest MSR value.
7854f754 3116 * r5 is non-zero iff non-volatile register state needs to be maintained.
f024ee09 3117 * This potentially modifies all checkpointed registers.
6f597c6b 3118 * It restores r1 and r2 from the PACA.
f024ee09 3119 */
7854f754
PM
3120_GLOBAL_TOC(kvmppc_restore_tm_hv)
3121EXPORT_SYMBOL_GPL(kvmppc_restore_tm_hv)
7b0e827c
PM
3122 /*
3123 * If we are doing TM emulation for the guest on a POWER9 DD2,
3124 * then we don't actually do a trechkpt -- we either set up
3125 * fake-suspend mode, or emulate a TM rollback.
3126 */
3127BEGIN_FTR_SECTION
caa3be92 3128 b __kvmppc_restore_tm
7b0e827c 3129END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
f024ee09
PM
3130 mflr r0
3131 std r0, PPC_LR_STKOFF(r1)
3132
7b0e827c
PM
3133 li r0, 0
3134 stb r0, HSTATE_FAKE_SUSPEND(r13)
3135
3136 /* Turn on TM so we can restore TM SPRs */
f024ee09 3137 mfmsr r5
7b0e827c
PM
3138 li r0, 1
3139 rldimi r5, r0, MSR_TM_LG, 63-MSR_TM_LG
f024ee09
PM
3140 mtmsrd r5
3141
3142 /*
3143 * The user may change these outside of a transaction, so they must
3144 * always be context switched.
3145 */
6f597c6b
SG
3146 ld r5, VCPU_TFHAR(r3)
3147 ld r6, VCPU_TFIAR(r3)
3148 ld r7, VCPU_TEXASR(r3)
f024ee09
PM
3149 mtspr SPRN_TFHAR, r5
3150 mtspr SPRN_TFIAR, r6
3151 mtspr SPRN_TEXASR, r7
3152
6f597c6b 3153 rldicl. r5, r4, 64 - MSR_TS_S_LG, 62
f024ee09 3154 beqlr /* TM not active in guest */
f024ee09 3155
7b0e827c 3156 /* Make sure the failure summary is set */
f024ee09
PM
3157 oris r7, r7, (TEXASR_FS)@h
3158 mtspr SPRN_TEXASR, r7
3159
4bb3c7a0
PM
3160 cmpwi r5, 1 /* check for suspended state */
3161 bgt 10f
3162 stb r5, HSTATE_FAKE_SUSPEND(r13)
7b0e827c 3163 b 9f /* and return */
4bb3c7a0
PM
316410: stdu r1, -PPC_MIN_STKFRM(r1)
3165 /* guest is in transactional state, so simulate rollback */
4bb3c7a0
PM
3166 bl kvmhv_emulate_tm_rollback
3167 nop
4bb3c7a0 3168 addi r1, r1, PPC_MIN_STKFRM
7b0e827c
PM
31699: ld r0, PPC_LR_STKOFF(r1)
3170 mtlr r0
3171 blr
7b0e827c 3172#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
f024ee09 3173
44a3add8
PM
3174/*
3175 * We come here if we get any exception or interrupt while we are
3176 * executing host real mode code while in guest MMU context.
857b99e1
PM
3177 * r12 is (CR << 32) | vector
3178 * r13 points to our PACA
3179 * r12 is saved in HSTATE_SCRATCH0(r13)
3180 * ctr is saved in HSTATE_SCRATCH1(r13) if RELOCATABLE
3181 * r9 is saved in HSTATE_SCRATCH2(r13)
3182 * r13 is saved in HSPRG1
3183 * cfar is saved in HSTATE_CFAR(r13)
3184 * ppr is saved in HSTATE_PPR(r13)
44a3add8
PM
3185 */
3186kvmppc_bad_host_intr:
857b99e1
PM
3187 /*
3188 * Switch to the emergency stack, but start half-way down in
3189 * case we were already on it.
3190 */
3191 mr r9, r1
3192 std r1, PACAR1(r13)
3193 ld r1, PACAEMERGSP(r13)
3194 subi r1, r1, THREAD_SIZE/2 + INT_FRAME_SIZE
3195 std r9, 0(r1)
3196 std r0, GPR0(r1)
3197 std r9, GPR1(r1)
3198 std r2, GPR2(r1)
3199 SAVE_4GPRS(3, r1)
3200 SAVE_2GPRS(7, r1)
3201 srdi r0, r12, 32
3202 clrldi r12, r12, 32
3203 std r0, _CCR(r1)
3204 std r12, _TRAP(r1)
3205 andi. r0, r12, 2
3206 beq 1f
3207 mfspr r3, SPRN_HSRR0
3208 mfspr r4, SPRN_HSRR1
3209 mfspr r5, SPRN_HDAR
3210 mfspr r6, SPRN_HDSISR
3211 b 2f
32121: mfspr r3, SPRN_SRR0
3213 mfspr r4, SPRN_SRR1
3214 mfspr r5, SPRN_DAR
3215 mfspr r6, SPRN_DSISR
32162: std r3, _NIP(r1)
3217 std r4, _MSR(r1)
3218 std r5, _DAR(r1)
3219 std r6, _DSISR(r1)
3220 ld r9, HSTATE_SCRATCH2(r13)
3221 ld r12, HSTATE_SCRATCH0(r13)
3222 GET_SCRATCH0(r0)
3223 SAVE_4GPRS(9, r1)
3224 std r0, GPR13(r1)
3225 SAVE_NVGPRS(r1)
3226 ld r5, HSTATE_CFAR(r13)
3227 std r5, ORIG_GPR3(r1)
3228 mflr r3
3229#ifdef CONFIG_RELOCATABLE
3230 ld r4, HSTATE_SCRATCH1(r13)
3231#else
3232 mfctr r4
3233#endif
3234 mfxer r5
4e26bc4a 3235 lbz r6, PACAIRQSOFTMASK(r13)
857b99e1
PM
3236 std r3, _LINK(r1)
3237 std r4, _CTR(r1)
3238 std r5, _XER(r1)
3239 std r6, SOFTE(r1)
3240 ld r2, PACATOC(r13)
3241 LOAD_REG_IMMEDIATE(3, 0x7265677368657265)
3242 std r3, STACK_FRAME_OVERHEAD-16(r1)
3243
3244 /*
3245 * On POWER9 do a minimal restore of the MMU and call C code,
3246 * which will print a message and panic.
3247 * XXX On POWER7 and POWER8, we just spin here since we don't
3248 * know what the other threads are doing (and we don't want to
3249 * coordinate with them) - but at least we now have register state
3250 * in memory that we might be able to look at from another CPU.
3251 */
3252BEGIN_FTR_SECTION
44a3add8 3253 b .
857b99e1
PM
3254END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
3255 ld r9, HSTATE_KVM_VCPU(r13)
3256 ld r10, VCPU_KVM(r9)
3257
3258 li r0, 0
3259 mtspr SPRN_AMR, r0
3260 mtspr SPRN_IAMR, r0
3261 mtspr SPRN_CIABR, r0
3262 mtspr SPRN_DAWRX, r0
3263
857b99e1
PM
3264BEGIN_MMU_FTR_SECTION
3265 b 4f
3266END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
3267
3268 slbmte r0, r0
3269 slbia
3270 ptesync
3271 ld r8, PACA_SLBSHADOWPTR(r13)
3272 .rept SLB_NUM_BOLTED
3273 li r3, SLBSHADOW_SAVEAREA
3274 LDX_BE r5, r8, r3
3275 addi r3, r3, 8
3276 LDX_BE r6, r8, r3
3277 andis. r7, r5, SLB_ESID_V@h
3278 beq 3f
3279 slbmte r6, r5
32803: addi r8, r8, 16
3281 .endr
3282
32834: lwz r7, KVM_HOST_LPID(r10)
3284 mtspr SPRN_LPID, r7
3285 mtspr SPRN_PID, r0
3286 ld r8, KVM_HOST_LPCR(r10)
3287 mtspr SPRN_LPCR, r8
3288 isync
3289 li r0, KVM_GUEST_MODE_NONE
3290 stb r0, HSTATE_IN_GUEST(r13)
3291
3292 /*
3293 * Turn on the MMU and jump to C code
3294 */
3295 bcl 20, 31, .+4
32965: mflr r3
3297 addi r3, r3, 9f - 5b
eadce3b4
NP
3298 li r4, -1
3299 rldimi r3, r4, 62, 0 /* ensure 0xc000000000000000 bits are set */
857b99e1
PM
3300 ld r4, PACAKMSR(r13)
3301 mtspr SPRN_SRR0, r3
3302 mtspr SPRN_SRR1, r4
222f20f1 3303 RFI_TO_KERNEL
857b99e1
PM
33049: addi r3, r1, STACK_FRAME_OVERHEAD
3305 bl kvmppc_bad_interrupt
3306 b 9b
e4e38121
MN
3307
3308/*
3309 * This mimics the MSR transition on IRQ delivery. The new guest MSR is taken
3310 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
3311 * r11 has the guest MSR value (in/out)
3312 * r9 has a vcpu pointer (in)
3313 * r0 is used as a scratch register
3314 */
3315kvmppc_msr_interrupt:
3316 rldicl r0, r11, 64 - MSR_TS_S_LG, 62
3317 cmpwi r0, 2 /* Check if we are in transactional state.. */
3318 ld r11, VCPU_INTR_MSR(r9)
3319 bne 1f
3320 /* ... if transactional, change to suspended */
3321 li r0, 1
33221: rldimi r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
3323 blr
9bc01a9b 3324
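
The same transition in C, as a hedged sketch (MSR_TS_* are the usual reg.h masks; the
helper name is invented):

	static unsigned long msr_for_guest_interrupt(unsigned long old_msr,
						     unsigned long intr_msr)
	{
		unsigned long ts = old_msr & MSR_TS_MASK;

		if (ts == MSR_TS_T)		/* transactional -> deliver as suspended */
			ts = MSR_TS_S;
		return (intr_msr & ~MSR_TS_MASK) | ts;	/* otherwise carry TS over */
	}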
41f4e631
PM
3325/*
3326 * Load up guest PMU state. R3 points to the vcpu struct.
3327 */
3328_GLOBAL(kvmhv_load_guest_pmu)
3329EXPORT_SYMBOL_GPL(kvmhv_load_guest_pmu)
3330 mr r4, r3
3331 mflr r0
3332 li r3, 1
3333 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
3334 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
3335 isync
3336BEGIN_FTR_SECTION
3337 ld r3, VCPU_MMCR(r4)
3338 andi. r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
3339 cmpwi r5, MMCR0_PMAO
3340 beql kvmppc_fix_pmao
3341END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
3342 lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */
3343 lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */
3344 lwz r6, VCPU_PMC + 8(r4)
3345 lwz r7, VCPU_PMC + 12(r4)
3346 lwz r8, VCPU_PMC + 16(r4)
3347 lwz r9, VCPU_PMC + 20(r4)
3348 mtspr SPRN_PMC1, r3
3349 mtspr SPRN_PMC2, r5
3350 mtspr SPRN_PMC3, r6
3351 mtspr SPRN_PMC4, r7
3352 mtspr SPRN_PMC5, r8
3353 mtspr SPRN_PMC6, r9
3354 ld r3, VCPU_MMCR(r4)
3355 ld r5, VCPU_MMCR + 8(r4)
3356 ld r6, VCPU_MMCR + 16(r4)
3357 ld r7, VCPU_SIAR(r4)
3358 ld r8, VCPU_SDAR(r4)
3359 mtspr SPRN_MMCR1, r5
3360 mtspr SPRN_MMCRA, r6
3361 mtspr SPRN_SIAR, r7
3362 mtspr SPRN_SDAR, r8
3363BEGIN_FTR_SECTION
3364 ld r5, VCPU_MMCR + 24(r4)
3365 ld r6, VCPU_SIER(r4)
3366 mtspr SPRN_MMCR2, r5
3367 mtspr SPRN_SIER, r6
3368BEGIN_FTR_SECTION_NESTED(96)
3369 lwz r7, VCPU_PMC + 24(r4)
3370 lwz r8, VCPU_PMC + 28(r4)
3371 ld r9, VCPU_MMCR + 32(r4)
3372 mtspr SPRN_SPMC1, r7
3373 mtspr SPRN_SPMC2, r8
3374 mtspr SPRN_MMCRS, r9
3375END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
3376END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
3377 mtspr SPRN_MMCR0, r3
3378 isync
3379 mtlr r0
3380 blr
3381
3382/*
3383 * Reload host PMU state saved in the PACA by kvmhv_save_host_pmu.
3384 */
3385_GLOBAL(kvmhv_load_host_pmu)
3386EXPORT_SYMBOL_GPL(kvmhv_load_host_pmu)
3387 mflr r0
3388 lbz r4, PACA_PMCINUSE(r13) /* is the host using the PMU? */
3389 cmpwi r4, 0
3390 beq 23f /* skip if not */
3391BEGIN_FTR_SECTION
3392 ld r3, HSTATE_MMCR0(r13)
3393 andi. r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
3394 cmpwi r4, MMCR0_PMAO
3395 beql kvmppc_fix_pmao
3396END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
3397 lwz r3, HSTATE_PMC1(r13)
3398 lwz r4, HSTATE_PMC2(r13)
3399 lwz r5, HSTATE_PMC3(r13)
3400 lwz r6, HSTATE_PMC4(r13)
3401 lwz r8, HSTATE_PMC5(r13)
3402 lwz r9, HSTATE_PMC6(r13)
3403 mtspr SPRN_PMC1, r3
3404 mtspr SPRN_PMC2, r4
3405 mtspr SPRN_PMC3, r5
3406 mtspr SPRN_PMC4, r6
3407 mtspr SPRN_PMC5, r8
3408 mtspr SPRN_PMC6, r9
3409 ld r3, HSTATE_MMCR0(r13)
3410 ld r4, HSTATE_MMCR1(r13)
3411 ld r5, HSTATE_MMCRA(r13)
3412 ld r6, HSTATE_SIAR(r13)
3413 ld r7, HSTATE_SDAR(r13)
3414 mtspr SPRN_MMCR1, r4
3415 mtspr SPRN_MMCRA, r5
3416 mtspr SPRN_SIAR, r6
3417 mtspr SPRN_SDAR, r7
3418BEGIN_FTR_SECTION
3419 ld r8, HSTATE_MMCR2(r13)
3420 ld r9, HSTATE_SIER(r13)
3421 mtspr SPRN_MMCR2, r8
3422 mtspr SPRN_SIER, r9
3423END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
3424 mtspr SPRN_MMCR0, r3
3425 isync
3426 mtlr r0
342723: blr
3428
3429/*
3430 * Save guest PMU state into the vcpu struct.
3431 * r3 = vcpu, r4 = full save flag (PMU in use flag set in VPA)
3432 */
3433_GLOBAL(kvmhv_save_guest_pmu)
3434EXPORT_SYMBOL_GPL(kvmhv_save_guest_pmu)
3435 mr r9, r3
3436 mr r8, r4
3437BEGIN_FTR_SECTION
3438 /*
3439 * POWER8 seems to have a hardware bug where setting
3440 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
3441 * when some counters are already negative doesn't seem
3442 * to cause a performance monitor alert (and hence interrupt).
3443 * The effect of this is that when saving the PMU state,
3444 * if there is no PMU alert pending when we read MMCR0
3445 * before freezing the counters, but one becomes pending
3446 * before we read the counters, we lose it.
3447 * To work around this, we need a way to freeze the counters
3448 * before reading MMCR0. Normally, freezing the counters
3449 * is done by writing MMCR0 (to set MMCR0[FC]) which
 3450	 * unavoidably writes MMCR0[PMAO] as well. On POWER8,
3451 * we can also freeze the counters using MMCR2, by writing
3452 * 1s to all the counter freeze condition bits (there are
3453 * 9 bits each for 6 counters).
3454 */
3455 li r3, -1 /* set all freeze bits */
3456 clrrdi r3, r3, 10
3457 mfspr r10, SPRN_MMCR2
3458 mtspr SPRN_MMCR2, r3
3459 isync
3460END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
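
A hedged C sketch of the MMCR2 freeze just performed (the helper name is invented;
mfspr()/mtspr() are the usual accessors):

	/* Set all 54 freeze-condition bits (9 per counter, 6 counters) in MMCR2
	 * so no new alert can arrive between reading MMCR0 and the counters. */
	static unsigned long kvmhv_freeze_counters_mmcr2(void)
	{
		unsigned long saved = mfspr(SPRN_MMCR2);

		mtspr(SPRN_MMCR2, ~0UL << 10);	/* ones in the top 54 bits */
		return saved;			/* stored into the vcpu further down */
	}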
3461 li r3, 1
3462 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
3463 mfspr r4, SPRN_MMCR0 /* save MMCR0 */
3464 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
3465 mfspr r6, SPRN_MMCRA
3466 /* Clear MMCRA in order to disable SDAR updates */
3467 li r7, 0
3468 mtspr SPRN_MMCRA, r7
3469 isync
3470 cmpwi r8, 0 /* did they ask for PMU stuff to be saved? */
3471 bne 21f
3472 std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */
3473 b 22f
347421: mfspr r5, SPRN_MMCR1
3475 mfspr r7, SPRN_SIAR
3476 mfspr r8, SPRN_SDAR
3477 std r4, VCPU_MMCR(r9)
3478 std r5, VCPU_MMCR + 8(r9)
3479 std r6, VCPU_MMCR + 16(r9)
3480BEGIN_FTR_SECTION
3481 std r10, VCPU_MMCR + 24(r9)
3482END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
3483 std r7, VCPU_SIAR(r9)
3484 std r8, VCPU_SDAR(r9)
3485 mfspr r3, SPRN_PMC1
3486 mfspr r4, SPRN_PMC2
3487 mfspr r5, SPRN_PMC3
3488 mfspr r6, SPRN_PMC4
3489 mfspr r7, SPRN_PMC5
3490 mfspr r8, SPRN_PMC6
3491 stw r3, VCPU_PMC(r9)
3492 stw r4, VCPU_PMC + 4(r9)
3493 stw r5, VCPU_PMC + 8(r9)
3494 stw r6, VCPU_PMC + 12(r9)
3495 stw r7, VCPU_PMC + 16(r9)
3496 stw r8, VCPU_PMC + 20(r9)
3497BEGIN_FTR_SECTION
3498 mfspr r5, SPRN_SIER
3499 std r5, VCPU_SIER(r9)
3500BEGIN_FTR_SECTION_NESTED(96)
3501 mfspr r6, SPRN_SPMC1
3502 mfspr r7, SPRN_SPMC2
3503 mfspr r8, SPRN_MMCRS
3504 stw r6, VCPU_PMC + 24(r9)
3505 stw r7, VCPU_PMC + 28(r9)
3506 std r8, VCPU_MMCR + 32(r9)
3507 lis r4, 0x8000
3508 mtspr SPRN_MMCRS, r4
3509END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
3510END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
351122: blr
3512
9bc01a9b
PM
3513/*
3514 * This works around a hardware bug on POWER8E processors, where
3515 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
3516 * performance monitor interrupt. Instead, when we need to have
3517 * an interrupt pending, we have to arrange for a counter to overflow.
3518 */
3519kvmppc_fix_pmao:
3520 li r3, 0
3521 mtspr SPRN_MMCR2, r3
3522 lis r3, (MMCR0_PMXE | MMCR0_FCECE)@h
3523 ori r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
3524 mtspr SPRN_MMCR0, r3
3525 lis r3, 0x7fff
3526 ori r3, r3, 0xffff
3527 mtspr SPRN_PMC6, r3
3528 isync
3529 blr
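
In C terms the workaround amounts to the following hedged sketch (the function name is
invented; the MMCR0_* and SPRN_* constants are the standard reg.h definitions):

	static void fix_pmao(void)
	{
		mtspr(SPRN_MMCR2, 0);
		mtspr(SPRN_MMCR0, MMCR0_PMXE | MMCR0_FCECE | MMCR0_PMCjCE | MMCR0_C56RUN);
		/* One count away from overflow: the very next increment raises the
		 * performance monitor exception that writing PMAO fails to deliver. */
		mtspr(SPRN_PMC6, 0x7fffffff);
	}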
b6c295df
PM
3530
3531#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
3532/*
3533 * Start timing an activity
3534 * r3 = pointer to time accumulation struct, r4 = vcpu
3535 */
3536kvmhv_start_timing:
3537 ld r5, HSTATE_KVM_VCORE(r13)
57b8daa7
PM
3538 ld r6, VCORE_TB_OFFSET_APPL(r5)
3539 mftb r5
3540 subf r5, r6, r5 /* subtract current timebase offset */
b6c295df
PM
3541 std r3, VCPU_CUR_ACTIVITY(r4)
3542 std r5, VCPU_ACTIVITY_START(r4)
3543 blr
3544
3545/*
3546 * Accumulate time to one activity and start another.
3547 * r3 = pointer to new time accumulation struct, r4 = vcpu
3548 */
3549kvmhv_accumulate_time:
3550 ld r5, HSTATE_KVM_VCORE(r13)
57b8daa7
PM
3551 ld r8, VCORE_TB_OFFSET_APPL(r5)
3552 ld r5, VCPU_CUR_ACTIVITY(r4)
b6c295df
PM
3553 ld r6, VCPU_ACTIVITY_START(r4)
3554 std r3, VCPU_CUR_ACTIVITY(r4)
3555 mftb r7
57b8daa7 3556 subf r7, r8, r7 /* subtract current timebase offset */
b6c295df
PM
3557 std r7, VCPU_ACTIVITY_START(r4)
3558 cmpdi r5, 0
3559 beqlr
3560 subf r3, r6, r7
3561 ld r8, TAS_SEQCOUNT(r5)
3562 cmpdi r8, 0
3563 addi r8, r8, 1
3564 std r8, TAS_SEQCOUNT(r5)
3565 lwsync
3566 ld r7, TAS_TOTAL(r5)
3567 add r7, r7, r3
3568 std r7, TAS_TOTAL(r5)
3569 ld r6, TAS_MIN(r5)
3570 ld r7, TAS_MAX(r5)
3571 beq 3f
3572 cmpd r3, r6
3573 bge 1f
35743: std r3, TAS_MIN(r5)
35751: cmpd r3, r7
3576 ble 2f
3577 std r3, TAS_MAX(r5)
35782: lwsync
3579 addi r8, r8, 1
3580 std r8, TAS_SEQCOUNT(r5)
3581 blr
3582#endif
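
For reference, the min/max/total accumulation above follows a simple seqcount-style
protocol: an odd seqcount tells a reader that the totals are mid-update and must be
re-read. A hedged C sketch (struct layout inferred from the TAS_* offsets used here;
treat the names as assumptions):

	struct tb_accumulator {
		u64 seqcount;	/* odd while an update is in progress */
		u64 tb_total;
		u64 tb_min;
		u64 tb_max;
	};

	static void accumulate(struct tb_accumulator *acc, u64 delta)
	{
		bool first = (acc->seqcount == 0);

		acc->seqcount++;		/* mark update in progress */
		smp_wmb();			/* lwsync in the asm above */
		acc->tb_total += delta;
		if (first || delta < acc->tb_min)
			acc->tb_min = delta;
		if (delta > acc->tb_max)
			acc->tb_max = delta;
		smp_wmb();
		acc->seqcount++;		/* even again: update complete */
	}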