// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/kvm_book3s.h>
#include <asm/reg.h>
#include <asm/switch_to.h>
#include <asm/time.h>
#include <asm/tm.h>
#include "book3s.h"
#include <asm/asm-prototypes.h>

#define OP_19_XOP_RFID		18
#define OP_19_XOP_RFI		50

#define OP_31_XOP_MFMSR		83
#define OP_31_XOP_MTMSR		146
#define OP_31_XOP_MTMSRD	178
#define OP_31_XOP_MTSR		210
#define OP_31_XOP_MTSRIN	242
#define OP_31_XOP_TLBIEL	274
/* Opcode is officially reserved, reuse it as sc 1 when sc 1 doesn't trap */
#define OP_31_XOP_FAKE_SC1	308
#define OP_31_XOP_SLBMTE	402
#define OP_31_XOP_SLBIE		434
#define OP_31_XOP_SLBIA		498
#define OP_31_XOP_MFSR		595
#define OP_31_XOP_MFSRIN	659
#define OP_31_XOP_DCBA		758
#define OP_31_XOP_SLBMFEV	851
#define OP_31_XOP_EIOIO		854
#define OP_31_XOP_SLBMFEE	915
#define OP_31_XOP_SLBFEE	979

#define OP_31_XOP_TBEGIN	654
#define OP_31_XOP_TABORT	910

#define OP_31_XOP_TRECLAIM	942
#define OP_31_XOP_TRCHKPT	1006

/* DCBZ is actually 1014, but we patch it to 1010 so we get a trap */
#define OP_31_XOP_DCBZ		1010

#define OP_LFS			48
#define OP_LFD			50
#define OP_STFS			52
#define OP_STFD			54

#define SPRN_GQR0		912
#define SPRN_GQR1		913
#define SPRN_GQR2		914
#define SPRN_GQR3		915
#define SPRN_GQR4		916
#define SPRN_GQR5		917
#define SPRN_GQR6		918
#define SPRN_GQR7		919

enum priv_level {
	PRIV_PROBLEM = 0,
	PRIV_SUPER = 1,
	PRIV_HYPER = 2,
};

static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level)
{
	/* PAPR VMs only access supervisor SPRs */
	if (vcpu->arch.papr_enabled && (level > PRIV_SUPER))
		return false;

	/* Limit user space to its own small SPR set */
	if ((kvmppc_get_msr(vcpu) & MSR_PR) && level > PRIV_PROBLEM)
		return false;

	return true;
}
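/*
 * Transactional memory support for PR KVM.
 *
 * The helpers below shuttle guest register state between the live vcpu
 * registers and the checkpointed (_tm) copies in the vcpu struct, which
 * is what the emulation of treclaim./trecheckpoint. operates on.
 */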
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline void kvmppc_copyto_vcpu_tm(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.gpr_tm[0], &vcpu->arch.regs.gpr[0],
			sizeof(vcpu->arch.gpr_tm));
	memcpy(&vcpu->arch.fp_tm, &vcpu->arch.fp,
			sizeof(struct thread_fp_state));
	memcpy(&vcpu->arch.vr_tm, &vcpu->arch.vr,
			sizeof(struct thread_vr_state));
	vcpu->arch.ppr_tm = vcpu->arch.ppr;
	vcpu->arch.dscr_tm = vcpu->arch.dscr;
	vcpu->arch.amr_tm = vcpu->arch.amr;
	vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
	vcpu->arch.tar_tm = vcpu->arch.tar;
	vcpu->arch.lr_tm = vcpu->arch.regs.link;
	vcpu->arch.cr_tm = vcpu->arch.regs.ccr;
	vcpu->arch.xer_tm = vcpu->arch.regs.xer;
	vcpu->arch.vrsave_tm = vcpu->arch.vrsave;
}

static inline void kvmppc_copyfrom_vcpu_tm(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.regs.gpr[0], &vcpu->arch.gpr_tm[0],
			sizeof(vcpu->arch.regs.gpr));
	memcpy(&vcpu->arch.fp, &vcpu->arch.fp_tm,
			sizeof(struct thread_fp_state));
	memcpy(&vcpu->arch.vr, &vcpu->arch.vr_tm,
			sizeof(struct thread_vr_state));
	vcpu->arch.ppr = vcpu->arch.ppr_tm;
	vcpu->arch.dscr = vcpu->arch.dscr_tm;
	vcpu->arch.amr = vcpu->arch.amr_tm;
	vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
	vcpu->arch.tar = vcpu->arch.tar_tm;
	vcpu->arch.regs.link = vcpu->arch.lr_tm;
	vcpu->arch.regs.ccr = vcpu->arch.cr_tm;
	vcpu->arch.regs.xer = vcpu->arch.xer_tm;
	vcpu->arch.vrsave = vcpu->arch.vrsave_tm;
}

static void kvmppc_emulate_treclaim(struct kvm_vcpu *vcpu, int ra_val)
{
	unsigned long guest_msr = kvmppc_get_msr(vcpu);
	int fc_val = ra_val ? ra_val : 1;
	uint64_t texasr;

	/* CR0 = 0 | MSR[TS] | 0 */
	vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) |
		(((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1))
		 << CR0_SHIFT);

	preempt_disable();
	tm_enable();
	texasr = mfspr(SPRN_TEXASR);
	kvmppc_save_tm_pr(vcpu);
	kvmppc_copyfrom_vcpu_tm(vcpu);

	/* failure recording depends on Failure Summary bit */
	if (!(texasr & TEXASR_FS)) {
		texasr &= ~TEXASR_FC;
		texasr |= ((u64)fc_val << TEXASR_FC_LG) | TEXASR_FS;

		texasr &= ~(TEXASR_PR | TEXASR_HV);
		if (kvmppc_get_msr(vcpu) & MSR_PR)
			texasr |= TEXASR_PR;

		if (kvmppc_get_msr(vcpu) & MSR_HV)
			texasr |= TEXASR_HV;

		vcpu->arch.texasr = texasr;
		vcpu->arch.tfiar = kvmppc_get_pc(vcpu);
		mtspr(SPRN_TEXASR, texasr);
		mtspr(SPRN_TFIAR, vcpu->arch.tfiar);
	}
	tm_disable();
	/*
	 * treclaim. needs to leave the guest in non-transactional state.
	 */
	guest_msr &= ~(MSR_TS_MASK);
	kvmppc_set_msr(vcpu, guest_msr);
	preempt_enable();

	if (vcpu->arch.shadow_fscr & FSCR_TAR)
		mtspr(SPRN_TAR, vcpu->arch.tar);
}

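/*
 * Emulate trecheckpoint.: flush the live FP/VEC/VSX and TAR state into
 * the vcpu, snapshot it as the checkpointed (_tm) copy, and move the
 * guest MSR into transactional-suspended state.
 */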
static void kvmppc_emulate_trchkpt(struct kvm_vcpu *vcpu)
{
	unsigned long guest_msr = kvmppc_get_msr(vcpu);

	preempt_disable();
	/*
	 * We need to flush FP/VEC/VSX to the vcpu save area before
	 * copying.
	 */
	kvmppc_giveup_ext(vcpu, MSR_VSX);
	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
	kvmppc_copyto_vcpu_tm(vcpu);
	kvmppc_save_tm_sprs(vcpu);

	/*
	 * As a result of trecheckpoint., set TS to suspended.
	 */
	guest_msr &= ~(MSR_TS_MASK);
	guest_msr |= MSR_TS_S;
	kvmppc_set_msr(vcpu, guest_msr);
	kvmppc_restore_tm_pr(vcpu);
	preempt_enable();
}

/* Emulate tabort. in guest privileged state */
void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val)
{
	/* Currently we only emulate the plain tabort. variant; the other
	 * tabort variants are not emulated since there is no kernel
	 * usage of them at present.
	 */
	unsigned long guest_msr = kvmppc_get_msr(vcpu);
	uint64_t org_texasr;

	preempt_disable();
	tm_enable();
	org_texasr = mfspr(SPRN_TEXASR);
	tm_abort(ra_val);

	/* CR0 = 0 | MSR[TS] | 0 */
	vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) |
		(((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1))
		 << CR0_SHIFT);

	vcpu->arch.texasr = mfspr(SPRN_TEXASR);
	/* Failure recording depends on the Failure Summary bit, and
	 * tabort. is treated as a nop in non-transactional state.
	 */
	if (!(org_texasr & TEXASR_FS) &&
	    MSR_TM_ACTIVE(guest_msr)) {
		vcpu->arch.texasr &= ~(TEXASR_PR | TEXASR_HV);
		if (guest_msr & MSR_PR)
			vcpu->arch.texasr |= TEXASR_PR;

		if (guest_msr & MSR_HV)
			vcpu->arch.texasr |= TEXASR_HV;

		vcpu->arch.tfiar = kvmppc_get_pc(vcpu);
	}
	tm_disable();
	preempt_enable();
}

#endif

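/*
 * Main instruction emulation entry point for PR KVM.  'inst' is the
 * trapped instruction; *advance is cleared when the guest PC must not
 * be stepped past it (e.g. for rfid).
 */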
int kvmppc_core_emulate_op_pr(struct kvm_vcpu *vcpu,
			      unsigned int inst, int *advance)
{
	int emulated = EMULATE_DONE;
	int rt = get_rt(inst);
	int rs = get_rs(inst);
	int ra = get_ra(inst);
	int rb = get_rb(inst);
	u32 inst_sc = 0x44000002;

	switch (get_op(inst)) {
	case 0:
		emulated = EMULATE_FAIL;
		if ((kvmppc_get_msr(vcpu) & MSR_LE) &&
		    (inst == swab32(inst_sc))) {
			/*
			 * This is the byte reversed syscall instruction of our
			 * hypercall handler. Early versions of LE Linux didn't
			 * swap the instructions correctly and ended up with
			 * illegal instructions.
			 * Just always fail hypercalls on these broken systems.
			 */
			kvmppc_set_gpr(vcpu, 3, EV_UNIMPLEMENTED);
			kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
			emulated = EMULATE_DONE;
		}
		break;
	case 19:
		switch (get_xop(inst)) {
		case OP_19_XOP_RFID:
		case OP_19_XOP_RFI: {
			unsigned long srr1 = kvmppc_get_srr1(vcpu);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
			unsigned long cur_msr = kvmppc_get_msr(vcpu);

			/*
			 * Follow the ISA rules for TM state transitions: when
			 * TM is disabled and the current state is suspended
			 * while the target TM state would be inactive (00),
			 * the transition must be suppressed, so keep TS set
			 * to suspended in the target MSR.
			 */
			if (((cur_msr & MSR_TM) == 0) &&
				((srr1 & MSR_TM) == 0) &&
				MSR_TM_SUSPENDED(cur_msr) &&
				!MSR_TM_ACTIVE(srr1))
				srr1 |= MSR_TS_S;
#endif
			kvmppc_set_pc(vcpu, kvmppc_get_srr0(vcpu));
			kvmppc_set_msr(vcpu, srr1);
			*advance = 0;
			break;
		}

		default:
			emulated = EMULATE_FAIL;
			break;
		}
		break;
	case 31:
		switch (get_xop(inst)) {
		case OP_31_XOP_MFMSR:
			kvmppc_set_gpr(vcpu, rt, kvmppc_get_msr(vcpu));
			break;
		case OP_31_XOP_MTMSRD:
		{
			ulong rs_val = kvmppc_get_gpr(vcpu, rs);
			if (inst & 0x10000) {
				ulong new_msr = kvmppc_get_msr(vcpu);
				new_msr &= ~(MSR_RI | MSR_EE);
				new_msr |= rs_val & (MSR_RI | MSR_EE);
				kvmppc_set_msr_fast(vcpu, new_msr);
			} else
				kvmppc_set_msr(vcpu, rs_val);
			break;
		}
		case OP_31_XOP_MTMSR:
			kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
			break;
		case OP_31_XOP_MFSR:
		{
			int srnum;

			srnum = kvmppc_get_field(inst, 12 + 32, 15 + 32);
			if (vcpu->arch.mmu.mfsrin) {
				u32 sr;
				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
				kvmppc_set_gpr(vcpu, rt, sr);
			}
			break;
		}
		case OP_31_XOP_MFSRIN:
		{
			int srnum;

			srnum = (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf;
			if (vcpu->arch.mmu.mfsrin) {
				u32 sr;
				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
				kvmppc_set_gpr(vcpu, rt, sr);
			}
			break;
		}
		case OP_31_XOP_MTSR:
			vcpu->arch.mmu.mtsrin(vcpu,
				(inst >> 16) & 0xf,
				kvmppc_get_gpr(vcpu, rs));
			break;
		case OP_31_XOP_MTSRIN:
			vcpu->arch.mmu.mtsrin(vcpu,
				(kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf,
				kvmppc_get_gpr(vcpu, rs));
			break;
		case OP_31_XOP_TLBIE:
		case OP_31_XOP_TLBIEL:
		{
			bool large = (inst & 0x00200000) ? true : false;
			ulong addr = kvmppc_get_gpr(vcpu, rb);
			vcpu->arch.mmu.tlbie(vcpu, addr, large);
			break;
		}
#ifdef CONFIG_PPC_BOOK3S_64
		case OP_31_XOP_FAKE_SC1:
		{
			/* SC 1 papr hypercalls */
			ulong cmd = kvmppc_get_gpr(vcpu, 3);
			int i;

			if ((kvmppc_get_msr(vcpu) & MSR_PR) ||
			    !vcpu->arch.papr_enabled) {
				emulated = EMULATE_FAIL;
				break;
			}

			if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE)
				break;

			vcpu->run->papr_hcall.nr = cmd;
			for (i = 0; i < 9; ++i) {
				ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
				vcpu->run->papr_hcall.args[i] = gpr;
			}

			vcpu->run->exit_reason = KVM_EXIT_PAPR_HCALL;
			vcpu->arch.hcall_needed = 1;
			emulated = EMULATE_EXIT_USER;
			break;
		}
#endif
		case OP_31_XOP_EIOIO:
			break;
		case OP_31_XOP_SLBMTE:
			if (!vcpu->arch.mmu.slbmte)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbmte(vcpu,
					kvmppc_get_gpr(vcpu, rs),
					kvmppc_get_gpr(vcpu, rb));
			break;
		case OP_31_XOP_SLBIE:
			if (!vcpu->arch.mmu.slbie)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbie(vcpu,
					kvmppc_get_gpr(vcpu, rb));
			break;
		case OP_31_XOP_SLBIA:
			if (!vcpu->arch.mmu.slbia)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbia(vcpu);
			break;
		case OP_31_XOP_SLBFEE:
			if (!(inst & 1) || !vcpu->arch.mmu.slbfee) {
				return EMULATE_FAIL;
			} else {
				ulong b, t;
				ulong cr = kvmppc_get_cr(vcpu) & ~CR0_MASK;

				b = kvmppc_get_gpr(vcpu, rb);
				if (!vcpu->arch.mmu.slbfee(vcpu, b, &t))
					cr |= 2 << CR0_SHIFT;
				kvmppc_set_gpr(vcpu, rt, t);
				/* copy XER[SO] bit to CR0[SO] */
				cr |= (vcpu->arch.regs.xer & 0x80000000) >>
					(31 - CR0_SHIFT);
				kvmppc_set_cr(vcpu, cr);
			}
			break;
		case OP_31_XOP_SLBMFEE:
			if (!vcpu->arch.mmu.slbmfee) {
				emulated = EMULATE_FAIL;
			} else {
				ulong t, rb_val;

				rb_val = kvmppc_get_gpr(vcpu, rb);
				t = vcpu->arch.mmu.slbmfee(vcpu, rb_val);
				kvmppc_set_gpr(vcpu, rt, t);
			}
			break;
		case OP_31_XOP_SLBMFEV:
			if (!vcpu->arch.mmu.slbmfev) {
				emulated = EMULATE_FAIL;
			} else {
				ulong t, rb_val;

				rb_val = kvmppc_get_gpr(vcpu, rb);
				t = vcpu->arch.mmu.slbmfev(vcpu, rb_val);
				kvmppc_set_gpr(vcpu, rt, t);
			}
			break;
		case OP_31_XOP_DCBA:
			/* Gets treated as NOP */
			break;
		case OP_31_XOP_DCBZ:
		{
			ulong rb_val = kvmppc_get_gpr(vcpu, rb);
			ulong ra_val = 0;
			ulong addr, vaddr;
			u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
			u32 dsisr;
			int r;

			if (ra)
				ra_val = kvmppc_get_gpr(vcpu, ra);

			addr = (ra_val + rb_val) & ~31ULL;
			if (!(kvmppc_get_msr(vcpu) & MSR_SF))
				addr &= 0xffffffff;
			vaddr = addr;

			r = kvmppc_st(vcpu, &addr, 32, zeros, true);
			if ((r == -ENOENT) || (r == -EPERM)) {
				*advance = 0;
				kvmppc_set_dar(vcpu, vaddr);
				vcpu->arch.fault_dar = vaddr;

				dsisr = DSISR_ISSTORE;
				if (r == -ENOENT)
					dsisr |= DSISR_NOHPTE;
				else if (r == -EPERM)
					dsisr |= DSISR_PROTFAULT;

				kvmppc_set_dsisr(vcpu, dsisr);
				vcpu->arch.fault_dsisr = dsisr;

				kvmppc_book3s_queue_irqprio(vcpu,
					BOOK3S_INTERRUPT_DATA_STORAGE);
			}

			break;
		}
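		/*
		 * The cases below emulate the privileged TM instructions for
		 * PR guests.  Each case first checks that the host CPU has
		 * TM at all and that the guest has MSR[TM] set; if the
		 * facility is disabled in the guest MSR, a facility
		 * unavailable interrupt is delivered instead.
		 */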
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
		case OP_31_XOP_TBEGIN:
		{
			if (!cpu_has_feature(CPU_FTR_TM))
				break;

			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
				emulated = EMULATE_AGAIN;
				break;
			}

			if (!(kvmppc_get_msr(vcpu) & MSR_PR)) {
				preempt_disable();
				vcpu->arch.regs.ccr = (CR0_TBEGIN_FAILURE |
				  (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)));

				vcpu->arch.texasr = (TEXASR_FS | TEXASR_EXACT |
					(((u64)(TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
						 << TEXASR_FC_LG));

				if ((inst >> 21) & 0x1)
					vcpu->arch.texasr |= TEXASR_ROT;

				if (kvmppc_get_msr(vcpu) & MSR_HV)
					vcpu->arch.texasr |= TEXASR_HV;

				vcpu->arch.tfhar = kvmppc_get_pc(vcpu) + 4;
				vcpu->arch.tfiar = kvmppc_get_pc(vcpu);

				kvmppc_restore_tm_sprs(vcpu);
				preempt_enable();
			} else
				emulated = EMULATE_FAIL;
			break;
		}
		case OP_31_XOP_TABORT:
		{
			ulong guest_msr = kvmppc_get_msr(vcpu);
			unsigned long ra_val = 0;

			if (!cpu_has_feature(CPU_FTR_TM))
				break;

			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
				emulated = EMULATE_AGAIN;
				break;
			}

			/* Only emulate for a privileged guest: a problem
			 * state guest can run with TM enabled and we don't
			 * expect to trap here in that case.
			 */
			WARN_ON(guest_msr & MSR_PR);

			if (ra)
				ra_val = kvmppc_get_gpr(vcpu, ra);

			kvmppc_emulate_tabort(vcpu, ra_val);
			break;
		}
		case OP_31_XOP_TRECLAIM:
		{
			ulong guest_msr = kvmppc_get_msr(vcpu);
			unsigned long ra_val = 0;

			if (!cpu_has_feature(CPU_FTR_TM))
				break;

			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
				emulated = EMULATE_AGAIN;
				break;
			}

			/* generate interrupts based on priorities */
			if (guest_msr & MSR_PR) {
				/* Privileged Instruction type Program Interrupt */
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
				break;
			}

			if (!MSR_TM_ACTIVE(guest_msr)) {
				/* TM bad thing interrupt */
				kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
				emulated = EMULATE_AGAIN;
				break;
			}

			if (ra)
				ra_val = kvmppc_get_gpr(vcpu, ra);
			kvmppc_emulate_treclaim(vcpu, ra_val);
			break;
		}
		case OP_31_XOP_TRCHKPT:
		{
			ulong guest_msr = kvmppc_get_msr(vcpu);
			unsigned long texasr;

			if (!cpu_has_feature(CPU_FTR_TM))
				break;

			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
				emulated = EMULATE_AGAIN;
				break;
			}

			/* generate interrupt based on priorities */
			if (guest_msr & MSR_PR) {
				/* Privileged Instruction type Program Intr */
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
				break;
			}

			tm_enable();
			texasr = mfspr(SPRN_TEXASR);
			tm_disable();

			if (MSR_TM_ACTIVE(guest_msr) ||
				!(texasr & (TEXASR_FS))) {
				/* TM bad thing interrupt */
				kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
				emulated = EMULATE_AGAIN;
				break;
			}

			kvmppc_emulate_trchkpt(vcpu);
			break;
		}
#endif
		default:
			emulated = EMULATE_FAIL;
		}
		break;
	default:
		emulated = EMULATE_FAIL;
	}

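	/*
	 * Anything not handled above may still be a paired-single
	 * (Gekko/Broadway) instruction, so give that emulator a chance
	 * before reporting failure.
	 */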
	if (emulated == EMULATE_FAIL)
		emulated = kvmppc_emulate_paired_single(vcpu);

	return emulated;
}

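/*
 * Update one half of a BAT register pair.  BATs are written as two SPRs:
 * the upper word carries BEPI, the block length and the Vs/Vp bits, the
 * lower word carries BRPN, WIMG and PP; bat->raw keeps both words together.
 */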
void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper,
		    u32 val)
{
	if (upper) {
		/* Upper BAT */
		u32 bl = (val >> 2) & 0x7ff;
		bat->bepi_mask = (~bl << 17);
		bat->bepi = val & 0xfffe0000;
		bat->vs = (val & 2) ? 1 : 0;
		bat->vp = (val & 1) ? 1 : 0;
		bat->raw = (bat->raw & 0xffffffff00000000ULL) | val;
	} else {
		/* Lower BAT */
		bat->brpn = val & 0xfffe0000;
		bat->wimg = (val >> 3) & 0xf;
		bat->pp = val & 3;
		bat->raw = (bat->raw & 0x00000000ffffffffULL) | ((u64)val << 32);
	}
}

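/* Map a BAT SPR number to the corresponding IBAT/DBAT entry of the vcpu. */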
static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	struct kvmppc_bat *bat;

	switch (sprn) {
	case SPRN_IBAT0U ... SPRN_IBAT3L:
		bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2];
		break;
	case SPRN_IBAT4U ... SPRN_IBAT7L:
		bat = &vcpu_book3s->ibat[4 + ((sprn - SPRN_IBAT4U) / 2)];
		break;
	case SPRN_DBAT0U ... SPRN_DBAT3L:
		bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2];
		break;
	case SPRN_DBAT4U ... SPRN_DBAT7L:
		bat = &vcpu_book3s->dbat[4 + ((sprn - SPRN_DBAT4U) / 2)];
		break;
	default:
		BUG();
	}

	return bat;
}

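/*
 * Emulate a guest mtspr.  SPRs the guest may legitimately touch are
 * shadowed in the vcpu state or silently ignored; unknown SPRs are
 * reported and may raise a program interrupt.
 */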
int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
	int emulated = EMULATE_DONE;

	switch (sprn) {
	case SPRN_SDR1:
		if (!spr_allowed(vcpu, PRIV_HYPER))
			goto unprivileged;
		to_book3s(vcpu)->sdr1 = spr_val;
		break;
	case SPRN_DSISR:
		kvmppc_set_dsisr(vcpu, spr_val);
		break;
	case SPRN_DAR:
		kvmppc_set_dar(vcpu, spr_val);
		break;
	case SPRN_HIOR:
		to_book3s(vcpu)->hior = spr_val;
		break;
	case SPRN_IBAT0U ... SPRN_IBAT3L:
	case SPRN_IBAT4U ... SPRN_IBAT7L:
	case SPRN_DBAT0U ... SPRN_DBAT3L:
	case SPRN_DBAT4U ... SPRN_DBAT7L:
	{
		struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);

		kvmppc_set_bat(vcpu, bat, !(sprn % 2), (u32)spr_val);
		/* BAT writes happen so rarely that we're ok to flush
		 * everything here */
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		kvmppc_mmu_flush_segments(vcpu);
		break;
	}
	case SPRN_HID0:
		to_book3s(vcpu)->hid[0] = spr_val;
		break;
	case SPRN_HID1:
		to_book3s(vcpu)->hid[1] = spr_val;
		break;
	case SPRN_HID2_750FX:
		to_book3s(vcpu)->hid[2] = spr_val;
		break;
	case SPRN_HID2_GEKKO:
		to_book3s(vcpu)->hid[2] = spr_val;
		/* HID2.PSE controls paired single on gekko */
		switch (vcpu->arch.pvr) {
		case 0x00080200:	/* lonestar 2.0 */
		case 0x00088202:	/* lonestar 2.2 */
		case 0x70000100:	/* gekko 1.0 */
		case 0x00080100:	/* gekko 2.0 */
		case 0x00083203:	/* gekko 2.3a */
		case 0x00083213:	/* gekko 2.3b */
		case 0x00083204:	/* gekko 2.4 */
		case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
		case 0x00087200:	/* broadway */
			if (vcpu->arch.hflags & BOOK3S_HFLAG_NATIVE_PS) {
				/* Native paired singles */
			} else if (spr_val & (1 << 29)) { /* HID2.PSE */
				vcpu->arch.hflags |= BOOK3S_HFLAG_PAIRED_SINGLE;
				kvmppc_giveup_ext(vcpu, MSR_FP);
			} else {
				vcpu->arch.hflags &= ~BOOK3S_HFLAG_PAIRED_SINGLE;
			}
			break;
		}
		break;
	case SPRN_HID4:
	case SPRN_HID4_GEKKO:
		to_book3s(vcpu)->hid[4] = spr_val;
		break;
	case SPRN_HID5:
		to_book3s(vcpu)->hid[5] = spr_val;
		/* guest HID5 set can change is_dcbz32 */
		if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
		    (mfmsr() & MSR_HV))
			vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
		break;
	case SPRN_GQR0:
	case SPRN_GQR1:
	case SPRN_GQR2:
	case SPRN_GQR3:
	case SPRN_GQR4:
	case SPRN_GQR5:
	case SPRN_GQR6:
	case SPRN_GQR7:
		to_book3s(vcpu)->gqr[sprn - SPRN_GQR0] = spr_val;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_FSCR:
		kvmppc_set_fscr(vcpu, spr_val);
		break;
	case SPRN_BESCR:
		vcpu->arch.bescr = spr_val;
		break;
	case SPRN_EBBHR:
		vcpu->arch.ebbhr = spr_val;
		break;
	case SPRN_EBBRR:
		vcpu->arch.ebbrr = spr_val;
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case SPRN_TFHAR:
	case SPRN_TEXASR:
	case SPRN_TFIAR:
		if (!cpu_has_feature(CPU_FTR_TM))
			break;

		if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
			kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
			emulated = EMULATE_AGAIN;
			break;
		}

		if (MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)) &&
		    !((MSR_TM_SUSPENDED(kvmppc_get_msr(vcpu))) &&
			    (sprn == SPRN_TFHAR))) {
			/* It is illegal to mtspr() TM regs in any state
			 * other than non-transactional, with the exception
			 * of TFHAR in suspended state.
			 */
			kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
			emulated = EMULATE_AGAIN;
			break;
		}

		tm_enable();
		if (sprn == SPRN_TFHAR)
			mtspr(SPRN_TFHAR, spr_val);
		else if (sprn == SPRN_TEXASR)
			mtspr(SPRN_TEXASR, spr_val);
		else
			mtspr(SPRN_TFIAR, spr_val);
		tm_disable();

		break;
#endif
#endif
	case SPRN_ICTC:
	case SPRN_THRM1:
	case SPRN_THRM2:
	case SPRN_THRM3:
	case SPRN_CTRLF:
	case SPRN_CTRLT:
	case SPRN_L2CR:
	case SPRN_DSCR:
	case SPRN_MMCR0_GEKKO:
	case SPRN_MMCR1_GEKKO:
	case SPRN_PMC1_GEKKO:
	case SPRN_PMC2_GEKKO:
	case SPRN_PMC3_GEKKO:
	case SPRN_PMC4_GEKKO:
	case SPRN_WPAR_GEKKO:
	case SPRN_MSSSR0:
	case SPRN_DABR:
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_MMCRS:
	case SPRN_MMCRA:
	case SPRN_MMCR0:
	case SPRN_MMCR1:
	case SPRN_MMCR2:
	case SPRN_UMMCR2:
	case SPRN_UAMOR:
	case SPRN_IAMR:
	case SPRN_AMR:
#endif
		break;
unprivileged:
	default:
		pr_info_ratelimited("KVM: invalid SPR write: %d\n", sprn);
		if (sprn & 0x10) {
			if (kvmppc_get_msr(vcpu) & MSR_PR) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
			}
		} else {
			if ((kvmppc_get_msr(vcpu) & MSR_PR) || sprn == 0) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
				emulated = EMULATE_AGAIN;
			}
		}
		break;
	}

	return emulated;
}

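/*
 * Emulate a guest mfspr.  Shadowed SPRs are read back from the vcpu
 * state, host-only SPRs read as 0, and unknown SPRs are reported and
 * may raise a program interrupt.
 */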
int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
	int emulated = EMULATE_DONE;

	switch (sprn) {
	case SPRN_IBAT0U ... SPRN_IBAT3L:
	case SPRN_IBAT4U ... SPRN_IBAT7L:
	case SPRN_DBAT0U ... SPRN_DBAT3L:
	case SPRN_DBAT4U ... SPRN_DBAT7L:
	{
		struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);

		if (sprn % 2)
			*spr_val = bat->raw >> 32;
		else
			*spr_val = bat->raw;

		break;
	}
	case SPRN_SDR1:
		if (!spr_allowed(vcpu, PRIV_HYPER))
			goto unprivileged;
		*spr_val = to_book3s(vcpu)->sdr1;
		break;
	case SPRN_DSISR:
		*spr_val = kvmppc_get_dsisr(vcpu);
		break;
	case SPRN_DAR:
		*spr_val = kvmppc_get_dar(vcpu);
		break;
	case SPRN_HIOR:
		*spr_val = to_book3s(vcpu)->hior;
		break;
	case SPRN_HID0:
		*spr_val = to_book3s(vcpu)->hid[0];
		break;
	case SPRN_HID1:
		*spr_val = to_book3s(vcpu)->hid[1];
		break;
	case SPRN_HID2_750FX:
	case SPRN_HID2_GEKKO:
		*spr_val = to_book3s(vcpu)->hid[2];
		break;
	case SPRN_HID4:
	case SPRN_HID4_GEKKO:
		*spr_val = to_book3s(vcpu)->hid[4];
		break;
	case SPRN_HID5:
		*spr_val = to_book3s(vcpu)->hid[5];
		break;
	case SPRN_CFAR:
	case SPRN_DSCR:
		*spr_val = 0;
		break;
	case SPRN_PURR:
		/*
		 * On exit we would have updated purr
		 */
		*spr_val = vcpu->arch.purr;
		break;
	case SPRN_SPURR:
		/*
		 * On exit we would have updated spurr
		 */
		*spr_val = vcpu->arch.spurr;
		break;
	case SPRN_VTB:
		*spr_val = to_book3s(vcpu)->vtb;
		break;
	case SPRN_IC:
		*spr_val = vcpu->arch.ic;
		break;
	case SPRN_GQR0:
	case SPRN_GQR1:
	case SPRN_GQR2:
	case SPRN_GQR3:
	case SPRN_GQR4:
	case SPRN_GQR5:
	case SPRN_GQR6:
	case SPRN_GQR7:
		*spr_val = to_book3s(vcpu)->gqr[sprn - SPRN_GQR0];
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_FSCR:
		*spr_val = vcpu->arch.fscr;
		break;
	case SPRN_BESCR:
		*spr_val = vcpu->arch.bescr;
		break;
	case SPRN_EBBHR:
		*spr_val = vcpu->arch.ebbhr;
		break;
	case SPRN_EBBRR:
		*spr_val = vcpu->arch.ebbrr;
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case SPRN_TFHAR:
	case SPRN_TEXASR:
	case SPRN_TFIAR:
		if (!cpu_has_feature(CPU_FTR_TM))
			break;

		if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
			kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
			emulated = EMULATE_AGAIN;
			break;
		}

		tm_enable();
		if (sprn == SPRN_TFHAR)
			*spr_val = mfspr(SPRN_TFHAR);
		else if (sprn == SPRN_TEXASR)
			*spr_val = mfspr(SPRN_TEXASR);
		else if (sprn == SPRN_TFIAR)
			*spr_val = mfspr(SPRN_TFIAR);
		tm_disable();
		break;
#endif
#endif
	case SPRN_THRM1:
	case SPRN_THRM2:
	case SPRN_THRM3:
	case SPRN_CTRLF:
	case SPRN_CTRLT:
	case SPRN_L2CR:
	case SPRN_MMCR0_GEKKO:
	case SPRN_MMCR1_GEKKO:
	case SPRN_PMC1_GEKKO:
	case SPRN_PMC2_GEKKO:
	case SPRN_PMC3_GEKKO:
	case SPRN_PMC4_GEKKO:
	case SPRN_WPAR_GEKKO:
	case SPRN_MSSSR0:
	case SPRN_DABR:
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_MMCRS:
	case SPRN_MMCRA:
	case SPRN_MMCR0:
	case SPRN_MMCR1:
	case SPRN_MMCR2:
	case SPRN_UMMCR2:
	case SPRN_TIR:
	case SPRN_UAMOR:
	case SPRN_IAMR:
	case SPRN_AMR:
#endif
		*spr_val = 0;
		break;
	default:
unprivileged:
		pr_info_ratelimited("KVM: invalid SPR read: %d\n", sprn);
		if (sprn & 0x10) {
			if (kvmppc_get_msr(vcpu) & MSR_PR) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
			}
		} else {
			if ((kvmppc_get_msr(vcpu) & MSR_PR) || sprn == 0 ||
			    sprn == 4 || sprn == 5 || sprn == 6) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
				emulated = EMULATE_AGAIN;
			}
		}

		break;
	}

	return emulated;
}

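/*
 * Helpers used by the alignment interrupt path: reconstruct the DSISR and
 * DAR values the hardware would have reported for the faulting instruction.
 */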
u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst)
{
	return make_dsisr(inst);
}

ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst)
{
#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * Linux's fix_alignment() assumes that DAR is valid, so we can
	 * rely on it here as well.
	 */
	return vcpu->arch.fault_dar;
#else
	ulong dar = 0;
	ulong ra = get_ra(inst);
	ulong rb = get_rb(inst);

	switch (get_op(inst)) {
	case OP_LFS:
	case OP_LFD:
	case OP_STFD:
	case OP_STFS:
		if (ra)
			dar = kvmppc_get_gpr(vcpu, ra);
		dar += (s32)((s16)inst);
		break;
	case 31:
		if (ra)
			dar = kvmppc_get_gpr(vcpu, ra);
		dar += kvmppc_get_gpr(vcpu, rb);
		break;
	default:
		printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
		break;
	}

	return dar;
#endif
}