1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
6 * Anup Patel <anup.patel@wdc.com>
9 #include <linux/bitops.h>
10 #include <linux/errno.h>
11 #include <linux/err.h>
12 #include <linux/kdebug.h>
13 #include <linux/module.h>
14 #include <linux/percpu.h>
15 #include <linux/uaccess.h>
16 #include <linux/vmalloc.h>
17 #include <linux/sched/signal.h>
19 #include <linux/kvm_host.h>
21 #include <asm/hwcap.h>
23 const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
24 KVM_GENERIC_VCPU_STATS(),
25 STATS_DESC_COUNTER(VCPU, ecall_exit_stat),
26 STATS_DESC_COUNTER(VCPU, wfi_exit_stat),
27 STATS_DESC_COUNTER(VCPU, mmio_exit_user),
28 STATS_DESC_COUNTER(VCPU, mmio_exit_kernel),
29 STATS_DESC_COUNTER(VCPU, exits)
32 const struct kvm_stats_header kvm_vcpu_stats_header = {
33 .name_size = KVM_STATS_NAME_SIZE,
34 .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
35 .id_offset = sizeof(struct kvm_stats_header),
36 .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
37 .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
38 sizeof(kvm_vcpu_stats_desc),
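/*
 * Reset guest FP state: mark sstatus.FS "initial" when the F or D
 * extension is available to this VCPU, otherwise leave FP turned off.
 */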
42 static void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu)
44 unsigned long isa = vcpu->arch.isa;
45 struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
47 cntx->sstatus &= ~SR_FS;
48 if (riscv_isa_extension_available(&isa, f) ||
49 riscv_isa_extension_available(&isa, d))
50 cntx->sstatus |= SR_FS_INITIAL;
52 cntx->sstatus |= SR_FS_OFF;
55 static void kvm_riscv_vcpu_fp_clean(struct kvm_cpu_context *cntx)
57 cntx->sstatus &= ~SR_FS;
58 cntx->sstatus |= SR_FS_CLEAN;
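/* Save guest FP registers only if the guest dirtied them, then mark them clean. */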
61 static void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx,
64 if ((cntx->sstatus & SR_FS) == SR_FS_DIRTY) {
65 if (riscv_isa_extension_available(&isa, d))
66 __kvm_riscv_fp_d_save(cntx);
67 else if (riscv_isa_extension_available(&isa, f))
68 __kvm_riscv_fp_f_save(cntx);
69 kvm_riscv_vcpu_fp_clean(cntx);
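/* Restore guest FP registers unless guest FP is turned off, then mark them clean. */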
73 static void kvm_riscv_vcpu_guest_fp_restore(struct kvm_cpu_context *cntx,
76 if ((cntx->sstatus & SR_FS) != SR_FS_OFF) {
77 if (riscv_isa_extension_available(&isa, d))
78 __kvm_riscv_fp_d_restore(cntx);
79 else if (riscv_isa_extension_available(&isa, f))
80 __kvm_riscv_fp_f_restore(cntx);
81 kvm_riscv_vcpu_fp_clean(cntx);
85 static void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx)
87 /* No need to check host sstatus as it can be modified outside */
88 if (riscv_isa_extension_available(NULL, d))
89 __kvm_riscv_fp_d_save(cntx);
90 else if (riscv_isa_extension_available(NULL, f))
91 __kvm_riscv_fp_f_save(cntx);
94 static void kvm_riscv_vcpu_host_fp_restore(struct kvm_cpu_context *cntx)
96 if (riscv_isa_extension_available(NULL, d))
97 __kvm_riscv_fp_d_restore(cntx);
98 else if (riscv_isa_extension_available(NULL, f))
99 __kvm_riscv_fp_f_restore(cntx);
102 static void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu)
105 static void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx,
109 static void kvm_riscv_vcpu_guest_fp_restore(struct kvm_cpu_context *cntx,
113 static void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx)
116 static void kvm_riscv_vcpu_host_fp_restore(struct kvm_cpu_context *cntx)
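/* Base ISA extensions that may be exposed to a guest VCPU */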
121 #define KVM_RISCV_ISA_ALLOWED (riscv_isa_extension_mask(a) | \
122 riscv_isa_extension_mask(c) | \
123 riscv_isa_extension_mask(d) | \
124 riscv_isa_extension_mask(f) | \
125 riscv_isa_extension_mask(i) | \
126 riscv_isa_extension_mask(m) | \
127 riscv_isa_extension_mask(s) | \
128 riscv_isa_extension_mask(u))
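/*
 * Return a VCPU to its reset state: restore the saved reset copies of the
 * guest CSRs and context, reset FP and timer state, and clear any recorded
 * pending interrupts.
 */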
130 static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
132 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
133 struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
134 struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
135 struct kvm_cpu_context *reset_cntx = &vcpu->arch.guest_reset_context;
137 memcpy(csr, reset_csr, sizeof(*csr));
139 memcpy(cntx, reset_cntx, sizeof(*cntx));
141 kvm_riscv_vcpu_fp_reset(vcpu);
143 kvm_riscv_vcpu_timer_reset(vcpu);
145 WRITE_ONCE(vcpu->arch.irqs_pending, 0);
146 WRITE_ONCE(vcpu->arch.irqs_pending_mask, 0);
149 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
154 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
156 struct kvm_cpu_context *cntx;
158 /* Mark this VCPU as never having run */
159 vcpu->arch.ran_atleast_once = false;
161 /* Setup ISA features available to VCPU */
162 vcpu->arch.isa = riscv_isa_extension_base(NULL) & KVM_RISCV_ISA_ALLOWED;
164 /* Setup reset state of shadow SSTATUS and HSTATUS CSRs */
165 cntx = &vcpu->arch.guest_reset_context;
166 cntx->sstatus = SR_SPP | SR_SPIE;
168 cntx->hstatus |= HSTATUS_VTW;
169 cntx->hstatus |= HSTATUS_SPVP;
170 cntx->hstatus |= HSTATUS_SPV;
172 /* Setup VCPU timer */
173 kvm_riscv_vcpu_timer_init(vcpu);
176 kvm_riscv_reset_vcpu(vcpu);
181 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
185 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
187 /* Cleanup VCPU timer */
188 kvm_riscv_vcpu_timer_deinit(vcpu);
190 /* Flush the pages pre-allocated for Stage2 page table mappings */
191 kvm_riscv_stage2_flush_cache(vcpu);
194 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
196 return kvm_riscv_vcpu_has_interrupts(vcpu, 1UL << IRQ_VS_TIMER);
199 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
203 void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
207 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
209 return (kvm_riscv_vcpu_has_interrupts(vcpu, -1UL) &&
210 !vcpu->arch.power_off && !vcpu->arch.pause);
213 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
215 return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
218 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
220 return (vcpu->arch.guest_context.sstatus & SR_SPP) ? true : false;
223 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
225 return VM_FAULT_SIGBUS;
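/* KVM_GET_ONE_REG handler for the CONFIG register group (currently only the ISA register) */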
228 static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
229 const struct kvm_one_reg *reg)
231 unsigned long __user *uaddr =
232 (unsigned long __user *)(unsigned long)reg->addr;
233 unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
235 KVM_REG_RISCV_CONFIG);
236 unsigned long reg_val;
238 if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
242 case KVM_REG_RISCV_CONFIG_REG(isa):
243 reg_val = vcpu->arch.isa;
249 if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
255 static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
256 const struct kvm_one_reg *reg)
258 unsigned long __user *uaddr =
259 (unsigned long __user *)(unsigned long)reg->addr;
260 unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
262 KVM_REG_RISCV_CONFIG);
263 unsigned long reg_val;
265 if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
268 if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
272 case KVM_REG_RISCV_CONFIG_REG(isa):
273 if (!vcpu->arch.ran_atleast_once) {
274 vcpu->arch.isa = reg_val;
275 vcpu->arch.isa &= riscv_isa_extension_base(NULL);
276 vcpu->arch.isa &= KVM_RISCV_ISA_ALLOWED;
277 kvm_riscv_vcpu_fp_reset(vcpu);
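/* KVM_GET_ONE_REG handler for the CORE register group (sepc, GPRs and privilege mode) */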
289 static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
290 const struct kvm_one_reg *reg)
292 struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
293 unsigned long __user *uaddr =
294 (unsigned long __user *)(unsigned long)reg->addr;
295 unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
298 unsigned long reg_val;
300 if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
302 if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
305 if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
306 reg_val = cntx->sepc;
307 else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
308 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
309 reg_val = ((unsigned long *)cntx)[reg_num];
310 else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
311 reg_val = (cntx->sstatus & SR_SPP) ?
312 KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
316 if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
322 static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
323 const struct kvm_one_reg *reg)
325 struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
326 unsigned long __user *uaddr =
327 (unsigned long __user *)(unsigned long)reg->addr;
328 unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
331 unsigned long reg_val;
333 if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
335 if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
338 if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
341 if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
342 cntx->sepc = reg_val;
343 else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
344 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
345 ((unsigned long *)cntx)[reg_num] = reg_val;
346 else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
347 if (reg_val == KVM_RISCV_MODE_S)
348 cntx->sstatus |= SR_SPP;
350 cntx->sstatus &= ~SR_SPP;
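/* KVM_GET_ONE_REG handler for the CSR register group; sip is derived from the shadow hvip */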
357 static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
358 const struct kvm_one_reg *reg)
360 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
361 unsigned long __user *uaddr =
362 (unsigned long __user *)(unsigned long)reg->addr;
363 unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
366 unsigned long reg_val;
368 if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
370 if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
373 if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
374 kvm_riscv_vcpu_flush_interrupts(vcpu);
375 reg_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
377 reg_val = ((unsigned long *)csr)[reg_num];
379 if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
385 static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
386 const struct kvm_one_reg *reg)
388 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
389 unsigned long __user *uaddr =
390 (unsigned long __user *)(unsigned long)reg->addr;
391 unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
394 unsigned long reg_val;
396 if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
398 if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
401 if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
404 if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
405 reg_val &= VSIP_VALID_MASK;
406 reg_val <<= VSIP_TO_HVIP_SHIFT;
409 ((unsigned long *)csr)[reg_num] = reg_val;
411 if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
412 WRITE_ONCE(vcpu->arch.irqs_pending_mask, 0);
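/* KVM_GET_ONE_REG handler for the F and D floating-point register groups */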
417 static int kvm_riscv_vcpu_get_reg_fp(struct kvm_vcpu *vcpu,
418 const struct kvm_one_reg *reg,
421 struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
422 unsigned long isa = vcpu->arch.isa;
423 unsigned long __user *uaddr =
424 (unsigned long __user *)(unsigned long)reg->addr;
425 unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
430 if ((rtype == KVM_REG_RISCV_FP_F) &&
431 riscv_isa_extension_available(&isa, f)) {
432 if (KVM_REG_SIZE(reg->id) != sizeof(u32))
434 if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr))
435 reg_val = &cntx->fp.f.fcsr;
436 else if ((KVM_REG_RISCV_FP_F_REG(f[0]) <= reg_num) &&
437 reg_num <= KVM_REG_RISCV_FP_F_REG(f[31]))
438 reg_val = &cntx->fp.f.f[reg_num];
441 } else if ((rtype == KVM_REG_RISCV_FP_D) &&
442 riscv_isa_extension_available(&isa, d)) {
443 if (reg_num == KVM_REG_RISCV_FP_D_REG(fcsr)) {
444 if (KVM_REG_SIZE(reg->id) != sizeof(u32))
446 reg_val = &cntx->fp.d.fcsr;
447 } else if ((KVM_REG_RISCV_FP_D_REG(f[0]) <= reg_num) &&
448 reg_num <= KVM_REG_RISCV_FP_D_REG(f[31])) {
449 if (KVM_REG_SIZE(reg->id) != sizeof(u64))
451 reg_val = &cntx->fp.d.f[reg_num];
457 if (copy_to_user(uaddr, reg_val, KVM_REG_SIZE(reg->id)))
463 static int kvm_riscv_vcpu_set_reg_fp(struct kvm_vcpu *vcpu,
464 const struct kvm_one_reg *reg,
467 struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
468 unsigned long isa = vcpu->arch.isa;
469 unsigned long __user *uaddr =
470 (unsigned long __user *)(unsigned long)reg->addr;
471 unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
476 if ((rtype == KVM_REG_RISCV_FP_F) &&
477 riscv_isa_extension_available(&isa, f)) {
478 if (KVM_REG_SIZE(reg->id) != sizeof(u32))
480 if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr))
481 reg_val = &cntx->fp.f.fcsr;
482 else if ((KVM_REG_RISCV_FP_F_REG(f[0]) <= reg_num) &&
483 reg_num <= KVM_REG_RISCV_FP_F_REG(f[31]))
484 reg_val = &cntx->fp.f.f[reg_num];
487 } else if ((rtype == KVM_REG_RISCV_FP_D) &&
488 riscv_isa_extension_available(&isa, d)) {
489 if (reg_num == KVM_REG_RISCV_FP_D_REG(fcsr)) {
490 if (KVM_REG_SIZE(reg->id) != sizeof(u32))
492 reg_val = &cntx->fp.d.fcsr;
493 } else if ((KVM_REG_RISCV_FP_D_REG(f[0]) <= reg_num) &&
494 reg_num <= KVM_REG_RISCV_FP_D_REG(f[31])) {
495 if (KVM_REG_SIZE(reg->id) != sizeof(u64))
497 reg_val = &cntx->fp.d.f[reg_num];
503 if (copy_from_user(reg_val, uaddr, KVM_REG_SIZE(reg->id)))
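/* Dispatch a KVM_SET_ONE_REG request to the handler for its register group */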
509 static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
510 const struct kvm_one_reg *reg)
512 if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CONFIG)
513 return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
514 else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CORE)
515 return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
516 else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CSR)
517 return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
518 else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_TIMER)
519 return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
520 else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_F)
521 return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
523 else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_D)
524 return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
530 static int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
531 const struct kvm_one_reg *reg)
533 if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CONFIG)
534 return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
535 else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CORE)
536 return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
537 else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CSR)
538 return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
539 else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_TIMER)
540 return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
541 else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_F)
542 return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
544 else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_D)
545 return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
551 long kvm_arch_vcpu_async_ioctl(struct file *filp,
552 unsigned int ioctl, unsigned long arg)
554 struct kvm_vcpu *vcpu = filp->private_data;
555 void __user *argp = (void __user *)arg;
557 if (ioctl == KVM_INTERRUPT) {
558 struct kvm_interrupt irq;
560 if (copy_from_user(&irq, argp, sizeof(irq)))
563 if (irq.irq == KVM_INTERRUPT_SET)
564 return kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_EXT);
566 return kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT);
572 long kvm_arch_vcpu_ioctl(struct file *filp,
573 unsigned int ioctl, unsigned long arg)
575 struct kvm_vcpu *vcpu = filp->private_data;
576 void __user *argp = (void __user *)arg;
580 case KVM_SET_ONE_REG:
581 case KVM_GET_ONE_REG: {
582 struct kvm_one_reg reg;
585 if (copy_from_user(&reg, argp, sizeof(reg)))
588 if (ioctl == KVM_SET_ONE_REG)
589 r = kvm_riscv_vcpu_set_reg(vcpu, &reg);
591 r = kvm_riscv_vcpu_get_reg(vcpu, &reg);
601 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
602 struct kvm_sregs *sregs)
607 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
608 struct kvm_sregs *sregs)
613 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
618 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
623 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
624 struct kvm_translation *tr)
629 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
634 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
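/*
 * Apply asynchronously updated pending-interrupt bits to the shadow hvip
 * before the VCPU next enters the guest.
 */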
639 void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu)
641 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
642 unsigned long mask, val;
644 if (READ_ONCE(vcpu->arch.irqs_pending_mask)) {
645 mask = xchg_acquire(&vcpu->arch.irqs_pending_mask, 0);
646 val = READ_ONCE(vcpu->arch.irqs_pending) & mask;
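/*
 * Sync interrupt state back from hardware: re-read VSIE and pick up
 * HVIP.VSSIP changes made by the guest while it was running.
 */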
653 void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu)
656 struct kvm_vcpu_arch *v = &vcpu->arch;
657 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
659 /* Read current HVIP and VSIE CSRs */
660 csr->vsie = csr_read(CSR_VSIE);
662 /* Sync up HVIP.VSSIP bit changes done by the Guest */
663 hvip = csr_read(CSR_HVIP);
664 if ((csr->hvip ^ hvip) & (1UL << IRQ_VS_SOFT)) {
665 if (hvip & (1UL << IRQ_VS_SOFT)) {
666 if (!test_and_set_bit(IRQ_VS_SOFT,
667 &v->irqs_pending_mask))
668 set_bit(IRQ_VS_SOFT, &v->irqs_pending);
670 if (!test_and_set_bit(IRQ_VS_SOFT,
671 &v->irqs_pending_mask))
672 clear_bit(IRQ_VS_SOFT, &v->irqs_pending);
677 int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
679 if (irq != IRQ_VS_SOFT &&
680 irq != IRQ_VS_TIMER &&
684 set_bit(irq, &vcpu->arch.irqs_pending);
685 smp_mb__before_atomic();
686 set_bit(irq, &vcpu->arch.irqs_pending_mask);
693 int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
695 if (irq != IRQ_VS_SOFT &&
696 irq != IRQ_VS_TIMER &&
700 clear_bit(irq, &vcpu->arch.irqs_pending);
701 smp_mb__before_atomic();
702 set_bit(irq, &vcpu->arch.irqs_pending_mask);
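/* Check whether any of the requested interrupts is both pending and enabled */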
707 bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, unsigned long mask)
709 unsigned long ie = ((vcpu->arch.guest_csr.vsie & VSIP_VALID_MASK)
710 << VSIP_TO_HVIP_SHIFT) & mask;
712 return (READ_ONCE(vcpu->arch.irqs_pending) & ie) ? true : false;
715 void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
717 vcpu->arch.power_off = true;
718 kvm_make_request(KVM_REQ_SLEEP, vcpu);
722 void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu)
724 vcpu->arch.power_off = false;
725 kvm_vcpu_wake_up(vcpu);
728 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
729 struct kvm_mp_state *mp_state)
731 if (vcpu->arch.power_off)
732 mp_state->mp_state = KVM_MP_STATE_STOPPED;
734 mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
739 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
740 struct kvm_mp_state *mp_state)
744 switch (mp_state->mp_state) {
745 case KVM_MP_STATE_RUNNABLE:
746 vcpu->arch.power_off = false;
748 case KVM_MP_STATE_STOPPED:
749 kvm_riscv_vcpu_power_off(vcpu);
758 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
759 struct kvm_guest_debug *dbg)
761 /* TODO: To be implemented later. */
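/*
 * Load this VCPU's guest CSR, timer, and FP state onto the current CPU
 * before the VCPU starts running.
 */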
765 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
767 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
769 csr_write(CSR_VSSTATUS, csr->vsstatus);
770 csr_write(CSR_VSIE, csr->vsie);
771 csr_write(CSR_VSTVEC, csr->vstvec);
772 csr_write(CSR_VSSCRATCH, csr->vsscratch);
773 csr_write(CSR_VSEPC, csr->vsepc);
774 csr_write(CSR_VSCAUSE, csr->vscause);
775 csr_write(CSR_VSTVAL, csr->vstval);
776 csr_write(CSR_HVIP, csr->hvip);
777 csr_write(CSR_VSATP, csr->vsatp);
779 kvm_riscv_stage2_update_hgatp(vcpu);
781 kvm_riscv_vcpu_timer_restore(vcpu);
783 kvm_riscv_vcpu_host_fp_save(&vcpu->arch.host_context);
784 kvm_riscv_vcpu_guest_fp_restore(&vcpu->arch.guest_context,
790 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
792 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
796 kvm_riscv_vcpu_guest_fp_save(&vcpu->arch.guest_context,
798 kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context);
800 csr_write(CSR_HGATP, 0);
802 csr->vsstatus = csr_read(CSR_VSSTATUS);
803 csr->vsie = csr_read(CSR_VSIE);
804 csr->vstvec = csr_read(CSR_VSTVEC);
805 csr->vsscratch = csr_read(CSR_VSSCRATCH);
806 csr->vsepc = csr_read(CSR_VSEPC);
807 csr->vscause = csr_read(CSR_VSCAUSE);
808 csr->vstval = csr_read(CSR_VSTVAL);
809 csr->hvip = csr_read(CSR_HVIP);
810 csr->vsatp = csr_read(CSR_VSATP);
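/* Handle pending VCPU requests: sleep, reset, HGATP update and TLB flush */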
813 static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
815 struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
817 if (kvm_request_pending(vcpu)) {
818 if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) {
819 rcuwait_wait_event(wait,
820 (!vcpu->arch.power_off) && (!vcpu->arch.pause),
823 if (vcpu->arch.power_off || vcpu->arch.pause) {
825 * Awaken to handle a signal, request to sleep again later.
828 kvm_make_request(KVM_REQ_SLEEP, vcpu);
832 if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
833 kvm_riscv_reset_vcpu(vcpu);
835 if (kvm_check_request(KVM_REQ_UPDATE_HGATP, vcpu))
836 kvm_riscv_stage2_update_hgatp(vcpu);
838 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
839 __kvm_riscv_hfence_gvma_all();
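/* Write the shadow hvip value to the HVIP CSR on the current CPU */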
843 static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
845 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
847 csr_write(CSR_HVIP, csr->hvip);
850 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
853 struct kvm_cpu_trap trap;
854 struct kvm_run *run = vcpu->run;
856 /* Mark that this VCPU has now run at least once */
857 vcpu->arch.ran_atleast_once = true;
859 vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
861 /* Process MMIO value returned from user-space */
862 if (run->exit_reason == KVM_EXIT_MMIO) {
863 ret = kvm_riscv_vcpu_mmio_return(vcpu, vcpu->run);
865 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
870 /* Process SBI value returned from user-space */
871 if (run->exit_reason == KVM_EXIT_RISCV_SBI) {
872 ret = kvm_riscv_vcpu_sbi_return(vcpu, vcpu->run);
874 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
879 if (run->immediate_exit) {
880 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
886 kvm_sigset_activate(vcpu);
889 run->exit_reason = KVM_EXIT_UNKNOWN;
891 /* Check conditions before entering the guest */
894 kvm_riscv_stage2_vmid_update(vcpu);
896 kvm_riscv_check_vcpu_requests(vcpu);
903 * Exit if we have a signal pending so that we can deliver
904 * the signal to user space.
906 if (signal_pending(current)) {
908 run->exit_reason = KVM_EXIT_INTR;
912 * Ensure we set mode to IN_GUEST_MODE after we disable
913 * interrupts and before the final VCPU requests check.
914 * See the comment in kvm_vcpu_exiting_guest_mode() and
915 * Documentation/virtual/kvm/vcpu-requests.rst
917 vcpu->mode = IN_GUEST_MODE;
919 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
920 smp_mb__after_srcu_read_unlock();
923 * VCPU interrupts might have been updated asynchronously,
924 * so reflect them in HW.
926 kvm_riscv_vcpu_flush_interrupts(vcpu);
928 /* Update HVIP CSR for current CPU */
929 kvm_riscv_update_hvip(vcpu);
932 kvm_riscv_stage2_vmid_ver_changed(&vcpu->kvm->arch.vmid) ||
933 kvm_request_pending(vcpu)) {
934 vcpu->mode = OUTSIDE_GUEST_MODE;
937 vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
941 guest_enter_irqoff();
943 __kvm_riscv_switch_to(&vcpu->arch);
945 vcpu->mode = OUTSIDE_GUEST_MODE;
949 * Save SCAUSE, STVAL, HTVAL, and HTINST because we might
950 * get an interrupt between __kvm_riscv_switch_to() and
951 * local_irq_enable() which can potentially change CSRs.
953 trap.sepc = vcpu->arch.guest_context.sepc;
954 trap.scause = csr_read(CSR_SCAUSE);
955 trap.stval = csr_read(CSR_STVAL);
956 trap.htval = csr_read(CSR_HTVAL);
957 trap.htinst = csr_read(CSR_HTINST);
959 /* Sync up interrupt state with HW */
960 kvm_riscv_vcpu_sync_interrupts(vcpu);
963 * We may have taken a host interrupt in VS/VU-mode (i.e.
964 * while executing the guest). This interrupt is still
965 * pending, as we haven't serviced it yet!
967 * We're now back in HS-mode with interrupts disabled
968 * so enabling the interrupts now will have the effect
969 * of taking the interrupt again, in HS-mode this time.
974 * We do local_irq_enable() before calling guest_exit() so
975 * that if a timer interrupt hits while running the guest
976 * we account that tick as being spent in the guest. We
977 * enable preemption after calling guest_exit() so that if
978 * we get preempted we make sure ticks after that are not
979 * counted as guest time.
985 vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
987 ret = kvm_riscv_vcpu_exit(vcpu, run, &trap);
990 kvm_sigset_deactivate(vcpu);
994 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);