// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *	Anup Patel <anup.patel@wdc.com>
 */

#include <linux/bitops.h>
#include <linux/entry-kvm.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/kvm_host.h>
#include <asm/csr.h>
#include <asm/cacheflush.h>
#include <asm/hwcap.h>

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, ecall_exit_stat),
	STATS_DESC_COUNTER(VCPU, wfi_exit_stat),
	STATS_DESC_COUNTER(VCPU, mmio_exit_user),
	STATS_DESC_COUNTER(VCPU, mmio_exit_kernel),
	STATS_DESC_COUNTER(VCPU, csr_exit_user),
	STATS_DESC_COUNTER(VCPU, csr_exit_kernel),
	STATS_DESC_COUNTER(VCPU, signal_exits),
	STATS_DESC_COUNTER(VCPU, exits)
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};

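/*
 * Bits [25:0] of the ISA config register map to the 26 single-letter
 * base extensions 'a' through 'z'.
 */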
#define KVM_RISCV_BASE_ISA_MASK		GENMASK(25, 0)

#define KVM_ISA_EXT_ARR(ext)		[KVM_RISCV_ISA_EXT_##ext] = RISCV_ISA_EXT_##ext

/* Mapping between KVM ISA Extension ID & Host ISA extension ID */
static const unsigned long kvm_isa_ext_arr[] = {
	[KVM_RISCV_ISA_EXT_A] = RISCV_ISA_EXT_a,
	[KVM_RISCV_ISA_EXT_C] = RISCV_ISA_EXT_c,
	[KVM_RISCV_ISA_EXT_D] = RISCV_ISA_EXT_d,
	[KVM_RISCV_ISA_EXT_F] = RISCV_ISA_EXT_f,
	[KVM_RISCV_ISA_EXT_H] = RISCV_ISA_EXT_h,
	[KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i,
	[KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m,

	KVM_ISA_EXT_ARR(SSTC),
	KVM_ISA_EXT_ARR(SVINVAL),
	KVM_ISA_EXT_ARR(SVPBMT),
	KVM_ISA_EXT_ARR(ZIHINTPAUSE),
	KVM_ISA_EXT_ARR(ZICBOM),
};

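/*
 * Translate a host base-ISA extension number into the corresponding KVM
 * ISA extension ID, or KVM_RISCV_ISA_EXT_MAX if KVM has no mapping for it.
 */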
static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
{
	unsigned long i;

	for (i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
		if (kvm_isa_ext_arr[i] == base_ext)
			return i;
	}

	return KVM_RISCV_ISA_EXT_MAX;
}

static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
{
	switch (ext) {
	case KVM_RISCV_ISA_EXT_H:
		return false;
	default:
		break;
	}

	return true;
}

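/*
 * Extensions below cannot be disabled for a guest: the base single-letter
 * extensions are assumed by guest software, and hint-like extensions such
 * as Zihintpause execute as plain HINTs that cannot be trapped, so a
 * disable request could not be enforced anyway.
 */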
static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
{
	switch (ext) {
	case KVM_RISCV_ISA_EXT_A:
	case KVM_RISCV_ISA_EXT_C:
	case KVM_RISCV_ISA_EXT_I:
	case KVM_RISCV_ISA_EXT_M:
	case KVM_RISCV_ISA_EXT_SSTC:
	case KVM_RISCV_ISA_EXT_SVINVAL:
	case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
		return false;
	default:
		break;
	}

	return true;
}

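/*
 * Restore the vCPU to its power-on state using the reset CSR and context
 * templates prepared by kvm_arch_vcpu_create().
 */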
static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	struct kvm_cpu_context *reset_cntx = &vcpu->arch.guest_reset_context;
	bool loaded;

	/*
	 * Preemption must be disabled here because this races with
	 * kvm_sched_out()/kvm_sched_in() (called from preempt notifiers),
	 * which also call vcpu_load()/vcpu_put().
	 */
	get_cpu();
	loaded = (vcpu->cpu != -1);
	if (loaded)
		kvm_arch_vcpu_put(vcpu);

	vcpu->arch.last_exit_cpu = -1;

	memcpy(csr, reset_csr, sizeof(*csr));

	memcpy(cntx, reset_cntx, sizeof(*cntx));

	kvm_riscv_vcpu_fp_reset(vcpu);

	kvm_riscv_vcpu_timer_reset(vcpu);

	WRITE_ONCE(vcpu->arch.irqs_pending, 0);
	WRITE_ONCE(vcpu->arch.irqs_pending_mask, 0);

	vcpu->arch.hfence_head = 0;
	vcpu->arch.hfence_tail = 0;
	memset(vcpu->arch.hfence_queue, 0, sizeof(vcpu->arch.hfence_queue));

	/* Reset the guest CSRs for hotplug usecase */
	if (loaded)
		kvm_arch_vcpu_load(vcpu, smp_processor_id());
	put_cpu();
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}

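/*
 * One-time vCPU setup: derive the allowed ISA bitmap from the host ISA,
 * snapshot the SBI machine identity registers, and prepare the reset
 * state that kvm_riscv_reset_vcpu() applies below.
 */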
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *cntx;
	struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
	unsigned long host_isa, i;

	/* Mark this VCPU never ran */
	vcpu->arch.ran_atleast_once = false;
	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
	bitmap_zero(vcpu->arch.isa, RISCV_ISA_EXT_MAX);

	/* Setup ISA features available to VCPU */
	for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) {
		host_isa = kvm_isa_ext_arr[i];
		if (__riscv_isa_extension_available(NULL, host_isa) &&
		    kvm_riscv_vcpu_isa_enable_allowed(i))
			set_bit(host_isa, vcpu->arch.isa);
	}

	/* Setup vendor, arch, and implementation details */
	vcpu->arch.mvendorid = sbi_get_mvendorid();
	vcpu->arch.marchid = sbi_get_marchid();
	vcpu->arch.mimpid = sbi_get_mimpid();

	/* Setup VCPU hfence queue */
	spin_lock_init(&vcpu->arch.hfence_lock);

	/* Setup reset state of shadow SSTATUS and HSTATUS CSRs */
	cntx = &vcpu->arch.guest_reset_context;
	cntx->sstatus = SR_SPP | SR_SPIE;
	cntx->hstatus = 0;
	cntx->hstatus |= HSTATUS_VTW;
	cntx->hstatus |= HSTATUS_SPVP;
	cntx->hstatus |= HSTATUS_SPV;

	/* By default, make CY, TM, and IR counters accessible in VU mode */
	reset_csr->scounteren = 0x7;

	/* Setup VCPU timer */
	kvm_riscv_vcpu_timer_init(vcpu);

	/* Reset VCPU */
	kvm_riscv_reset_vcpu(vcpu);

	return 0;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	/*
	 * vcpu with id 0 is the designated boot cpu.
	 * Keep all vcpus with non-zero id in power-off state so that
	 * they can be brought up using SBI HSM extension.
	 */
	if (vcpu->vcpu_idx != 0)
		kvm_riscv_vcpu_power_off(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	/* Cleanup VCPU timer */
	kvm_riscv_vcpu_timer_deinit(vcpu);

	/* Free unused pages pre-allocated for G-stage page table mappings */
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_riscv_vcpu_timer_pending(vcpu);
}

void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return (kvm_riscv_vcpu_has_interrupts(vcpu, -1UL) &&
		!vcpu->arch.power_off && !vcpu->arch.pause);
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.guest_context.sstatus & SR_SPP) ? true : false;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

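/*
 * ONE_REG accessors: reg->id encodes the architecture, the register size,
 * the register subspace (CONFIG, CORE, CSR, TIMER, FP, ISA_EXT), and the
 * register index within that subspace.
 */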
static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
			return -EINVAL;
		reg_val = riscv_cbom_block_size;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
		reg_val = vcpu->arch.mvendorid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(marchid):
		reg_val = vcpu->arch.marchid;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mimpid):
		reg_val = vcpu->arch.mimpid;
		break;
	default:
		return -EINVAL;
	}

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

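/*
 * Config registers are only writable before the vCPU first runs; once
 * ran_atleast_once is set, writes fail so that the guest-visible
 * configuration stays stable while the guest executes.
 */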
static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long i, isa_ext, reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		/*
		 * This ONE REG interface is only defined for
		 * single letter extensions.
		 */
		if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
			return -EINVAL;

		if (!vcpu->arch.ran_atleast_once) {
			/* Ignore the enable/disable request for certain extensions */
			for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
				isa_ext = kvm_riscv_vcpu_base2isa_ext(i);
				if (isa_ext >= KVM_RISCV_ISA_EXT_MAX) {
					reg_val &= ~BIT(i);
					continue;
				}
				if (!kvm_riscv_vcpu_isa_enable_allowed(isa_ext))
					if (reg_val & BIT(i))
						reg_val &= ~BIT(i);
				if (!kvm_riscv_vcpu_isa_disable_allowed(isa_ext))
					if (!(reg_val & BIT(i)))
						reg_val |= BIT(i);
			}
			reg_val &= riscv_isa_extension_base(NULL);
			/* Do not modify anything beyond single letter extensions */
			reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) |
				  (reg_val & KVM_RISCV_BASE_ISA_MASK);
			vcpu->arch.isa[0] = reg_val;
			kvm_riscv_vcpu_fp_reset(vcpu);
		} else {
			return -EOPNOTSUPP;
		}
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		return -EOPNOTSUPP;
	case KVM_REG_RISCV_CONFIG_REG(mvendorid):
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.mvendorid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(marchid):
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.marchid = reg_val;
		else
			return -EBUSY;
		break;
	case KVM_REG_RISCV_CONFIG_REG(mimpid):
		if (!vcpu->arch.ran_atleast_once)
			vcpu->arch.mimpid = reg_val;
		else
			return -EBUSY;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -EINVAL;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		reg_val = cntx->sepc;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		reg_val = ((unsigned long *)cntx)[reg_num];
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
		reg_val = (cntx->sstatus & SR_SPP) ?
				KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
	else
		return -EINVAL;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		cntx->sepc = reg_val;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		((unsigned long *)cntx)[reg_num] = reg_val;
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
		if (reg_val == KVM_RISCV_MODE_S)
			cntx->sstatus |= SR_SPP;
		else
			cntx->sstatus &= ~SR_SPP;
	} else
		return -EINVAL;

	return 0;
}

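/*
 * The sip value is synthesized from the software hvip shadow rather than
 * read from hardware, so pending-interrupt updates are folded in through
 * kvm_riscv_vcpu_flush_interrupts() before it is returned.
 */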
static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -EINVAL;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		kvm_riscv_vcpu_flush_interrupts(vcpu);
		reg_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
	} else
		reg_val = ((unsigned long *)csr)[reg_num];

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		reg_val &= VSIP_VALID_MASK;
		reg_val <<= VSIP_TO_HVIP_SHIFT;
	}

	((unsigned long *)csr)[reg_num] = reg_val;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
		WRITE_ONCE(vcpu->arch.irqs_pending_mask, 0);

	return 0;
}

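/*
 * The ISA_EXT subspace exposes one pseudo register per extension whose
 * value reads as 1 when the extension is enabled for this vCPU and 0
 * otherwise.
 */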
static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
					  const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_ISA_EXT);
	unsigned long reg_val = 0;
	unsigned long host_isa_ext;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
		return -EINVAL;

	host_isa_ext = kvm_isa_ext_arr[reg_num];
	if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext))
		reg_val = 1; /* Mark the given extension as available */

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
					  const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_ISA_EXT);
	unsigned long reg_val;
	unsigned long host_isa_ext;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	host_isa_ext = kvm_isa_ext_arr[reg_num];
	if (!__riscv_isa_extension_available(NULL, host_isa_ext))
		return -EOPNOTSUPP;

	if (!vcpu->arch.ran_atleast_once) {
		/*
		 * All multi-letter extensions and a few single-letter
		 * extensions can be disabled.
		 */
		if (reg_val == 1 &&
		    kvm_riscv_vcpu_isa_enable_allowed(reg_num))
			set_bit(host_isa_ext, vcpu->arch.isa);
		else if (!reg_val &&
			 kvm_riscv_vcpu_isa_disable_allowed(reg_num))
			clear_bit(host_isa_ext, vcpu->arch.isa);
		else
			return -EINVAL;
		kvm_riscv_vcpu_fp_reset(vcpu);
	} else {
		return -EOPNOTSUPP;
	}

	return 0;
}

static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
				  const struct kvm_one_reg *reg)
{
	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
	case KVM_REG_RISCV_CONFIG:
		return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
	case KVM_REG_RISCV_CORE:
		return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
	case KVM_REG_RISCV_CSR:
		return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
	case KVM_REG_RISCV_TIMER:
		return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
	case KVM_REG_RISCV_FP_F:
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	case KVM_REG_RISCV_FP_D:
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
	case KVM_REG_RISCV_ISA_EXT:
		return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
	default:
		break;
	}

	return -EINVAL;
}

static int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
				  const struct kvm_one_reg *reg)
{
	switch (reg->id & KVM_REG_RISCV_TYPE_MASK) {
	case KVM_REG_RISCV_CONFIG:
		return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
	case KVM_REG_RISCV_CORE:
		return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
	case KVM_REG_RISCV_CSR:
		return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
	case KVM_REG_RISCV_TIMER:
		return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
	case KVM_REG_RISCV_FP_F:
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	case KVM_REG_RISCV_FP_D:
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
	case KVM_REG_RISCV_ISA_EXT:
		return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
	default:
		break;
	}

	return -EINVAL;
}

long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_interrupt irq;

		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;

		if (irq.irq == KVM_INTERRUPT_SET)
			return kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_EXT);
		else
			return kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT);
	}

	return -ENOIOCTLCMD;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;

		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_riscv_vcpu_set_reg(vcpu, &reg);
		else
			r = kvm_riscv_vcpu_get_reg(vcpu, &reg);
		break;
	}
	default:
		break;
	}

	return r;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

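/*
 * Interrupt producers update irqs_pending first and then set the matching
 * irqs_pending_mask bit, ordered by smp_mb__before_atomic(); the flush
 * below atomically consumes the mask and applies only the masked bits to
 * the hvip shadow.
 */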
void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	unsigned long mask, val;

	if (READ_ONCE(vcpu->arch.irqs_pending_mask)) {
		mask = xchg_acquire(&vcpu->arch.irqs_pending_mask, 0);
		val = READ_ONCE(vcpu->arch.irqs_pending) & mask;

		csr->hvip &= ~mask;
		csr->hvip |= val;
	}
}

void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu)
{
	unsigned long hvip;
	struct kvm_vcpu_arch *v = &vcpu->arch;
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	/* Read current HVIP and VSIE CSRs */
	csr->vsie = csr_read(CSR_VSIE);

	/* Sync up the HVIP.VSSIP bit changes done by the Guest */
	hvip = csr_read(CSR_HVIP);
	if ((csr->hvip ^ hvip) & (1UL << IRQ_VS_SOFT)) {
		if (hvip & (1UL << IRQ_VS_SOFT)) {
			if (!test_and_set_bit(IRQ_VS_SOFT,
					      &v->irqs_pending_mask))
				set_bit(IRQ_VS_SOFT, &v->irqs_pending);
		} else {
			if (!test_and_set_bit(IRQ_VS_SOFT,
					      &v->irqs_pending_mask))
				clear_bit(IRQ_VS_SOFT, &v->irqs_pending);
		}
	}

	/* Sync up timer CSRs */
	kvm_riscv_vcpu_timer_sync(vcpu);
}

int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
	if (irq != IRQ_VS_SOFT &&
	    irq != IRQ_VS_TIMER &&
	    irq != IRQ_VS_EXT)
		return -EINVAL;

	set_bit(irq, &vcpu->arch.irqs_pending);
	smp_mb__before_atomic();
	set_bit(irq, &vcpu->arch.irqs_pending_mask);

	kvm_vcpu_kick(vcpu);

	return 0;
}

int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
	if (irq != IRQ_VS_SOFT &&
	    irq != IRQ_VS_TIMER &&
	    irq != IRQ_VS_EXT)
		return -EINVAL;

	clear_bit(irq, &vcpu->arch.irqs_pending);
	smp_mb__before_atomic();
	set_bit(irq, &vcpu->arch.irqs_pending_mask);

	return 0;
}

bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, unsigned long mask)
{
	unsigned long ie = ((vcpu->arch.guest_csr.vsie & VSIP_VALID_MASK)
			    << VSIP_TO_HVIP_SHIFT) & mask;

	return (READ_ONCE(vcpu->arch.irqs_pending) & ie) ? true : false;
}

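/*
 * Powering off parks the vCPU via KVM_REQ_SLEEP; it becomes runnable
 * again through the SBI HSM extension or KVM_SET_MP_STATE.
 */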
void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
{
	vcpu->arch.power_off = true;
	kvm_make_request(KVM_REQ_SLEEP, vcpu);
	kvm_vcpu_kick(vcpu);
}

void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu)
{
	vcpu->arch.power_off = false;
	kvm_vcpu_wake_up(vcpu);
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	if (vcpu->arch.power_off)
		mp_state->mp_state = KVM_MP_STATE_STOPPED;
	else
		mp_state->mp_state = KVM_MP_STATE_RUNNABLE;

	return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int ret = 0;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_RUNNABLE:
		vcpu->arch.power_off = false;
		break;
	case KVM_MP_STATE_STOPPED:
		kvm_riscv_vcpu_power_off(vcpu);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	/* TODO: To be implemented later. */
	return -EINVAL;
}

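/*
 * Program the henvcfg CSR so that only the guest-selected extensions
 * needing a hypervisor enable bit (Svpbmt, Sstc, Zicbom) are exposed.
 */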
static void kvm_riscv_vcpu_update_config(const unsigned long *isa)
{
	u64 henvcfg = 0;

	if (riscv_isa_extension_available(isa, SVPBMT))
		henvcfg |= ENVCFG_PBMTE;

	if (riscv_isa_extension_available(isa, SSTC))
		henvcfg |= ENVCFG_STCE;

	if (riscv_isa_extension_available(isa, ZICBOM))
		henvcfg |= (ENVCFG_CBIE | ENVCFG_CBCFE);

	csr_write(CSR_HENVCFG, henvcfg);
#ifdef CONFIG_32BIT
	csr_write(CSR_HENVCFGH, henvcfg >> 32);
#endif
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	csr_write(CSR_VSSTATUS, csr->vsstatus);
	csr_write(CSR_VSIE, csr->vsie);
	csr_write(CSR_VSTVEC, csr->vstvec);
	csr_write(CSR_VSSCRATCH, csr->vsscratch);
	csr_write(CSR_VSEPC, csr->vsepc);
	csr_write(CSR_VSCAUSE, csr->vscause);
	csr_write(CSR_VSTVAL, csr->vstval);
	csr_write(CSR_HVIP, csr->hvip);
	csr_write(CSR_VSATP, csr->vsatp);

	kvm_riscv_vcpu_update_config(vcpu->arch.isa);

	kvm_riscv_gstage_update_hgatp(vcpu);

	kvm_riscv_vcpu_timer_restore(vcpu);

	kvm_riscv_vcpu_host_fp_save(&vcpu->arch.host_context);
	kvm_riscv_vcpu_guest_fp_restore(&vcpu->arch.guest_context,
					vcpu->arch.isa);

	vcpu->cpu = cpu;
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	vcpu->cpu = -1;

	kvm_riscv_vcpu_guest_fp_save(&vcpu->arch.guest_context,
				     vcpu->arch.isa);
	kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context);

	kvm_riscv_vcpu_timer_save(vcpu);

	csr->vsstatus = csr_read(CSR_VSSTATUS);
	csr->vsie = csr_read(CSR_VSIE);
	csr->vstvec = csr_read(CSR_VSTVEC);
	csr->vsscratch = csr_read(CSR_VSSCRATCH);
	csr->vsepc = csr_read(CSR_VSEPC);
	csr->vscause = csr_read(CSR_VSCAUSE);
	csr->vstval = csr_read(CSR_VSTVAL);
	csr->hvip = csr_read(CSR_HVIP);
	csr->vsatp = csr_read(CSR_VSATP);
}

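/*
 * Process pending vCPU requests; the SRCU read lock is dropped across the
 * sleep so that VM-wide updates (e.g. memslot changes) are not blocked
 * while this vCPU waits.
 */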
static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
{
	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);

	if (kvm_request_pending(vcpu)) {
		if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) {
			kvm_vcpu_srcu_read_unlock(vcpu);
			rcuwait_wait_event(wait,
				(!vcpu->arch.power_off) && (!vcpu->arch.pause),
				TASK_INTERRUPTIBLE);
			kvm_vcpu_srcu_read_lock(vcpu);

			if (vcpu->arch.power_off || vcpu->arch.pause) {
				/*
				 * Awaken to handle a signal, request to
				 * sleep again later.
				 */
				kvm_make_request(KVM_REQ_SLEEP, vcpu);
			}
		}

		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
			kvm_riscv_reset_vcpu(vcpu);

		if (kvm_check_request(KVM_REQ_UPDATE_HGATP, vcpu))
			kvm_riscv_gstage_update_hgatp(vcpu);

		if (kvm_check_request(KVM_REQ_FENCE_I, vcpu))
			kvm_riscv_fence_i_process(vcpu);

		/*
		 * The generic KVM_REQ_TLB_FLUSH is the same as
		 * KVM_REQ_HFENCE_GVMA_VMID_ALL
		 */
		if (kvm_check_request(KVM_REQ_HFENCE_GVMA_VMID_ALL, vcpu))
			kvm_riscv_hfence_gvma_vmid_all_process(vcpu);

		if (kvm_check_request(KVM_REQ_HFENCE_VVMA_ALL, vcpu))
			kvm_riscv_hfence_vvma_all_process(vcpu);

		if (kvm_check_request(KVM_REQ_HFENCE, vcpu))
			kvm_riscv_hfence_process(vcpu);
	}
}

static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	csr_write(CSR_HVIP, csr->hvip);
}

/*
 * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
 * the vCPU is running.
 *
 * This must be noinstr as instrumentation may make use of RCU, and this is not
 * safe during the EQS.
 */
static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu)
{
	guest_state_enter_irqoff();
	__kvm_riscv_switch_to(&vcpu->arch);
	vcpu->arch.last_exit_cpu = vcpu->cpu;
	guest_state_exit_irqoff();
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	int ret;
	struct kvm_cpu_trap trap;
	struct kvm_run *run = vcpu->run;

	/* Mark this VCPU ran at least once */
	vcpu->arch.ran_atleast_once = true;

	kvm_vcpu_srcu_read_lock(vcpu);

	switch (run->exit_reason) {
	case KVM_EXIT_MMIO:
		/* Process MMIO value returned from user-space */
		ret = kvm_riscv_vcpu_mmio_return(vcpu, vcpu->run);
		break;
	case KVM_EXIT_RISCV_SBI:
		/* Process SBI value returned from user-space */
		ret = kvm_riscv_vcpu_sbi_return(vcpu, vcpu->run);
		break;
	case KVM_EXIT_RISCV_CSR:
		/* Process CSR value returned from user-space */
		ret = kvm_riscv_vcpu_csr_return(vcpu, vcpu->run);
		break;
	default:
		ret = 0;
		break;
	}
	if (ret) {
		kvm_vcpu_srcu_read_unlock(vcpu);
		return ret;
	}

	if (run->immediate_exit) {
		kvm_vcpu_srcu_read_unlock(vcpu);
		return -EINTR;
	}

	vcpu_load(vcpu);

	kvm_sigset_activate(vcpu);

	ret = 1;
	run->exit_reason = KVM_EXIT_UNKNOWN;
	while (ret > 0) {
		/* Check conditions before entering the guest */
		ret = xfer_to_guest_mode_handle_work(vcpu);
		if (ret)
			continue;
		ret = 1;

		kvm_riscv_gstage_vmid_update(vcpu);

		kvm_riscv_check_vcpu_requests(vcpu);

		local_irq_disable();

		/*
		 * Ensure we set mode to IN_GUEST_MODE after we disable
		 * interrupts and before the final VCPU requests check.
		 * See the comment in kvm_vcpu_exiting_guest_mode() and
		 * Documentation/virt/kvm/vcpu-requests.rst
		 */
		vcpu->mode = IN_GUEST_MODE;

		kvm_vcpu_srcu_read_unlock(vcpu);
		smp_mb__after_srcu_read_unlock();

		/*
		 * We might have got VCPU interrupts updated asynchronously
		 * so update it in HW.
		 */
		kvm_riscv_vcpu_flush_interrupts(vcpu);

		/* Update HVIP CSR for current CPU */
		kvm_riscv_update_hvip(vcpu);

		if (ret <= 0 ||
		    kvm_riscv_gstage_vmid_ver_changed(&vcpu->kvm->arch.vmid) ||
		    kvm_request_pending(vcpu) ||
		    xfer_to_guest_mode_work_pending()) {
			vcpu->mode = OUTSIDE_GUEST_MODE;
			local_irq_enable();
			kvm_vcpu_srcu_read_lock(vcpu);
			continue;
		}

		/*
		 * Cleanup stale TLB entries
		 *
		 * Note: This should be done after G-stage VMID has been
		 * updated using kvm_riscv_gstage_vmid_ver_changed()
		 */
		kvm_riscv_local_tlb_sanitize(vcpu);

		guest_timing_enter_irqoff();

		kvm_riscv_vcpu_enter_exit(vcpu);

		vcpu->mode = OUTSIDE_GUEST_MODE;
		vcpu->stat.exits++;

		/*
		 * Save SCAUSE, STVAL, HTVAL, and HTINST because we might
		 * get an interrupt between __kvm_riscv_switch_to() and
		 * local_irq_enable() which can potentially change CSRs.
		 */
		trap.sepc = vcpu->arch.guest_context.sepc;
		trap.scause = csr_read(CSR_SCAUSE);
		trap.stval = csr_read(CSR_STVAL);
		trap.htval = csr_read(CSR_HTVAL);
		trap.htinst = csr_read(CSR_HTINST);

		/* Sync up the interrupt state with HW */
		kvm_riscv_vcpu_sync_interrupts(vcpu);

		preempt_disable();

		/*
		 * We must ensure that any pending interrupts are taken before
		 * we exit guest timing so that timer ticks are accounted as
		 * guest time. Transiently unmask interrupts so that any
		 * pending interrupts are taken.
		 *
		 * There's no barrier which ensures that pending interrupts are
		 * recognised, so we just hope that the CPU takes any pending
		 * interrupts between the enable and disable.
		 */
		local_irq_enable();
		local_irq_disable();

		guest_timing_exit_irqoff();

		local_irq_enable();

		preempt_enable();

		kvm_vcpu_srcu_read_lock(vcpu);

		ret = kvm_riscv_vcpu_exit(vcpu, run, &trap);
	}

	kvm_sigset_deactivate(vcpu);

	vcpu_put(vcpu);

	kvm_vcpu_srcu_read_unlock(vcpu);

	return ret;
}