Commit | Line | Data |
---|---|---|
dfe3dc07 TZ |
1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | /* | |
3 | * Copyright (C) 2020-2023 Loongson Technology Corporation Limited | |
4 | */ | |
5 | ||
6 | #ifndef __ASM_LOONGARCH_KVM_VCPU_H__ | |
7 | #define __ASM_LOONGARCH_KVM_VCPU_H__ | |
8 | ||
9 | #include <linux/kvm_host.h> | |
10 | #include <asm/loongarch.h> | |
11 | ||
/*
 * Interrupt bits as they appear in the guest CSR.ESTAT register (CSR 0x5):
 * two software interrupts, the PMU, timer and IPI interrupts.
 */
#define CPU_SIP0			(_ULCAST_(1))		/* software interrupt 0 */
#define CPU_SIP1			(_ULCAST_(1) << 1)	/* software interrupt 1 */
#define CPU_PMU				(_ULCAST_(1) << 10)	/* performance monitor interrupt */
#define CPU_TIMER			(_ULCAST_(1) << 11)	/* timer interrupt */
#define CPU_IPI				(_ULCAST_(1) << 12)	/* inter-processor interrupt */

/*
 * Controlled by 0x52 guest exception VIP, aligned to estat bits 5~12.
 * NOTE(review): presumably these are the eight hardware interrupt lines
 * (HWI0..HWI7) — confirm against the LoongArch virtualization manual.
 */
#define CPU_IP0				(_ULCAST_(1))
#define CPU_IP1				(_ULCAST_(1) << 1)
#define CPU_IP2				(_ULCAST_(1) << 2)
#define CPU_IP3				(_ULCAST_(1) << 3)
#define CPU_IP4				(_ULCAST_(1) << 4)
#define CPU_IP5				(_ULCAST_(1) << 5)
#define CPU_IP6				(_ULCAST_(1) << 6)
#define CPU_IP7				(_ULCAST_(1) << 7)

/*
 * Coarse "mega-nanoseconds" per second: NSEC_PER_SEC / 2^20.
 * NOTE(review): assumed to be a cheap power-of-two approximation used by
 * the timer emulation — confirm at the use site.
 */
#define MNSEC_PER_SEC			(NSEC_PER_SEC >> 20)

/*
 * KVM_IRQ_LINE irq field index values.  The 32-bit irq argument is packed
 * as: type in bits [31:24], vcpu index in bits [23:16], irq number in
 * bits [15:0] (layout follows from the shift/mask pairs below).
 */
#define KVM_LOONGSON_IRQ_TYPE_SHIFT	24
#define KVM_LOONGSON_IRQ_TYPE_MASK	0xff
#define KVM_LOONGSON_IRQ_VCPU_SHIFT	16
#define KVM_LOONGSON_IRQ_VCPU_MASK	0xff
#define KVM_LOONGSON_IRQ_NUM_SHIFT	0
#define KVM_LOONGSON_IRQ_NUM_MASK	0xffff
38 | ||
/* Shorthand for a decoded LoongArch instruction word. */
typedef union loongarch_instruction larch_inst;
/* Per-exit-reason handler: returns a KVM exit/continue code. */
typedef int (*exit_handle_fn)(struct kvm_vcpu *);

/*
 * MMIO / iocsr emulation.  The kvm_emu_* helpers emulate a trapped
 * load/store instruction; the kvm_complete_* helpers finish a read whose
 * data was supplied by userspace via kvm_run.
 */
int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst);
int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst);
int kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_complete_iocsr_read(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_emu_idle(struct kvm_vcpu *vcpu);
int kvm_pending_timer(struct kvm_vcpu *vcpu);
int kvm_handle_fault(struct kvm_vcpu *vcpu, int fault);
/* Push pending interrupts / exceptions into the guest before entry. */
void kvm_deliver_intr(struct kvm_vcpu *vcpu);
void kvm_deliver_exception(struct kvm_vcpu *vcpu);

/* Guest FPU ownership and context save/restore. */
void kvm_own_fpu(struct kvm_vcpu *vcpu);
void kvm_lose_fpu(struct kvm_vcpu *vcpu);
void kvm_save_fpu(struct loongarch_fpu *fpu);
void kvm_restore_fpu(struct loongarch_fpu *fpu);
void kvm_restore_fcsr(struct loongarch_fpu *fpu);
57 | ||
db1ecca2 TZ |
/* LSX vector state handling; real implementations only when the kernel
 * is built with CONFIG_CPU_HAS_LSX. */
#ifdef CONFIG_CPU_HAS_LSX
int kvm_own_lsx(struct kvm_vcpu *vcpu);
void kvm_save_lsx(struct loongarch_fpu *fpu);
void kvm_restore_lsx(struct loongarch_fpu *fpu);
#else
/* LSX not built in: acquiring it fails, save/restore are no-ops. */
static inline int kvm_own_lsx(struct kvm_vcpu *vcpu) { return -EINVAL; }
static inline void kvm_save_lsx(struct loongarch_fpu *fpu) { }
static inline void kvm_restore_lsx(struct loongarch_fpu *fpu) { }
#endif
67 | ||
/* LASX vector state handling; real implementations only when the kernel
 * is built with CONFIG_CPU_HAS_LASX. */
#ifdef CONFIG_CPU_HAS_LASX
int kvm_own_lasx(struct kvm_vcpu *vcpu);
void kvm_save_lasx(struct loongarch_fpu *fpu);
void kvm_restore_lasx(struct loongarch_fpu *fpu);
#else
/* LASX not built in: acquiring it fails, save/restore are no-ops. */
static inline int kvm_own_lasx(struct kvm_vcpu *vcpu) { return -EINVAL; }
static inline void kvm_save_lasx(struct loongarch_fpu *fpu) { }
static inline void kvm_restore_lasx(struct loongarch_fpu *fpu) { }
#endif
77 | ||
dfe3dc07 TZ |
/* Guest timer emulation: init with the guest frequency in hz, plus
 * reset and vcpu_load/vcpu_put save-restore hooks. */
void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz);
void kvm_reset_timer(struct kvm_vcpu *vcpu);
void kvm_save_timer(struct kvm_vcpu *vcpu);
void kvm_restore_timer(struct kvm_vcpu *vcpu);

/* Handler for the KVM_INTERRUPT vcpu ioctl (inject/clear a guest irq). */
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);
84 | ||
85 | /* | |
86 | * Loongarch KVM guest interrupt handling | |
87 | */ | |
88 | static inline void kvm_queue_irq(struct kvm_vcpu *vcpu, unsigned int irq) | |
89 | { | |
90 | set_bit(irq, &vcpu->arch.irq_pending); | |
91 | clear_bit(irq, &vcpu->arch.irq_clear); | |
92 | } | |
93 | ||
94 | static inline void kvm_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int irq) | |
95 | { | |
96 | clear_bit(irq, &vcpu->arch.irq_pending); | |
97 | set_bit(irq, &vcpu->arch.irq_clear); | |
98 | } | |
99 | ||
100 | static inline int kvm_queue_exception(struct kvm_vcpu *vcpu, | |
101 | unsigned int code, unsigned int subcode) | |
102 | { | |
103 | /* only one exception can be injected */ | |
104 | if (!vcpu->arch.exception_pending) { | |
105 | set_bit(code, &vcpu->arch.exception_pending); | |
106 | vcpu->arch.esubcode = subcode; | |
107 | return 0; | |
108 | } else | |
109 | return -1; | |
110 | } | |
111 | ||
112 | #endif /* __ASM_LOONGARCH_KVM_VCPU_H__ */ |