/*
 * definition for kvm on s390
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */
#ifndef ARCH_S390_KVM_S390_H
#define ARCH_S390_KVM_S390_H

#include <linux/hrtimer.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>

/* Handler signature used by the SIE intercept dispatch tables. */
typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu);
23 | ||
/* declare vfacilities extern */
extern unsigned long *vfacilities;

/* Transactional Memory Execution related macros */
/* ECB bit 0x10 enables transactional-execution interpretation for the guest */
#define IS_TE_ENABLED(vcpu)	((vcpu->arch.sie_block->ecb & 0x10))
#define TDB_FORMAT1		1
/* itdba points at the interception TDB; its first byte is the format field */
#define IS_ITDB_VALID(vcpu)	((*(char *)vcpu->arch.sie_block->itdba == TDB_FORMAT1))
/*
 * Log a VM-scoped event into the kvm debug feature buffer.
 * d_string is a printf-style format; a trailing newline is appended.
 */
#define VM_EVENT(d_kvm, d_loglevel, d_string, d_args...)\
do { \
	debug_sprintf_event(d_kvm->arch.dbf, d_loglevel, d_string "\n", \
	  d_args); \
} while (0)

/*
 * Log a VCPU-scoped event, prefixed with the vcpu id and the guest PSW
 * (mask and address) taken from the vcpu's SIE block.
 */
#define VCPU_EVENT(d_vcpu, d_loglevel, d_string, d_args...)\
do { \
	debug_sprintf_event(d_vcpu->kvm->arch.dbf, d_loglevel, \
	  "%02d[%016lx-%016lx]: " d_string "\n", d_vcpu->vcpu_id, \
	  d_vcpu->arch.sie_block->gpsw.mask, d_vcpu->arch.sie_block->gpsw.addr,\
	  d_args); \
} while (0)
ba5c1e9b | 45 | |
7a42fdc2 | 46 | static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu) |
ba5c1e9b | 47 | { |
7a42fdc2 | 48 | return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED; |
ba5c1e9b CO |
49 | } |
50 | ||
/*
 * Is this a user-controlled VM?  With CONFIG_KVM_S390_UCONTROL such VMs
 * are recognized by the absence of a kernel-managed gmap.
 */
static inline int kvm_is_ucontrol(struct kvm *kvm)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	return kvm->arch.gmap == NULL;
#else
	return 0;
#endif
}
8d26cf7b | 61 | |
#define GUEST_PREFIX_SHIFT 13
/* Return the guest prefix address (stored right-shifted by 13 in the SIE block). */
static inline u32 kvm_s390_get_prefix(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.sie_block->prefix << GUEST_PREFIX_SHIFT;
}
67 | ||
/*
 * Set the guest prefix address and request a TLB flush plus MMU reload
 * so the new prefix pages take effect before the vcpu re-enters SIE.
 */
static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix)
{
	vcpu->arch.sie_block->prefix = prefix >> GUEST_PREFIX_SHIFT;
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
}
74 | ||
b1c571a5 CH |
75 | static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu) |
76 | { | |
0c29b229 CB |
77 | u32 base2 = vcpu->arch.sie_block->ipb >> 28; |
78 | u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); | |
b1c571a5 CH |
79 | |
80 | return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2; | |
81 | } | |
82 | ||
83 | static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu, | |
84 | u64 *address1, u64 *address2) | |
85 | { | |
0c29b229 CB |
86 | u32 base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28; |
87 | u32 disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16; | |
88 | u32 base2 = (vcpu->arch.sie_block->ipb & 0xf000) >> 12; | |
89 | u32 disp2 = vcpu->arch.sie_block->ipb & 0x0fff; | |
b1c571a5 CH |
90 | |
91 | *address1 = (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1; | |
92 | *address2 = (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2; | |
93 | } | |
94 | ||
69d0d3a3 CB |
95 | static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2) |
96 | { | |
ff7158b2 TH |
97 | if (r1) |
98 | *r1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 20; | |
99 | if (r2) | |
100 | *r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16; | |
69d0d3a3 CB |
101 | } |
102 | ||
b1c571a5 CH |
103 | static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu) |
104 | { | |
0c29b229 CB |
105 | u32 base2 = vcpu->arch.sie_block->ipb >> 28; |
106 | u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) + | |
b1c571a5 | 107 | ((vcpu->arch.sie_block->ipb & 0xff00) << 4); |
0c29b229 CB |
108 | /* The displacement is a 20bit _SIGNED_ value */ |
109 | if (disp2 & 0x80000) | |
110 | disp2+=0xfff00000; | |
b1c571a5 | 111 | |
0c29b229 | 112 | return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + (long)(int)disp2; |
b1c571a5 CH |
113 | } |
114 | ||
115 | static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu) | |
116 | { | |
0c29b229 CB |
117 | u32 base2 = vcpu->arch.sie_block->ipb >> 28; |
118 | u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); | |
b1c571a5 CH |
119 | |
120 | return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2; | |
121 | } | |
122 | ||
ea828ebf TH |
123 | /* Set the condition code in the guest program status word */ |
124 | static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc) | |
125 | { | |
126 | vcpu->arch.sie_block->gpsw.mask &= ~(3UL << 44); | |
127 | vcpu->arch.sie_block->gpsw.mask |= cc << 44; | |
128 | } | |
129 | ||
6352e4d2 DH |
130 | /* are cpu states controlled by user space */ |
131 | static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm) | |
132 | { | |
133 | return kvm->arch.user_cpu_state_ctrl != 0; | |
134 | } | |
135 | ||
/* interrupt delivery and injection prototypes */
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer);
int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu);
void kvm_s390_clear_float_irqs(struct kvm *kvm);
int __must_check kvm_s390_inject_vm(struct kvm *kvm,
				    struct kvm_s390_interrupt *s390int);
int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
				      struct kvm_s390_irq *irq);
int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
						    u64 cr6, u64 schid);
void kvm_s390_reinject_io_int(struct kvm *kvm,
			      struct kvm_s390_interrupt_info *inti);
int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked);
453423dc | 152 | |
/* implemented in intercept.c */
void kvm_s390_rewind_psw(struct kvm_vcpu *vcpu, int ilc);
int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu);

/* implemented in priv.c */
int is_valid_psw(psw_t *psw);
int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
int kvm_s390_handle_e5(struct kvm_vcpu *vcpu);
int kvm_s390_handle_01(struct kvm_vcpu *vcpu);
int kvm_s390_handle_b9(struct kvm_vcpu *vcpu);
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu);
int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu);
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu);
int kvm_s390_handle_eb(struct kvm_vcpu *vcpu);

/* implemented in sigp.c */
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);

/* implemented in kvm-s390.c */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
void s390_vcpu_block(struct kvm_vcpu *vcpu);
void s390_vcpu_unblock(struct kvm_vcpu *vcpu);
void exit_sie(struct kvm_vcpu *vcpu);
void exit_sie_sync(struct kvm_vcpu *vcpu);
int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu);
/* is cmma enabled */
bool kvm_s390_cmma_enabled(struct kvm *kvm);
int test_vfacility(unsigned long nr);

/* implemented in diag.c */
int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);
/* implemented in interrupt.c */
int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
			     struct kvm_s390_pgm_info *pgm_info);
1b0462e5 HC |
194 | /** |
195 | * kvm_s390_inject_prog_cond - conditionally inject a program check | |
196 | * @vcpu: virtual cpu | |
197 | * @rc: original return/error code | |
198 | * | |
199 | * This function is supposed to be used after regular guest access functions | |
200 | * failed, to conditionally inject a program check to a vcpu. The typical | |
201 | * pattern would look like | |
202 | * | |
203 | * rc = write_guest(vcpu, addr, data, len); | |
204 | * if (rc) | |
205 | * return kvm_s390_inject_prog_cond(vcpu, rc); | |
206 | * | |
207 | * A negative return code from guest access functions implies an internal error | |
208 | * like e.g. out of memory. In these cases no program check should be injected | |
209 | * to the guest. | |
210 | * A positive value implies that an exception happened while accessing a guest's | |
211 | * memory. In this case all data belonging to the corresponding program check | |
212 | * has been stored in vcpu->arch.pgm and can be injected with | |
213 | * kvm_s390_inject_prog_irq(). | |
214 | * | |
215 | * Returns: - the original @rc value if @rc was negative (internal error) | |
216 | * - zero if @rc was already zero | |
217 | * - zero or error code from injecting if @rc was positive | |
218 | * (program check injected to @vcpu) | |
219 | */ | |
220 | static inline int kvm_s390_inject_prog_cond(struct kvm_vcpu *vcpu, int rc) | |
221 | { | |
222 | if (rc <= 0) | |
223 | return rc; | |
224 | return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); | |
225 | } | |
226 | ||
int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
		       struct kvm_s390_irq *s390irq);

/* implemented in interrupt.c */
int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop);
int psw_extint_disabled(struct kvm_vcpu *vcpu);
void kvm_s390_destroy_adapters(struct kvm *kvm);
int kvm_s390_si_ext_call_pending(struct kvm_vcpu *vcpu);
extern struct kvm_device_ops kvm_flic_ops;
int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu);
void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu);

/* implemented in guestdbg.c */
void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu);
void kvm_s390_restore_guest_per_regs(struct kvm_vcpu *vcpu);
void kvm_s390_patch_guest_per_regs(struct kvm_vcpu *vcpu);
int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
			    struct kvm_guest_debug *dbg);
void kvm_s390_clear_bp_data(struct kvm_vcpu *vcpu);
void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu);
void kvm_s390_handle_per_event(struct kvm_vcpu *vcpu);

#endif