/* SPDX-License-Identifier: GPL-2.0 */
/*
 * definition for kvm on s390
 *
 * Copyright IBM Corp. 2008, 2020
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#ifndef ARCH_S390_KVM_S390_H
#define ARCH_S390_KVM_S390_H

#include <linux/hrtimer.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/lockdep.h>
#include <asm/facility.h>
#include <asm/processor.h>
#include <asm/sclp.h>

#define KVM_S390_UCONTROL_MEMSLOT (KVM_USER_MEM_SLOTS + 0)

static inline void kvm_s390_fpu_store(struct kvm_run *run)
{
        fpu_stfpc(&run->s.regs.fpc);
        if (cpu_has_vx())
                save_vx_regs((__vector128 *)&run->s.regs.vrs);
        else
                save_fp_regs((freg_t *)&run->s.regs.fprs);
}

static inline void kvm_s390_fpu_load(struct kvm_run *run)
{
        fpu_lfpc_safe(&run->s.regs.fpc);
        if (cpu_has_vx())
                load_vx_regs((__vector128 *)&run->s.regs.vrs);
        else
                load_fp_regs((freg_t *)&run->s.regs.fprs);
}

/* Transactional Memory Execution related macros */
#define IS_TE_ENABLED(vcpu)     ((vcpu->arch.sie_block->ecb & ECB_TE))
#define TDB_FORMAT1             1
#define IS_ITDB_VALID(vcpu) \
        ((*(char *)phys_to_virt((vcpu)->arch.sie_block->itdba) == TDB_FORMAT1))

extern debug_info_t *kvm_s390_dbf;
extern debug_info_t *kvm_s390_dbf_uv;

#define KVM_UV_EVENT(d_kvm, d_loglevel, d_string, d_args...)\
do { \
        debug_sprintf_event((d_kvm)->arch.dbf, d_loglevel, d_string "\n", \
          d_args); \
        debug_sprintf_event(kvm_s390_dbf_uv, d_loglevel, \
          "%d: " d_string "\n", (d_kvm)->userspace_pid, \
          d_args); \
} while (0)

#define KVM_EVENT(d_loglevel, d_string, d_args...)\
do { \
        debug_sprintf_event(kvm_s390_dbf, d_loglevel, d_string "\n", \
          d_args); \
} while (0)

#define VM_EVENT(d_kvm, d_loglevel, d_string, d_args...)\
do { \
        debug_sprintf_event(d_kvm->arch.dbf, d_loglevel, d_string "\n", \
          d_args); \
} while (0)

#define VCPU_EVENT(d_vcpu, d_loglevel, d_string, d_args...)\
do { \
        debug_sprintf_event(d_vcpu->kvm->arch.dbf, d_loglevel, \
          "%02d[%016lx-%016lx]: " d_string "\n", d_vcpu->vcpu_id, \
          d_vcpu->arch.sie_block->gpsw.mask, d_vcpu->arch.sie_block->gpsw.addr,\
          d_args); \
} while (0)

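/*
 * Illustrative usage (assumed callers, not part of the original header):
 * these macros log to the per-VM s390dbf debug feature, with VCPU events
 * additionally tagged with the vcpu id and current guest PSW, e.g.
 *
 *      VM_EVENT(kvm, 3, "vm created with %u vcpus", nr_vcpus);
 *      VCPU_EVENT(vcpu, 4, "delivered program irq, code 0x%x", code);
 */
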
static inline void kvm_s390_set_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
{
        atomic_or(flags, &vcpu->arch.sie_block->cpuflags);
}

static inline void kvm_s390_clear_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
{
        atomic_andnot(flags, &vcpu->arch.sie_block->cpuflags);
}

static inline bool kvm_s390_test_cpuflags(struct kvm_vcpu *vcpu, u32 flags)
{
        return (atomic_read(&vcpu->arch.sie_block->cpuflags) & flags) == flags;
}

static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
{
        return kvm_s390_test_cpuflags(vcpu, CPUSTAT_STOPPED);
}

static inline int is_vcpu_idle(struct kvm_vcpu *vcpu)
{
        return test_bit(vcpu->vcpu_idx, vcpu->kvm->arch.idle_mask);
}

static inline int kvm_is_ucontrol(struct kvm *kvm)
{
#ifdef CONFIG_KVM_S390_UCONTROL
        if (kvm->arch.gmap)
                return 0;
        return 1;
#else
        return 0;
#endif
}

#define GUEST_PREFIX_SHIFT 13
static inline u32 kvm_s390_get_prefix(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.sie_block->prefix << GUEST_PREFIX_SHIFT;
}

static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix)
{
        VCPU_EVENT(vcpu, 3, "set prefix of cpu %03u to 0x%x", vcpu->vcpu_id,
                   prefix);
        vcpu->arch.sie_block->prefix = prefix >> GUEST_PREFIX_SHIFT;
        kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
        kvm_make_request(KVM_REQ_REFRESH_GUEST_PREFIX, vcpu);
}

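/*
 * Worked example (illustrative, not part of the original header): the SIE
 * block keeps the guest prefix in 8 KB units (GUEST_PREFIX_SHIFT = 13), so
 * a guest prefix of 0x44000 is stored as 0x44000 >> 13 = 0x22 and read back
 * as 0x22 << 13 = 0x44000.
 */
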
static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, u8 *ar)
{
        u32 base2 = vcpu->arch.sie_block->ipb >> 28;
        u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);

        if (ar)
                *ar = base2;

        return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
}

static inline u64 kvm_s390_get_base_disp_siy(struct kvm_vcpu *vcpu, u8 *ar)
{
        u32 base1 = vcpu->arch.sie_block->ipb >> 28;
        s64 disp1;

        /* The displacement is a 20-bit _SIGNED_ value */
        disp1 = sign_extend64(((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
                              ((vcpu->arch.sie_block->ipb & 0xff00) << 4), 19);

        if (ar)
                *ar = base1;

        return (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1;
}

static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
                                              u64 *address1, u64 *address2,
                                              u8 *ar_b1, u8 *ar_b2)
{
        u32 base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
        u32 disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16;
        u32 base2 = (vcpu->arch.sie_block->ipb & 0xf000) >> 12;
        u32 disp2 = vcpu->arch.sie_block->ipb & 0x0fff;

        *address1 = (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1;
        *address2 = (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;

        if (ar_b1)
                *ar_b1 = base1;
        if (ar_b2)
                *ar_b2 = base2;
}

static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2)
{
        if (r1)
                *r1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 20;
        if (r2)
                *r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;
}

static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu, u8 *ar)
{
        u32 base2 = vcpu->arch.sie_block->ipb >> 28;
        u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
                        ((vcpu->arch.sie_block->ipb & 0xff00) << 4);
        /* The displacement is a 20-bit _SIGNED_ value */
        if (disp2 & 0x80000)
                disp2 += 0xfff00000;

        if (ar)
                *ar = base2;

        return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + (long)(int)disp2;
}

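/*
 * Worked example (illustrative, not part of the original header): for
 * DL2 = 0xffe and DH2 = 0xff the code above computes disp2 = 0xffe +
 * 0xff000 = 0xffffe. Bit 19 is set, so adding 0xfff00000 gives 0xfffffffe,
 * which the (long)(int) cast sign extends to -2.
 */
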
static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu, u8 *ar)
{
        u32 base2 = vcpu->arch.sie_block->ipb >> 28;
        u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);

        if (ar)
                *ar = base2;

        return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
}

/* Set the condition code in the guest program status word */
static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc)
{
        vcpu->arch.sie_block->gpsw.mask &= ~(3UL << 44);
        vcpu->arch.sie_block->gpsw.mask |= cc << 44;
}

/* test availability of facility in a kvm instance */
static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr)
{
        return __test_facility(nr, kvm->arch.model.fac_mask) &&
                __test_facility(nr, kvm->arch.model.fac_list);
}

static inline int set_kvm_facility(u64 *fac_list, unsigned long nr)
{
        unsigned char *ptr;

        if (nr >= MAX_FACILITY_BIT)
                return -EINVAL;
        ptr = (unsigned char *) fac_list + (nr >> 3);
        *ptr |= (0x80UL >> (nr & 7));
        return 0;
}

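/*
 * Worked example (illustrative): facility bits are numbered MSB first
 * within each byte, matching the STFLE bit layout. For nr = 10 the code
 * above picks byte 10 >> 3 = 1 and mask 0x80 >> (10 & 7) = 0x20.
 */
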
static inline int test_kvm_cpu_feat(struct kvm *kvm, unsigned long nr)
{
        WARN_ON_ONCE(nr >= KVM_S390_VM_CPU_FEAT_NR_BITS);
        return test_bit_inv(nr, kvm->arch.cpu_feat);
}

/* are cpu states controlled by user space */
static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm)
{
        return kvm->arch.user_cpu_state_ctrl != 0;
}

static inline void kvm_s390_set_user_cpu_state_ctrl(struct kvm *kvm)
{
        if (kvm->arch.user_cpu_state_ctrl)
                return;

        VM_EVENT(kvm, 3, "%s", "ENABLE: Userspace CPU state control");
        kvm->arch.user_cpu_state_ctrl = 1;
}

/* get the end gfn of the last (highest gfn) memslot */
static inline unsigned long kvm_s390_get_gfn_end(struct kvm_memslots *slots)
{
        struct rb_node *node;
        struct kvm_memory_slot *ms;

        if (WARN_ON(kvm_memslots_empty(slots)))
                return 0;

        node = rb_last(&slots->gfn_tree);
        ms = container_of(node, struct kvm_memory_slot, gfn_node[slots->node_idx]);
        return ms->base_gfn + ms->npages;
}

static inline u32 kvm_s390_get_gisa_desc(struct kvm *kvm)
{
        u32 gd;

        if (!kvm->arch.gisa_int.origin)
                return 0;

        gd = virt_to_phys(kvm->arch.gisa_int.origin);

        if (gd && sclp.has_gisaf)
                gd |= GISA_FORMAT1;
        return gd;
}

static inline hva_t gpa_to_hva(struct kvm *kvm, gpa_t gpa)
{
        hva_t hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));

        if (!kvm_is_error_hva(hva))
                hva |= offset_in_page(gpa);
        return hva;
}

/* implemented in pv.c */
int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc);
int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc);
int kvm_s390_pv_set_aside(struct kvm *kvm, u16 *rc, u16 *rrc);
int kvm_s390_pv_deinit_aside_vm(struct kvm *kvm, u16 *rc, u16 *rrc);
int kvm_s390_pv_deinit_cleanup_all(struct kvm *kvm, u16 *rc, u16 *rrc);
int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc);
int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc);
int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
                              u16 *rrc);
int kvm_s390_pv_unpack(struct kvm *kvm, unsigned long addr, unsigned long size,
                       unsigned long tweak, u16 *rc, u16 *rrc);
int kvm_s390_pv_set_cpu_state(struct kvm_vcpu *vcpu, u8 state);
int kvm_s390_pv_dump_cpu(struct kvm_vcpu *vcpu, void *buff, u16 *rc, u16 *rrc);
int kvm_s390_pv_dump_stor_state(struct kvm *kvm, void __user *buff_user,
                                u64 *gaddr, u64 buff_user_len, u16 *rc, u16 *rrc);
int kvm_s390_pv_dump_complete(struct kvm *kvm, void __user *buff_user,
                              u16 *rc, u16 *rrc);
int kvm_s390_pv_destroy_page(struct kvm *kvm, unsigned long gaddr);
int kvm_s390_pv_convert_to_secure(struct kvm *kvm, unsigned long gaddr);
int kvm_s390_pv_make_secure(struct kvm *kvm, unsigned long gaddr, void *uvcb);

static inline u64 kvm_s390_pv_get_handle(struct kvm *kvm)
{
        return kvm->arch.pv.handle;
}

static inline u64 kvm_s390_pv_cpu_get_handle(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.pv.handle;
}

/**
 * __kvm_s390_pv_destroy_page() - Destroy a guest page.
 * @page: the page to destroy
 *
 * An attempt will be made to destroy the given guest page. If the attempt
 * fails, an attempt is made to export the page. If both attempts fail, an
 * appropriate error is returned.
 *
 * Context: must be called holding the mm lock for gmap->mm
 */
static inline int __kvm_s390_pv_destroy_page(struct page *page)
{
        struct folio *folio = page_folio(page);
        int rc;

        /* Large folios cannot be secure. Small folio implies FW_LEVEL_PTE. */
        if (folio_test_large(folio))
                return -EFAULT;

        rc = uv_destroy_folio(folio);
        /*
         * Fault handlers can race; it is possible that two CPUs will fault
         * on the same secure page. One CPU can destroy the page, reboot,
         * re-enter secure mode and import it, while the second CPU was
         * stuck at the beginning of the handler. At some point the second
         * CPU will be able to progress, and it will not be able to destroy
         * the page. In that case we do not want to terminate the process,
         * we instead try to export the page.
         */
        if (rc)
                rc = uv_convert_from_secure_folio(folio);

        return rc;
}

/* implemented in interrupt.c */
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer);
int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu);
void kvm_s390_clear_float_irqs(struct kvm *kvm);
int __must_check kvm_s390_inject_vm(struct kvm *kvm,
                                    struct kvm_s390_interrupt *s390int);
int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
                                      struct kvm_s390_irq *irq);
static inline int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
                                           struct kvm_s390_pgm_info *pgm_info)
{
        struct kvm_s390_irq irq = {
                .type = KVM_S390_PROGRAM_INT,
                .u.pgm = *pgm_info,
        };

        return kvm_s390_inject_vcpu(vcpu, &irq);
}
static inline int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
        struct kvm_s390_irq irq = {
                .type = KVM_S390_PROGRAM_INT,
                .u.pgm.code = code,
        };

        return kvm_s390_inject_vcpu(vcpu, &irq);
}
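
/*
 * Illustrative usage (an assumed caller, not part of the original header):
 * instruction handlers typically reject malformed operands with
 *
 *      if (operand & 1)
 *              return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 */
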
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
                                                    u64 isc_mask, u32 schid);
int kvm_s390_reinject_io_int(struct kvm *kvm,
                             struct kvm_s390_interrupt_info *inti);
int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked);

/* implemented in intercept.c */
u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu);
int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu);
static inline void kvm_s390_rewind_psw(struct kvm_vcpu *vcpu, int ilen)
{
        struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;

        sie_block->gpsw.addr = __rewind_psw(sie_block->gpsw, ilen);
}
static inline void kvm_s390_forward_psw(struct kvm_vcpu *vcpu, int ilen)
{
        kvm_s390_rewind_psw(vcpu, -ilen);
}
static inline void kvm_s390_retry_instr(struct kvm_vcpu *vcpu)
{
        /* don't inject PER events if we re-execute the instruction */
        vcpu->arch.sie_block->icptstatus &= ~0x02;
        kvm_s390_rewind_psw(vcpu, kvm_s390_get_ilen(vcpu));
}

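/*
 * Illustrative usage (an assumed caller): after lazily enabling a facility,
 * an intercept handler can make the guest re-execute the instruction:
 *
 *      kvm_s390_retry_instr(vcpu);
 *      return 0;
 */
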
int handle_sthyi(struct kvm_vcpu *vcpu);

/* implemented in priv.c */
int is_valid_psw(psw_t *psw);
int kvm_s390_handle_aa(struct kvm_vcpu *vcpu);
int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
int kvm_s390_handle_e3(struct kvm_vcpu *vcpu);
int kvm_s390_handle_e5(struct kvm_vcpu *vcpu);
int kvm_s390_handle_01(struct kvm_vcpu *vcpu);
int kvm_s390_handle_b9(struct kvm_vcpu *vcpu);
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu);
int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu);
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu);
int kvm_s390_handle_eb(struct kvm_vcpu *vcpu);
int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu);

/* implemented in vsie.c */
int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu);
void kvm_s390_vsie_kick(struct kvm_vcpu *vcpu);
void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
                                 unsigned long end);
void kvm_s390_vsie_init(struct kvm *kvm);
void kvm_s390_vsie_destroy(struct kvm *kvm);
int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level);

/* implemented in gmap-vsie.c */
struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce, int edat_level);

/* implemented in sigp.c */
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);

/* implemented in kvm-s390.c */
int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu);
bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu);
void exit_sie(struct kvm_vcpu *vcpu);
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu);
int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu);
void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm);
__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu);
int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rc, u16 *rrc);
int __kvm_s390_handle_dat_fault(struct kvm_vcpu *vcpu, gfn_t gfn, gpa_t gaddr, unsigned int flags);
int __kvm_s390_mprotect_many(struct gmap *gmap, gpa_t gpa, u8 npages, unsigned int prot,
                             unsigned long bits);

static inline int kvm_s390_handle_dat_fault(struct kvm_vcpu *vcpu, gpa_t gaddr, unsigned int flags)
{
        return __kvm_s390_handle_dat_fault(vcpu, gpa_to_gfn(gaddr), gaddr, flags);
}

/* implemented in diag.c */
int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);

static inline void kvm_s390_vcpu_block_all(struct kvm *kvm)
{
        unsigned long i;
        struct kvm_vcpu *vcpu;

        WARN_ON(!mutex_is_locked(&kvm->lock));
        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_s390_vcpu_block(vcpu);
}

static inline void kvm_s390_vcpu_unblock_all(struct kvm *kvm)
{
        unsigned long i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_s390_vcpu_unblock(vcpu);
}

static inline u64 kvm_s390_get_tod_clock_fast(struct kvm *kvm)
{
        u64 rc;

        preempt_disable();
        rc = get_tod_clock_fast() + kvm->arch.epoch;
        preempt_enable();
        return rc;
}

/**
 * kvm_s390_inject_prog_cond - conditionally inject a program check
 * @vcpu: virtual cpu
 * @rc: original return/error code
 *
 * This function is supposed to be used after regular guest access functions
 * failed, to conditionally inject a program check to a vcpu. The typical
 * pattern would look like
 *
 *   rc = write_guest(vcpu, addr, data, len);
 *   if (rc)
 *           return kvm_s390_inject_prog_cond(vcpu, rc);
 *
 * A negative return code from guest access functions implies an internal error
 * like e.g. out of memory. In these cases no program check should be injected
 * to the guest.
 * A positive value implies that an exception happened while accessing a guest's
 * memory. In this case all data belonging to the corresponding program check
 * has been stored in vcpu->arch.pgm and can be injected with
 * kvm_s390_inject_prog_irq().
 *
 * Returns: - the original @rc value if @rc was negative (internal error)
 *          - zero if @rc was already zero
 *          - zero or error code from injecting if @rc was positive
 *            (program check injected to @vcpu)
 */
static inline int kvm_s390_inject_prog_cond(struct kvm_vcpu *vcpu, int rc)
{
        if (rc <= 0)
                return rc;
        return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
}

int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
                       struct kvm_s390_irq *s390irq);

/* implemented in interrupt.c */
int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop);
int psw_extint_disabled(struct kvm_vcpu *vcpu);
void kvm_s390_destroy_adapters(struct kvm *kvm);
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu);
extern struct kvm_device_ops kvm_flic_ops;
int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu);
int kvm_s390_is_restart_irq_pending(struct kvm_vcpu *vcpu);
void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu);
int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu,
                           void __user *buf, int len);
int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu,
                           __u8 __user *buf, int len);
void kvm_s390_gisa_init(struct kvm *kvm);
void kvm_s390_gisa_clear(struct kvm *kvm);
void kvm_s390_gisa_destroy(struct kvm *kvm);
void kvm_s390_gisa_disable(struct kvm *kvm);
void kvm_s390_gisa_enable(struct kvm *kvm);
int __init kvm_s390_gib_init(u8 nisc);
void kvm_s390_gib_destroy(void);

/* implemented in guestdbg.c */
void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu);
void kvm_s390_restore_guest_per_regs(struct kvm_vcpu *vcpu);
void kvm_s390_patch_guest_per_regs(struct kvm_vcpu *vcpu);
int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
                            struct kvm_guest_debug *dbg);
void kvm_s390_clear_bp_data(struct kvm_vcpu *vcpu);
void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu);
int kvm_s390_handle_per_ifetch_icpt(struct kvm_vcpu *vcpu);
int kvm_s390_handle_per_event(struct kvm_vcpu *vcpu);

/* support for Basic/Extended SCA handling */
static inline union ipte_control *kvm_s390_get_ipte_control(struct kvm *kvm)
{
        struct bsca_block *sca = kvm->arch.sca; /* SCA version doesn't matter */

        return &sca->ipte_control;
}
static inline int kvm_s390_use_sca_entries(void)
{
        /*
         * Without SIGP interpretation, only SRS interpretation (if available)
         * might use the entries. By not setting the entries and keeping them
         * invalid, hardware will not access them but intercept.
         */
        return sclp.has_sigpif;
}
void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu,
                                     struct mcck_volatile_info *mcck_info);

static inline bool kvm_s390_cur_gmap_fault_is_write(void)
{
        if (current->thread.gmap_int_code == PGM_PROTECTION)
                return true;
        return test_facility(75) && (current->thread.gmap_teid.fsi == TEID_FSI_STORE);
}

/**
 * kvm_s390_vcpu_crypto_reset_all
 *
 * Reset the crypto attributes for each vcpu. This can be done while the vcpus
 * are running as each vcpu will be removed from SIE before resetting the
 * crypto attributes and restored to SIE afterward.
 *
 * Note: The kvm->lock must be held while calling this function
 *
 * @kvm: the KVM guest
 */
void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm);

/**
 * kvm_s390_vcpu_pci_enable_interp
 *
 * Set the associated PCI attributes for each vcpu to allow for zPCI Load/Store
 * interpretation as well as adapter interruption forwarding.
 *
 * @kvm: the KVM guest
 */
void kvm_s390_vcpu_pci_enable_interp(struct kvm *kvm);

/**
 * diag9c_forwarding_hz
 *
 * Maximum number of diag9c forwardings per second
 */
extern unsigned int diag9c_forwarding_hz;

#endif