KVM: add machine check counter to kvm_stat
linux-2.6-block.git: arch/s390/kvm/kvm-s390.c
// SPDX-License-Identifier: GPL-2.0
/*
 * hosting IBM Z kernel virtual machines (s390x)
 *
 * Copyright IBM Corp. 2008, 2018
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>

#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define KMSG_COMPONENT "kvm-s390"
#undef pr_fmt
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

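/*
 * Exit and instruction counters exposed through debugfs and consumed by
 * the kvm_stat tool.  The "deliver_machine_check" entry below is the
 * machine check delivery counter that this commit adds.
 */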
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_io_request", VCPU_STAT(exit_io_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_pei", VCPU_STAT(exit_pei) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "deliver_io_interrupt", VCPU_STAT(deliver_io_int) },
	{ "deliver_machine_check", VCPU_STAT(deliver_machine_check) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_epsw", VCPU_STAT(instruction_epsw) },
	{ "instruction_gs", VCPU_STAT(instruction_gs) },
	{ "instruction_io_other", VCPU_STAT(instruction_io_other) },
	{ "instruction_lpsw", VCPU_STAT(instruction_lpsw) },
	{ "instruction_lpswe", VCPU_STAT(instruction_lpswe) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_ptff", VCPU_STAT(instruction_ptff) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_sck", VCPU_STAT(instruction_sck) },
	{ "instruction_sckpf", VCPU_STAT(instruction_sckpf) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_iske", VCPU_STAT(instruction_iske) },
	{ "instruction_ri", VCPU_STAT(instruction_ri) },
	{ "instruction_rrbe", VCPU_STAT(instruction_rrbe) },
	{ "instruction_sske", VCPU_STAT(instruction_sske) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tb", VCPU_STAT(instruction_tb) },
	{ "instruction_tpi", VCPU_STAT(instruction_tpi) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_tsch", VCPU_STAT(instruction_tsch) },
	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
	{ "instruction_sie", VCPU_STAT(instruction_sie) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "instruction_diag_10", VCPU_STAT(diagnose_10) },
	{ "instruction_diag_44", VCPU_STAT(diagnose_44) },
	{ "instruction_diag_9c", VCPU_STAT(diagnose_9c) },
	{ "instruction_diag_258", VCPU_STAT(diagnose_258) },
	{ "instruction_diag_308", VCPU_STAT(diagnose_308) },
	{ "instruction_diag_500", VCPU_STAT(diagnose_500) },
	{ "instruction_diag_other", VCPU_STAT(diagnose_other) },
	{ NULL }
};

struct kvm_s390_tod_clock_ext {
	__u8 epoch_idx;
	__u64 tod;
	__u8 reserved[7];
} __packed;

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");


/*
 * For now we handle at most 16 double words as this is what the s390 base
 * kernel handles and stores in the prefix page. If we ever need to go beyond
 * this, this requires changes to code, but the external uapi can stay.
 */
#define SIZE_INTERNAL 16

/*
 * Base feature mask that defines default mask for facilities. Consists of the
 * defines in FACILITIES_KVM and the non-hypervisor managed bits.
 */
static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
/*
 * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
 * and defines the facilities that can be enabled via a cpu model.
 */
static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };

static unsigned long kvm_s390_fac_size(void)
{
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
		     sizeof(S390_lowcore.stfle_fac_list));

	return SIZE_INTERNAL;
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);

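/*
 * Apply a host TOD-clock jump of "delta" to one SIE control block.  The
 * epoch is the guest-host clock offset, so the jump is compensated by
 * adding -delta.  With the multiple-epoch facility (ECD_MEF), the
 * unsigned compare "scb->epoch < delta" below detects the carry out of
 * the 64-bit addition and propagates it into the epoch index (epdx).
 */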
static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
{
	u8 delta_idx = 0;

	/*
	 * The TOD jumps by delta, we have to compensate this by adding
	 * -delta to the epoch.
	 */
	delta = -delta;

	/* sign-extension - we're adding to signed values below */
	if ((s64)delta < 0)
		delta_idx = -1;

	scb->epoch += delta;
	if (scb->ecd & ECD_MEF) {
		scb->epdx += delta_idx;
		if (scb->epoch < delta)
			scb->epdx += 1;
	}
}

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
			if (i == 0) {
				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
			}
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				kvm_clock_sync_scb(vcpu->arch.vsie_block,
						   *delta);
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

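/*
 * PERFORM LOCKED OPERATION (PLO) query: with bit 0x100 set in the
 * function code, PLO only tests whether the requested function is
 * implemented and sets the condition code accordingly; cc == 0 means
 * the function is available.
 */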
static inline int plo_test_bit(unsigned char nr)
{
	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
	int cc;

	asm volatile(
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (r0)
		: "cc");
	return cc == 0;
}

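/*
 * Probe the optional subfunctions the host provides.  Each
 * __cpacf_query() stores a 128-bit status mask for one CPACF query
 * function; the collected masks are later reported to user space via
 * the KVM_S390_VM_CPU_MACHINE_SUBFUNC attribute.
 */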
static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * pages being detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}

int kvm_arch_init(void *opaque)
{
	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
		debug_unregister(kvm_s390_dbf);
		return -ENOMEM;
	}

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

void kvm_arch_exit(void)
{
	debug_unregister(kvm_s390_dbf);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

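/*
 * Report which optional capabilities this host and kernel support;
 * anything not listed here falls through to the default and is
 * reported as 0 (not supported).
 */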
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
	case KVM_CAP_S390_AIS_MIGRATION:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	case KVM_CAP_S390_BPB:
		r = test_facility(82);
		break;
	default:
		r = 0;
	}
	return r;
}

static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (test_and_clear_guest_dirty(gmap->mm, address))
			mark_page_dirty(kvm, cur_gfn);
		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}

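/*
 * VM-wide capabilities toggled by user space.  The facility-backed
 * capabilities (vector registers, RI, AIS, GS) can only be enabled
 * before the first VCPU exists, hence the kvm->created_vcpus checks
 * under kvm->lock.
 */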
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		ret = -EBUSY;
		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			kvm->arch.use_cmma = 1;
			/* Not compatible with cmma. */
			kvm->arch.use_pfmfi = 0;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
{
	int cx;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(cx, vcpu, kvm)
		kvm_s390_sync_request(req, vcpu);
}

/*
 * Must be called with kvm->srcu held to avoid races on memslots, and with
 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
 */
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
	struct kvm_s390_migration_state *mgs;
	struct kvm_memory_slot *ms;
	/* should be the only one */
	struct kvm_memslots *slots;
	unsigned long ram_pages;
	int slotnr;

	/* migration mode already enabled */
	if (kvm->arch.migration_state)
		return 0;

	slots = kvm_memslots(kvm);
	if (!slots || !slots->used_slots)
		return -EINVAL;

	mgs = kzalloc(sizeof(*mgs), GFP_KERNEL);
	if (!mgs)
		return -ENOMEM;
	kvm->arch.migration_state = mgs;

	if (kvm->arch.use_cmma) {
		/*
		 * Get the first slot. They are reverse sorted by base_gfn, so
		 * the first slot is also the one at the end of the address
		 * space. We have verified above that at least one slot is
		 * present.
		 */
		ms = slots->memslots;
		/* round up so we only use full longs */
		ram_pages = roundup(ms->base_gfn + ms->npages, BITS_PER_LONG);
		/* allocate enough bytes to store all the bits */
		mgs->pgste_bitmap = vmalloc(ram_pages / 8);
		if (!mgs->pgste_bitmap) {
			kfree(mgs);
			kvm->arch.migration_state = NULL;
			return -ENOMEM;
		}

		mgs->bitmap_size = ram_pages;
		atomic64_set(&mgs->dirty_pages, ram_pages);
		/* mark all the pages in active slots as dirty */
		for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
			ms = slots->memslots + slotnr;
			bitmap_set(mgs->pgste_bitmap, ms->base_gfn, ms->npages);
		}

		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
	}
	return 0;
}

/*
 * Must be called with kvm->slots_lock to avoid races with ourselves and
 * kvm_s390_vm_start_migration.
 */
static int kvm_s390_vm_stop_migration(struct kvm *kvm)
{
	struct kvm_s390_migration_state *mgs;

	/* migration mode already disabled */
	if (!kvm->arch.migration_state)
		return 0;
	mgs = kvm->arch.migration_state;
	kvm->arch.migration_state = NULL;

	if (kvm->arch.use_cmma) {
		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
		/* We have to wait for the essa emulation to finish */
		synchronize_srcu(&kvm->srcu);
		vfree(mgs->pgste_bitmap);
	}
	kfree(mgs);
	return 0;
}

static int kvm_s390_vm_set_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	int res = -ENXIO;

	mutex_lock(&kvm->slots_lock);
	switch (attr->attr) {
	case KVM_S390_VM_MIGRATION_START:
		res = kvm_s390_vm_start_migration(kvm);
		break;
	case KVM_S390_VM_MIGRATION_STOP:
		res = kvm_s390_vm_stop_migration(kvm);
		break;
	default:
		break;
	}
	mutex_unlock(&kvm->slots_lock);

	return res;
}

static int kvm_s390_vm_get_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	u64 mig = (kvm->arch.migration_state != NULL);

	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
		return -ENXIO;

	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
		return -EINVAL;
	kvm_s390_set_tod_clock(kvm, &gtod);

	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);

	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod = { 0 };

	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
			   sizeof(gtod.tod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, &gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_set_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

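/*
 * Compute the guest view of the extended TOD clock (epoch index plus
 * 64-bit TOD value) from the host clock and the per-VM offsets.  A
 * wrap of the 64-bit TOD part is detected by the unsigned compare and
 * carried into the epoch index.
 */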
static void kvm_s390_get_tod_clock_ext(struct kvm *kvm,
				       struct kvm_s390_vm_tod_clock *gtod)
{
	struct kvm_s390_tod_clock_ext htod;

	preempt_disable();

	get_tod_clock_ext((char *)&htod);

	gtod->tod = htod.tod + kvm->arch.epoch;
	gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx;

	if (gtod->tod < htod.tod)
		gtod->epoch_idx += 1;

	preempt_enable();
}

static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	memset(&gtod, 0, sizeof(gtod));

	if (test_kvm_facility(kvm, 139))
		kvm_s390_get_tod_clock_ext(kvm, &gtod);
	else
		gtod.tod = kvm_s390_get_tod_clock_fast(kvm);

	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);
	return 0;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_get_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

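/*
 * Set the guest CPU model.  The requested instruction-blocking control
 * (IBC) value is clamped to the range reported by SCLP: values above
 * the unblocked IBC are lowered to it, values below the lowest
 * supported IBC are raised to it.
 */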
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		if (lowest_ibc && proc->ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
		VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
			 kvm->arch.model.ibc,
			 kvm->arch.model.cpuid);
		VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
			 kvm->arch.model.fac_list[0],
			 kvm->arch.model.fac_list[1],
			 kvm->arch.model.fac_list[2]);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}
	bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	mutex_unlock(&kvm->lock);
	VM_EVENT(kvm, 3, "SET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	/*
	 * Once supported by kernel + hw, we have to store the subfunctions
	 * in kvm->arch and remember that user space configured them.
	 */
	return -ENXIO;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_set_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_set_processor_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	proc->cpuid = kvm->arch.model.cpuid;
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 kvm->arch.model.fac_list[0],
		 kvm->arch.model.fac_list[1],
		 kvm->arch.model.fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       sizeof(S390_lowcore.stfle_fac_list));
	VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_mask[0],
		 mach->fac_mask[1],
		 mach->fac_mask[2]);
	VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_list[0],
		 mach->fac_list[1],
		 mach->fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: guest feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_get_machine_feat(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat,
		    kvm_s390_available_cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "GET: host feat: 0x%16.16llx.0x%16.16llx.0x%16.16llx",
		 data.feat[0],
		 data.feat[1],
		 data.feat[2]);
	return 0;
}

static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	/*
	 * Once we can actually configure subfunctions (kernel + hw support),
	 * we have to check if they were already set by user space, if so copy
	 * them from kvm->arch.
	 */
	return -ENXIO;
}

static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
					struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
			 sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;
	return 0;
}
static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_get_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_FEAT:
		ret = kvm_s390_get_machine_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_get_processor_subfunc(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		ret = kvm_s390_get_machine_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_set_migration(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_MIGRATION:
		ret = kvm_s390_vm_get_migration(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			ret = sclp.has_cmma ? 0 : -ENXIO;
			break;
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		case KVM_S390_VM_CPU_MACHINE_FEAT:
		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
			ret = 0;
			break;
		/* configuring subfunctions is not supported yet */
		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_MIGRATION:
		ret = 0;
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

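/*
 * The storage-key ioctls below transfer one key byte per guest page
 * between the guest's storage keys and a user-space buffer;
 * args->count is bounded by KVM_S390_SKEYS_MAX to keep the allocation
 * sane.
 */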
1468static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1469{
1470 uint8_t *keys;
1471 uint64_t hva;
4f899147 1472 int srcu_idx, i, r = 0;
30ee2a98
JH
1473
1474 if (args->flags != 0)
1475 return -EINVAL;
1476
1477 /* Is this guest using storage keys? */
1478 if (!mm_use_skey(current->mm))
1479 return KVM_S390_GET_SKEYS_NONE;
1480
1481 /* Enforce sane limit on memory allocation */
1482 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
1483 return -EINVAL;
1484
752ade68 1485 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
30ee2a98
JH
1486 if (!keys)
1487 return -ENOMEM;
1488
d3ed1cee 1489 down_read(&current->mm->mmap_sem);
4f899147 1490 srcu_idx = srcu_read_lock(&kvm->srcu);
30ee2a98
JH
1491 for (i = 0; i < args->count; i++) {
1492 hva = gfn_to_hva(kvm, args->start_gfn + i);
1493 if (kvm_is_error_hva(hva)) {
1494 r = -EFAULT;
d3ed1cee 1495 break;
30ee2a98
JH
1496 }
1497
154c8c19
DH
1498 r = get_guest_storage_key(current->mm, hva, &keys[i]);
1499 if (r)
d3ed1cee 1500 break;
30ee2a98 1501 }
4f899147 1502 srcu_read_unlock(&kvm->srcu, srcu_idx);
d3ed1cee
MS
1503 up_read(&current->mm->mmap_sem);
1504
1505 if (!r) {
1506 r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
1507 sizeof(uint8_t) * args->count);
1508 if (r)
1509 r = -EFAULT;
30ee2a98
JH
1510 }
1511
30ee2a98
JH
1512 kvfree(keys);
1513 return r;
1514}
1515
1516static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1517{
1518 uint8_t *keys;
1519 uint64_t hva;
4f899147 1520 int srcu_idx, i, r = 0;
30ee2a98
JH
1521
1522 if (args->flags != 0)
1523 return -EINVAL;
1524
1525 /* Enforce sane limit on memory allocation */
1526 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
1527 return -EINVAL;
1528
752ade68 1529 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
30ee2a98
JH
1530 if (!keys)
1531 return -ENOMEM;
1532
1533 r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
1534 sizeof(uint8_t) * args->count);
1535 if (r) {
1536 r = -EFAULT;
1537 goto out;
1538 }
1539
1540 /* Enable storage key handling for the guest */
14d4a425
DD
1541 r = s390_enable_skey();
1542 if (r)
1543 goto out;
30ee2a98 1544
d3ed1cee 1545 down_read(&current->mm->mmap_sem);
4f899147 1546 srcu_idx = srcu_read_lock(&kvm->srcu);
30ee2a98
JH
1547 for (i = 0; i < args->count; i++) {
1548 hva = gfn_to_hva(kvm, args->start_gfn + i);
1549 if (kvm_is_error_hva(hva)) {
1550 r = -EFAULT;
d3ed1cee 1551 break;
30ee2a98
JH
1552 }
1553
1554 /* Lowest order bit is reserved */
1555 if (keys[i] & 0x01) {
1556 r = -EINVAL;
d3ed1cee 1557 break;
30ee2a98
JH
1558 }
1559
fe69eabf 1560 r = set_guest_storage_key(current->mm, hva, keys[i], 0);
30ee2a98 1561 if (r)
d3ed1cee 1562 break;
30ee2a98 1563 }
4f899147 1564 srcu_read_unlock(&kvm->srcu, srcu_idx);
d3ed1cee 1565 up_read(&current->mm->mmap_sem);
30ee2a98
JH
1566out:
1567 kvfree(keys);
1568 return r;
1569}
1570
4036e387
CI
1571/*
1572 * Base address and length must be sent at the start of each block, therefore
1573 * it's cheaper to send some clean data, as long as it's less than the size of
1574 * two longs.
1575 */
1576#define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
1577/* for consistency */
1578#define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
1579
1580/*
1581 * This function searches for the next page with dirty CMMA attributes, and
1582 * saves the attributes in the buffer up to either the end of the buffer or
1583 * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
1584 * no trailing clean bytes are saved.
1585 * In case no dirty bits were found, or if CMMA was not enabled or used, the
1586 * output buffer will indicate 0 as length.
1587 */
1588static int kvm_s390_get_cmma_bits(struct kvm *kvm,
1589 struct kvm_s390_cmma_log *args)
1590{
1591 struct kvm_s390_migration_state *s = kvm->arch.migration_state;
1592 unsigned long bufsize, hva, pgstev, i, next, cur;
1593 int srcu_idx, peek, r = 0, rr;
1594 u8 *res;
1595
1596 cur = args->start_gfn;
1597 i = next = pgstev = 0;
1598
1599 if (unlikely(!kvm->arch.use_cmma))
1600 return -ENXIO;
1601 /* Invalid/unsupported flags were specified */
1602 if (args->flags & ~KVM_S390_CMMA_PEEK)
1603 return -EINVAL;
1604 /* Migration mode query, and we are not doing a migration */
1605 peek = !!(args->flags & KVM_S390_CMMA_PEEK);
1606 if (!peek && !s)
1607 return -EINVAL;
1608 /* CMMA is disabled or was not used, or the buffer has length zero */
1609 bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
c9f0a2b8 1610 if (!bufsize || !kvm->mm->context.uses_cmm) {
4036e387
CI
1611 memset(args, 0, sizeof(*args));
1612 return 0;
1613 }
1614
1615 if (!peek) {
1616 /* We are not peeking, and there are no dirty pages */
1617 if (!atomic64_read(&s->dirty_pages)) {
1618 memset(args, 0, sizeof(*args));
1619 return 0;
1620 }
1621 cur = find_next_bit(s->pgste_bitmap, s->bitmap_size,
1622 args->start_gfn);
1623 if (cur >= s->bitmap_size) /* nothing found, loop back */
1624 cur = find_next_bit(s->pgste_bitmap, s->bitmap_size, 0);
1625 if (cur >= s->bitmap_size) { /* again! (very unlikely) */
1626 memset(args, 0, sizeof(*args));
1627 return 0;
1628 }
1629 next = find_next_bit(s->pgste_bitmap, s->bitmap_size, cur + 1);
1630 }
1631
1632 res = vmalloc(bufsize);
1633 if (!res)
1634 return -ENOMEM;
1635
1636 args->start_gfn = cur;
1637
1638 down_read(&kvm->mm->mmap_sem);
1639 srcu_idx = srcu_read_lock(&kvm->srcu);
1640 while (i < bufsize) {
1641 hva = gfn_to_hva(kvm, cur);
1642 if (kvm_is_error_hva(hva)) {
1643 r = -EFAULT;
1644 break;
1645 }
1646 /* decrement only if we actually flipped the bit to 0 */
1647 if (!peek && test_and_clear_bit(cur, s->pgste_bitmap))
1648 atomic64_dec(&s->dirty_pages);
1649 r = get_pgste(kvm->mm, hva, &pgstev);
1650 if (r < 0)
1651 pgstev = 0;
1652 /* save the value */
1bab1c02 1653 res[i++] = (pgstev >> 24) & 0x43;
4036e387
CI
1654 /*
1655 * if the next bit is too far away, stop.
1656 * if we reached the previous "next", find the next one
1657 */
1658 if (!peek) {
1659 if (next > cur + KVM_S390_MAX_BIT_DISTANCE)
1660 break;
1661 if (cur == next)
1662 next = find_next_bit(s->pgste_bitmap,
1663 s->bitmap_size, cur + 1);
1664 /* reached the end of the bitmap or of the buffer, stop */
1665 if ((next >= s->bitmap_size) ||
1666 (next >= args->start_gfn + bufsize))
1667 break;
1668 }
1669 cur++;
1670 }
1671 srcu_read_unlock(&kvm->srcu, srcu_idx);
1672 up_read(&kvm->mm->mmap_sem);
1673 args->count = i;
1674 args->remaining = s ? atomic64_read(&s->dirty_pages) : 0;
1675
1676 rr = copy_to_user((void __user *)args->values, res, args->count);
1677 if (rr)
1678 r = -EFAULT;
1679
1680 vfree(res);
1681 return r;
1682}
1683
1684/*
1685 * This function sets the CMMA attributes for the given pages. If the input
1686 * buffer has zero length, no action is taken, otherwise the attributes are
c9f0a2b8 1687 * set and the mm->context.uses_cmm flag is set.
4036e387
CI
1688 */
1689static int kvm_s390_set_cmma_bits(struct kvm *kvm,
1690 const struct kvm_s390_cmma_log *args)
1691{
1692 unsigned long hva, mask, pgstev, i;
1693 uint8_t *bits;
1694 int srcu_idx, r = 0;
1695
1696 mask = args->mask;
1697
1698 if (!kvm->arch.use_cmma)
1699 return -ENXIO;
1700 /* invalid/unsupported flags */
1701 if (args->flags != 0)
1702 return -EINVAL;
1703 /* Enforce sane limit on memory allocation */
1704 if (args->count > KVM_S390_CMMA_SIZE_MAX)
1705 return -EINVAL;
1706 /* Nothing to do */
1707 if (args->count == 0)
1708 return 0;
1709
1710 bits = vmalloc(sizeof(*bits) * args->count);
1711 if (!bits)
1712 return -ENOMEM;
1713
1714 r = copy_from_user(bits, (void __user *)args->values, args->count);
1715 if (r) {
1716 r = -EFAULT;
1717 goto out;
1718 }
1719
1720 down_read(&kvm->mm->mmap_sem);
1721 srcu_idx = srcu_read_lock(&kvm->srcu);
1722 for (i = 0; i < args->count; i++) {
1723 hva = gfn_to_hva(kvm, args->start_gfn + i);
1724 if (kvm_is_error_hva(hva)) {
1725 r = -EFAULT;
1726 break;
1727 }
1728
1729 pgstev = bits[i];
1730 pgstev = pgstev << 24;
1bab1c02 1731 mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
4036e387
CI
1732 set_pgste_bits(kvm->mm, hva, mask, pgstev);
1733 }
1734 srcu_read_unlock(&kvm->srcu, srcu_idx);
1735 up_read(&kvm->mm->mmap_sem);
1736
c9f0a2b8 1737 if (!kvm->mm->context.uses_cmm) {
4036e387 1738 down_write(&kvm->mm->mmap_sem);
c9f0a2b8 1739 kvm->mm->context.uses_cmm = 1;
4036e387
CI
1740 up_write(&kvm->mm->mmap_sem);
1741 }
1742out:
1743 vfree(bits);
1744 return r;
1745}
1746
b0c632db
HC
1747long kvm_arch_vm_ioctl(struct file *filp,
1748 unsigned int ioctl, unsigned long arg)
1749{
1750 struct kvm *kvm = filp->private_data;
1751 void __user *argp = (void __user *)arg;
f2061656 1752 struct kvm_device_attr attr;
b0c632db
HC
1753 int r;
1754
1755 switch (ioctl) {
ba5c1e9b
CO
1756 case KVM_S390_INTERRUPT: {
1757 struct kvm_s390_interrupt s390int;
1758
1759 r = -EFAULT;
1760 if (copy_from_user(&s390int, argp, sizeof(s390int)))
1761 break;
1762 r = kvm_s390_inject_vm(kvm, &s390int);
1763 break;
1764 }
d938dc55
CH
1765 case KVM_ENABLE_CAP: {
1766 struct kvm_enable_cap cap;
1767 r = -EFAULT;
1768 if (copy_from_user(&cap, argp, sizeof(cap)))
1769 break;
1770 r = kvm_vm_ioctl_enable_cap(kvm, &cap);
1771 break;
1772 }
84223598
CH
1773 case KVM_CREATE_IRQCHIP: {
1774 struct kvm_irq_routing_entry routing;
1775
1776 r = -EINVAL;
1777 if (kvm->arch.use_irqchip) {
1778 /* Set up dummy routing. */
1779 memset(&routing, 0, sizeof(routing));
152b2839 1780 r = kvm_set_irq_routing(kvm, &routing, 0, 0);
84223598
CH
1781 }
1782 break;
1783 }
f2061656
DD
1784 case KVM_SET_DEVICE_ATTR: {
1785 r = -EFAULT;
1786 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
1787 break;
1788 r = kvm_s390_vm_set_attr(kvm, &attr);
1789 break;
1790 }
1791 case KVM_GET_DEVICE_ATTR: {
1792 r = -EFAULT;
1793 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
1794 break;
1795 r = kvm_s390_vm_get_attr(kvm, &attr);
1796 break;
1797 }
1798 case KVM_HAS_DEVICE_ATTR: {
1799 r = -EFAULT;
1800 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
1801 break;
1802 r = kvm_s390_vm_has_attr(kvm, &attr);
1803 break;
1804 }
30ee2a98
JH
1805 case KVM_S390_GET_SKEYS: {
1806 struct kvm_s390_skeys args;
1807
1808 r = -EFAULT;
1809 if (copy_from_user(&args, argp,
1810 sizeof(struct kvm_s390_skeys)))
1811 break;
1812 r = kvm_s390_get_skeys(kvm, &args);
1813 break;
1814 }
1815 case KVM_S390_SET_SKEYS: {
1816 struct kvm_s390_skeys args;
1817
1818 r = -EFAULT;
1819 if (copy_from_user(&args, argp,
1820 sizeof(struct kvm_s390_skeys)))
1821 break;
1822 r = kvm_s390_set_skeys(kvm, &args);
1823 break;
1824 }
4036e387
CI
1825 case KVM_S390_GET_CMMA_BITS: {
1826 struct kvm_s390_cmma_log args;
1827
1828 r = -EFAULT;
1829 if (copy_from_user(&args, argp, sizeof(args)))
1830 break;
1de1ea7e 1831 mutex_lock(&kvm->slots_lock);
4036e387 1832 r = kvm_s390_get_cmma_bits(kvm, &args);
1de1ea7e 1833 mutex_unlock(&kvm->slots_lock);
4036e387
CI
1834 if (!r) {
1835 r = copy_to_user(argp, &args, sizeof(args));
1836 if (r)
1837 r = -EFAULT;
1838 }
1839 break;
1840 }
1841 case KVM_S390_SET_CMMA_BITS: {
1842 struct kvm_s390_cmma_log args;
1843
1844 r = -EFAULT;
1845 if (copy_from_user(&args, argp, sizeof(args)))
1846 break;
1de1ea7e 1847 mutex_lock(&kvm->slots_lock);
4036e387 1848 r = kvm_s390_set_cmma_bits(kvm, &args);
1de1ea7e 1849 mutex_unlock(&kvm->slots_lock);
4036e387
CI
1850 break;
1851 }
b0c632db 1852 default:
367e1319 1853 r = -ENOTTY;
b0c632db
HC
1854 }
1855
1856 return r;
1857}
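
Editor's sketch: the device-attribute cases above are reached with the generic KVM_*_DEVICE_ATTR vm ioctls. Enabling CMMA before any vcpu is created is one concrete use; vm_fd and enable_cmma() are assumptions, and an s390 host is assumed for the group/attr constants.

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int enable_cmma(int vm_fd)
{
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_MEM_CTRL,
		.attr  = KVM_S390_VM_MEM_ENABLE_CMMA,
	};

	/* probe first, so old kernels can be handled gracefully */
	if (ioctl(vm_fd, KVM_HAS_DEVICE_ATTR, &attr))
		return -1;
	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}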
1858
45c9b47c
TK
1859static int kvm_s390_query_ap_config(u8 *config)
1860{
1861 u32 fcn_code = 0x04000000UL;
86044c8c 1862 u32 cc = 0;
45c9b47c 1863
86044c8c 1864 memset(config, 0, 128);
45c9b47c
TK
1865 asm volatile(
1866 "lgr 0,%1\n"
1867 "lgr 2,%2\n"
1868 ".long 0xb2af0000\n" /* PQAP(QCI) */
86044c8c 1869 "0: ipm %0\n"
45c9b47c 1870 "srl %0,28\n"
86044c8c
CB
1871 "1:\n"
1872 EX_TABLE(0b, 1b)
1873 : "+r" (cc)
45c9b47c
TK
1874 : "r" (fcn_code), "r" (config)
1875 : "cc", "0", "2", "memory"
1876 );
1877
1878 return cc;
1879}
1880
1881static int kvm_s390_apxa_installed(void)
1882{
1883 u8 config[128];
1884 int cc;
1885
a6aacc3f 1886 if (test_facility(12)) {
45c9b47c
TK
1887 cc = kvm_s390_query_ap_config(config);
1888
1889 if (cc)
1890 pr_err("PQAP(QCI) failed with cc=%d\n", cc);
1891 else
1892 return config[0] & 0x40;
1893 }
1894
1895 return 0;
1896}
1897
1898static void kvm_s390_set_crycb_format(struct kvm *kvm)
1899{
1900 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
1901
1902 if (kvm_s390_apxa_installed())
1903 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
1904 else
1905 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
1906}
1907
9bb0ec09 1908static u64 kvm_s390_get_initial_cpuid(void)
9d8d5786 1909{
9bb0ec09
DH
1910 struct cpuid cpuid;
1911
1912 get_cpu_id(&cpuid);
1913 cpuid.version = 0xff;
1914 return *((u64 *) &cpuid);
9d8d5786
MM
1915}
1916
c54f0d6a 1917static void kvm_s390_crypto_init(struct kvm *kvm)
5102ee87 1918{
9d8d5786 1919 if (!test_kvm_facility(kvm, 76))
c54f0d6a 1920 return;
5102ee87 1921
c54f0d6a 1922 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
45c9b47c 1923 kvm_s390_set_crycb_format(kvm);
5102ee87 1924
ed6f76b4
TK
1925 /* Enable AES/DEA protected key functions by default */
1926 kvm->arch.crypto.aes_kw = 1;
1927 kvm->arch.crypto.dea_kw = 1;
1928 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
1929 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
1930 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
1931 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
5102ee87
TK
1932}
1933
7d43bafc
ED
1934static void sca_dispose(struct kvm *kvm)
1935{
1936 if (kvm->arch.use_esca)
5e044315 1937 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
7d43bafc
ED
1938 else
1939 free_page((unsigned long)(kvm->arch.sca));
1940 kvm->arch.sca = NULL;
1941}
1942
e08b9637 1943int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
b0c632db 1944{
76a6dd72 1945 gfp_t alloc_flags = GFP_KERNEL;
9d8d5786 1946 int i, rc;
b0c632db 1947 char debug_name[16];
f6c137ff 1948 static unsigned long sca_offset;
b0c632db 1949
e08b9637
CO
1950 rc = -EINVAL;
1951#ifdef CONFIG_KVM_S390_UCONTROL
1952 if (type & ~KVM_VM_S390_UCONTROL)
1953 goto out_err;
1954 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
1955 goto out_err;
1956#else
1957 if (type)
1958 goto out_err;
1959#endif
1960
b0c632db
HC
1961 rc = s390_enable_sie();
1962 if (rc)
d89f5eff 1963 goto out_err;
b0c632db 1964
b290411a
CO
1965 rc = -ENOMEM;
1966
7d43bafc 1967 kvm->arch.use_esca = 0; /* start with basic SCA */
76a6dd72
DH
1968 if (!sclp.has_64bscao)
1969 alloc_flags |= GFP_DMA;
5e044315 1970 rwlock_init(&kvm->arch.sca_lock);
76a6dd72 1971 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
b0c632db 1972 if (!kvm->arch.sca)
d89f5eff 1973 goto out_err;
f6c137ff 1974 spin_lock(&kvm_lock);
c5c2c393 1975 sca_offset += 16;
bc784cce 1976 if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
c5c2c393 1977 sca_offset = 0;
bc784cce
ED
1978 kvm->arch.sca = (struct bsca_block *)
1979 ((char *) kvm->arch.sca + sca_offset);
f6c137ff 1980 spin_unlock(&kvm_lock);
b0c632db
HC
1981
1982 sprintf(debug_name, "kvm-%u", current->pid);
1983
1cb9cf72 1984 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
b0c632db 1985 if (!kvm->arch.dbf)
40f5b735 1986 goto out_err;
b0c632db 1987
19114beb 1988 BUILD_BUG_ON(sizeof(struct sie_page2) != 4096);
c54f0d6a
DH
1989 kvm->arch.sie_page2 =
1990 (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
1991 if (!kvm->arch.sie_page2)
40f5b735 1992 goto out_err;
9d8d5786 1993
c54f0d6a 1994 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
c3b9e3e1
CB
1995
1996 for (i = 0; i < kvm_s390_fac_size(); i++) {
1997 kvm->arch.model.fac_mask[i] = S390_lowcore.stfle_fac_list[i] &
1998 (kvm_s390_fac_base[i] |
1999 kvm_s390_fac_ext[i]);
2000 kvm->arch.model.fac_list[i] = S390_lowcore.stfle_fac_list[i] &
2001 kvm_s390_fac_base[i];
2002 }
981467c9 2003
1935222d
DH
2004 /* we are always in czam mode - even on pre-z14 machines */
2005 set_kvm_facility(kvm->arch.model.fac_mask, 138);
2006 set_kvm_facility(kvm->arch.model.fac_list, 138);
2007 /* we emulate STHYI in kvm */
95ca2cb5
JF
2008 set_kvm_facility(kvm->arch.model.fac_mask, 74);
2009 set_kvm_facility(kvm->arch.model.fac_list, 74);
1bab1c02
CI
2010 if (MACHINE_HAS_TLB_GUEST) {
2011 set_kvm_facility(kvm->arch.model.fac_mask, 147);
2012 set_kvm_facility(kvm->arch.model.fac_list, 147);
2013 }
95ca2cb5 2014
9bb0ec09 2015 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
37c5f6c8 2016 kvm->arch.model.ibc = sclp.ibc & 0x0fff;
9d8d5786 2017
c54f0d6a 2018 kvm_s390_crypto_init(kvm);
5102ee87 2019
51978393
FL
2020 mutex_init(&kvm->arch.float_int.ais_lock);
2021 kvm->arch.float_int.simm = 0;
2022 kvm->arch.float_int.nimm = 0;
ba5c1e9b 2023 spin_lock_init(&kvm->arch.float_int.lock);
6d3da241
JF
2024 for (i = 0; i < FIRQ_LIST_COUNT; i++)
2025 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
8a242234 2026 init_waitqueue_head(&kvm->arch.ipte_wq);
a6b7e459 2027 mutex_init(&kvm->arch.ipte_mutex);
ba5c1e9b 2028
b0c632db 2029 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
78f26131 2030 VM_EVENT(kvm, 3, "vm created with type %lu", type);
b0c632db 2031
e08b9637
CO
2032 if (type & KVM_VM_S390_UCONTROL) {
2033 kvm->arch.gmap = NULL;
a3a92c31 2034 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
e08b9637 2035 } else {
32e6b236 2036 if (sclp.hamax == U64_MAX)
ee71d16d 2037 kvm->arch.mem_limit = TASK_SIZE_MAX;
32e6b236 2038 else
ee71d16d 2039 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
32e6b236 2040 sclp.hamax + 1);
6ea427bb 2041 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
e08b9637 2042 if (!kvm->arch.gmap)
40f5b735 2043 goto out_err;
2c70fe44 2044 kvm->arch.gmap->private = kvm;
24eb3a82 2045 kvm->arch.gmap->pfault_enabled = 0;
e08b9637 2046 }
fa6b7fe9
CH
2047
2048 kvm->arch.css_support = 0;
84223598 2049 kvm->arch.use_irqchip = 0;
c9f0a2b8 2050 kvm->arch.use_pfmfi = sclp.has_pfmfi;
72f25020 2051 kvm->arch.epoch = 0;
fa6b7fe9 2052
8ad35755 2053 spin_lock_init(&kvm->arch.start_stop_lock);
a3508fbe 2054 kvm_s390_vsie_init(kvm);
d7c5cb01 2055 kvm_s390_gisa_init(kvm);
8335713a 2056 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
8ad35755 2057
d89f5eff 2058 return 0;
40f5b735 2059out_err:
c54f0d6a 2060 free_page((unsigned long)kvm->arch.sie_page2);
598841ca 2061 debug_unregister(kvm->arch.dbf);
7d43bafc 2062 sca_dispose(kvm);
78f26131 2063 KVM_EVENT(3, "creation of vm failed: %d", rc);
d89f5eff 2064 return rc;
b0c632db
HC
2065}
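
Editor's sketch: the type argument validated at the top of kvm_arch_init_vm() is simply the third argument of the KVM_CREATE_VM ioctl. A normal guest passes 0; a ucontrol VM passes KVM_VM_S390_UCONTROL and additionally needs CAP_SYS_ADMIN plus a kernel built with CONFIG_KVM_S390_UCONTROL. create_s390_vm() is a made-up helper name.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int create_s390_vm(int ucontrol)
{
	int kvm_fd = open("/dev/kvm", O_RDWR | O_CLOEXEC);

	if (kvm_fd < 0)
		return -1;
	/* returns the new vm fd, or -1 with errno EINVAL/EPERM on rejection */
	return ioctl(kvm_fd, KVM_CREATE_VM,
		     ucontrol ? KVM_VM_S390_UCONTROL : 0);
}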
2066
235539b4
LC
2067bool kvm_arch_has_vcpu_debugfs(void)
2068{
2069 return false;
2070}
2071
2072int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
2073{
2074 return 0;
2075}
2076
d329c035
CB
2077void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
2078{
2079 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
ade38c31 2080 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
67335e63 2081 kvm_s390_clear_local_irqs(vcpu);
3c038e6b 2082 kvm_clear_async_pf_completion_queue(vcpu);
bc784cce 2083 if (!kvm_is_ucontrol(vcpu->kvm))
a6e2f683 2084 sca_del_vcpu(vcpu);
27e0393f
CO
2085
2086 if (kvm_is_ucontrol(vcpu->kvm))
6ea427bb 2087 gmap_remove(vcpu->arch.gmap);
27e0393f 2088
e6db1d61 2089 if (vcpu->kvm->arch.use_cmma)
b31605c1 2090 kvm_s390_vcpu_unsetup_cmma(vcpu);
d329c035 2091 free_page((unsigned long)(vcpu->arch.sie_block));
b31288fa 2092
6692cef3 2093 kvm_vcpu_uninit(vcpu);
b110feaf 2094 kmem_cache_free(kvm_vcpu_cache, vcpu);
d329c035
CB
2095}
2096
2097static void kvm_free_vcpus(struct kvm *kvm)
2098{
2099 unsigned int i;
988a2cae 2100 struct kvm_vcpu *vcpu;
d329c035 2101
988a2cae
GN
2102 kvm_for_each_vcpu(i, vcpu, kvm)
2103 kvm_arch_vcpu_destroy(vcpu);
2104
2105 mutex_lock(&kvm->lock);
2106 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
2107 kvm->vcpus[i] = NULL;
2108
2109 atomic_set(&kvm->online_vcpus, 0);
2110 mutex_unlock(&kvm->lock);
d329c035
CB
2111}
2112
b0c632db
HC
2113void kvm_arch_destroy_vm(struct kvm *kvm)
2114{
d329c035 2115 kvm_free_vcpus(kvm);
7d43bafc 2116 sca_dispose(kvm);
d329c035 2117 debug_unregister(kvm->arch.dbf);
d7c5cb01 2118 kvm_s390_gisa_destroy(kvm);
c54f0d6a 2119 free_page((unsigned long)kvm->arch.sie_page2);
27e0393f 2120 if (!kvm_is_ucontrol(kvm))
6ea427bb 2121 gmap_remove(kvm->arch.gmap);
841b91c5 2122 kvm_s390_destroy_adapters(kvm);
67335e63 2123 kvm_s390_clear_float_irqs(kvm);
a3508fbe 2124 kvm_s390_vsie_destroy(kvm);
190df4a2
CI
2125 if (kvm->arch.migration_state) {
2126 vfree(kvm->arch.migration_state->pgste_bitmap);
2127 kfree(kvm->arch.migration_state);
2128 }
8335713a 2129 KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
b0c632db
HC
2130}
2131
2132/* Section: vcpu related */
dafd032a
DD
2133static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
2134{
6ea427bb 2135 vcpu->arch.gmap = gmap_create(current->mm, -1UL);
dafd032a
DD
2136 if (!vcpu->arch.gmap)
2137 return -ENOMEM;
2138 vcpu->arch.gmap->private = vcpu->kvm;
2139
2140 return 0;
2141}
2142
a6e2f683
ED
2143static void sca_del_vcpu(struct kvm_vcpu *vcpu)
2144{
a6940674
DH
2145 if (!kvm_s390_use_sca_entries())
2146 return;
5e044315 2147 read_lock(&vcpu->kvm->arch.sca_lock);
7d43bafc
ED
2148 if (vcpu->kvm->arch.use_esca) {
2149 struct esca_block *sca = vcpu->kvm->arch.sca;
a6e2f683 2150
7d43bafc 2151 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
10ce32d5 2152 sca->cpu[vcpu->vcpu_id].sda = 0;
7d43bafc
ED
2153 } else {
2154 struct bsca_block *sca = vcpu->kvm->arch.sca;
2155
2156 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
10ce32d5 2157 sca->cpu[vcpu->vcpu_id].sda = 0;
7d43bafc 2158 }
5e044315 2159 read_unlock(&vcpu->kvm->arch.sca_lock);
a6e2f683
ED
2160}
2161
eaa78f34 2162static void sca_add_vcpu(struct kvm_vcpu *vcpu)
a6e2f683 2163{
a6940674
DH
2164 if (!kvm_s390_use_sca_entries()) {
2165 struct bsca_block *sca = vcpu->kvm->arch.sca;
2166
2167 /* we still need the basic sca for the ipte control */
2168 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2169 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
f07afa04 2170 return;
a6940674 2171 }
eaa78f34
DH
2172 read_lock(&vcpu->kvm->arch.sca_lock);
2173 if (vcpu->kvm->arch.use_esca) {
2174 struct esca_block *sca = vcpu->kvm->arch.sca;
7d43bafc 2175
eaa78f34 2176 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
7d43bafc
ED
2177 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2178 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
0c9d8683 2179 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
eaa78f34 2180 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
7d43bafc 2181 } else {
eaa78f34 2182 struct bsca_block *sca = vcpu->kvm->arch.sca;
a6e2f683 2183
eaa78f34 2184 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
7d43bafc
ED
2185 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2186 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
eaa78f34 2187 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
7d43bafc 2188 }
eaa78f34 2189 read_unlock(&vcpu->kvm->arch.sca_lock);
5e044315
ED
2190}
2191
2192/* Basic SCA to Extended SCA data copy routines */
2193static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
2194{
2195 d->sda = s->sda;
2196 d->sigp_ctrl.c = s->sigp_ctrl.c;
2197 d->sigp_ctrl.scn = s->sigp_ctrl.scn;
2198}
2199
2200static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
2201{
2202 int i;
2203
2204 d->ipte_control = s->ipte_control;
2205 d->mcn[0] = s->mcn;
2206 for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
2207 sca_copy_entry(&d->cpu[i], &s->cpu[i]);
2208}
2209
2210static int sca_switch_to_extended(struct kvm *kvm)
2211{
2212 struct bsca_block *old_sca = kvm->arch.sca;
2213 struct esca_block *new_sca;
2214 struct kvm_vcpu *vcpu;
2215 unsigned int vcpu_idx;
2216 u32 scaol, scaoh;
2217
2218 new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
2219 if (!new_sca)
2220 return -ENOMEM;
2221
2222 scaoh = (u32)((u64)(new_sca) >> 32);
2223 scaol = (u32)(u64)(new_sca) & ~0x3fU;
2224
2225 kvm_s390_vcpu_block_all(kvm);
2226 write_lock(&kvm->arch.sca_lock);
2227
2228 sca_copy_b_to_e(new_sca, old_sca);
2229
2230 kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
2231 vcpu->arch.sie_block->scaoh = scaoh;
2232 vcpu->arch.sie_block->scaol = scaol;
0c9d8683 2233 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
5e044315
ED
2234 }
2235 kvm->arch.sca = new_sca;
2236 kvm->arch.use_esca = 1;
2237
2238 write_unlock(&kvm->arch.sca_lock);
2239 kvm_s390_vcpu_unblock_all(kvm);
2240
2241 free_page((unsigned long)old_sca);
2242
8335713a
CB
2243 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
2244 old_sca, kvm->arch.sca);
5e044315 2245 return 0;
a6e2f683
ED
2246}
2247
2248static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
2249{
5e044315
ED
2250 int rc;
2251
a6940674
DH
2252 if (!kvm_s390_use_sca_entries()) {
2253 if (id < KVM_MAX_VCPUS)
2254 return true;
2255 return false;
2256 }
5e044315
ED
2257 if (id < KVM_S390_BSCA_CPU_SLOTS)
2258 return true;
76a6dd72 2259 if (!sclp.has_esca || !sclp.has_64bscao)
5e044315
ED
2260 return false;
2261
2262 mutex_lock(&kvm->lock);
2263 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
2264 mutex_unlock(&kvm->lock);
2265
2266 return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
a6e2f683
ED
2267}
2268
b0c632db
HC
2269int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
2270{
3c038e6b
DD
2271 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
2272 kvm_clear_async_pf_completion_queue(vcpu);
59674c1a
CB
2273 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
2274 KVM_SYNC_GPRS |
9eed0735 2275 KVM_SYNC_ACRS |
b028ee3e
DH
2276 KVM_SYNC_CRS |
2277 KVM_SYNC_ARCH0 |
2278 KVM_SYNC_PFAULT;
75a4615c 2279 kvm_s390_set_prefix(vcpu, 0);
c6e5f166
FZ
2280 if (test_kvm_facility(vcpu->kvm, 64))
2281 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
35b3fde6
CB
2282 if (test_kvm_facility(vcpu->kvm, 82))
2283 vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
4e0b1ab7
FZ
2284 if (test_kvm_facility(vcpu->kvm, 133))
2285 vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
f6aa6dc4
DH
2286 /* fprs can be synchronized via vrs, even if the guest has no vx. With
2287 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
2288 */
2289 if (MACHINE_HAS_VX)
68c55750 2290 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
6fd8e67d
DH
2291 else
2292 vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
dafd032a
DD
2293
2294 if (kvm_is_ucontrol(vcpu->kvm))
2295 return __kvm_ucontrol_vcpu_init(vcpu);
2296
b0c632db
HC
2297 return 0;
2298}
2299
db0758b2
DH
2300/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2301static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2302{
2303 WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
9c23a131 2304 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
db0758b2 2305 vcpu->arch.cputm_start = get_tod_clock_fast();
9c23a131 2306 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
db0758b2
DH
2307}
2308
2309/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2310static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2311{
2312 WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
9c23a131 2313 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
db0758b2
DH
2314 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
2315 vcpu->arch.cputm_start = 0;
9c23a131 2316 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
db0758b2
DH
2317}
2318
2319/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2320static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2321{
2322 WARN_ON_ONCE(vcpu->arch.cputm_enabled);
2323 vcpu->arch.cputm_enabled = true;
2324 __start_cpu_timer_accounting(vcpu);
2325}
2326
2327/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2328static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2329{
2330 WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
2331 __stop_cpu_timer_accounting(vcpu);
2332 vcpu->arch.cputm_enabled = false;
2333}
2334
2335static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2336{
2337 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2338 __enable_cpu_timer_accounting(vcpu);
2339 preempt_enable();
2340}
2341
2342static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2343{
2344 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2345 __disable_cpu_timer_accounting(vcpu);
2346 preempt_enable();
2347}
2348
4287f247
DH
2349/* set the cpu timer - may only be called from the VCPU thread itself */
2350void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
2351{
db0758b2 2352 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
9c23a131 2353 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
db0758b2
DH
2354 if (vcpu->arch.cputm_enabled)
2355 vcpu->arch.cputm_start = get_tod_clock_fast();
4287f247 2356 vcpu->arch.sie_block->cputm = cputm;
9c23a131 2357 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
db0758b2 2358 preempt_enable();
4287f247
DH
2359}
2360
db0758b2 2361/* update and get the cpu timer - can also be called from other VCPU threads */
4287f247
DH
2362__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
2363{
9c23a131 2364 unsigned int seq;
db0758b2 2365 __u64 value;
db0758b2
DH
2366
2367 if (unlikely(!vcpu->arch.cputm_enabled))
2368 return vcpu->arch.sie_block->cputm;
2369
9c23a131
DH
2370 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2371 do {
2372 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
2373 /*
2374 * If the writer would ever execute a read in the critical
2375 * section, e.g. in irq context, we have a deadlock.
2376 */
2377 WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
2378 value = vcpu->arch.sie_block->cputm;
2379 /* if cputm_start is 0, accounting is being started/stopped */
2380 if (likely(vcpu->arch.cputm_start))
2381 value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
2382 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
2383 preempt_enable();
db0758b2 2384 return value;
4287f247
DH
2385}
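
Editor's sketch of the lockless pattern the two functions above use: a single writer brackets its updates with sequence-count increments, and readers retry until they observe an even, unchanged count instead of blocking. Plain C11 atomics stand in for the kernel's seqcount API; all names below are made up, and a single writer (like the vcpu thread owning cputm_seqcount) is assumed.

#include <stdatomic.h>
#include <stdint.h>

static _Atomic unsigned int seq;	/* even: stable, odd: write in flight */
static _Atomic uint64_t cputm;		/* remaining time at the last start */
static _Atomic uint64_t cputm_start;	/* timestamp accounting was started */

static void timer_set(uint64_t now, uint64_t value)	/* writer side */
{
	atomic_fetch_add(&seq, 1);		/* count becomes odd */
	cputm_start = now;
	cputm = value;
	atomic_fetch_add(&seq, 1);		/* count even again */
}

static uint64_t timer_get(uint64_t now)			/* reader side */
{
	unsigned int s;
	uint64_t v;

	do {
		while ((s = atomic_load(&seq)) & 1)
			;			/* writer active: wait */
		v = cputm - (now - cputm_start);
	} while (atomic_load(&seq) != s);	/* raced a writer: retry */
	return v;
}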
2386
b0c632db
HC
2387void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2388{
9977e886 2389
37d9df98 2390 gmap_enable(vcpu->arch.enabled_gmap);
ef8f4f49 2391 kvm_s390_set_cpuflags(vcpu, CPUSTAT_RUNNING);
5ebda316 2392 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
db0758b2 2393 __start_cpu_timer_accounting(vcpu);
01a745ac 2394 vcpu->cpu = cpu;
b0c632db
HC
2395}
2396
2397void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
2398{
01a745ac 2399 vcpu->cpu = -1;
5ebda316 2400 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
db0758b2 2401 __stop_cpu_timer_accounting(vcpu);
9daecfc6 2402 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_RUNNING);
37d9df98
DH
2403 vcpu->arch.enabled_gmap = gmap_get_enabled();
2404 gmap_disable(vcpu->arch.enabled_gmap);
9977e886 2405
b0c632db
HC
2406}
2407
2408static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
2409{
2410 /* this equals the initial cpu reset in POP, but we don't switch to ESA */
2411 vcpu->arch.sie_block->gpsw.mask = 0UL;
2412 vcpu->arch.sie_block->gpsw.addr = 0UL;
8d26cf7b 2413 kvm_s390_set_prefix(vcpu, 0);
4287f247 2414 kvm_s390_set_cpu_timer(vcpu, 0);
b0c632db
HC
2415 vcpu->arch.sie_block->ckc = 0UL;
2416 vcpu->arch.sie_block->todpr = 0;
2417 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
2418 vcpu->arch.sie_block->gcr[0] = 0xE0UL;
2419 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
9abc2a08
DH
2420 /* make sure the new fpc will be lazily loaded */
2421 save_fpu_regs();
2422 current->thread.fpu.fpc = 0;
b0c632db 2423 vcpu->arch.sie_block->gbea = 1;
672550fb 2424 vcpu->arch.sie_block->pp = 0;
35b3fde6 2425 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
3c038e6b
DD
2426 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
2427 kvm_clear_async_pf_completion_queue(vcpu);
6352e4d2
DH
2428 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
2429 kvm_s390_vcpu_stop(vcpu);
2ed10cc1 2430 kvm_s390_clear_local_irqs(vcpu);
b0c632db
HC
2431}
2432
31928aa5 2433void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
42897d86 2434{
72f25020 2435 mutex_lock(&vcpu->kvm->lock);
fdf03650 2436 preempt_disable();
72f25020 2437 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
d16b52cb 2438 vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
fdf03650 2439 preempt_enable();
72f25020 2440 mutex_unlock(&vcpu->kvm->lock);
25508824 2441 if (!kvm_is_ucontrol(vcpu->kvm)) {
dafd032a 2442 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
eaa78f34 2443 sca_add_vcpu(vcpu);
25508824 2444 }
6502a34c
DH
2445 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
2446 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
37d9df98
DH
2447 /* make vcpu_load load the right gmap on the first trigger */
2448 vcpu->arch.enabled_gmap = vcpu->arch.gmap;
42897d86
MT
2449}
2450
5102ee87
TK
2451static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
2452{
9d8d5786 2453 if (!test_kvm_facility(vcpu->kvm, 76))
5102ee87
TK
2454 return;
2455
a374e892
TK
2456 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
2457
2458 if (vcpu->kvm->arch.crypto.aes_kw)
2459 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
2460 if (vcpu->kvm->arch.crypto.dea_kw)
2461 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
2462
5102ee87
TK
2463 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
2464}
2465
b31605c1
DD
2466void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
2467{
2468 free_page(vcpu->arch.sie_block->cbrlo);
2469 vcpu->arch.sie_block->cbrlo = 0;
2470}
2471
2472int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
2473{
2474 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
2475 if (!vcpu->arch.sie_block->cbrlo)
2476 return -ENOMEM;
b31605c1
DD
2477 return 0;
2478}
2479
91520f1a
MM
2480static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
2481{
2482 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
2483
91520f1a 2484 vcpu->arch.sie_block->ibc = model->ibc;
80bc79dc 2485 if (test_kvm_facility(vcpu->kvm, 7))
c54f0d6a 2486 vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
91520f1a
MM
2487}
2488
b0c632db
HC
2489int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
2490{
b31605c1 2491 int rc = 0;
b31288fa 2492
9e6dabef
CH
2493 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
2494 CPUSTAT_SM |
a4a4f191
GH
2495 CPUSTAT_STOPPED);
2496
53df84f8 2497 if (test_kvm_facility(vcpu->kvm, 78))
ef8f4f49 2498 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED2);
53df84f8 2499 else if (test_kvm_facility(vcpu->kvm, 8))
ef8f4f49 2500 kvm_s390_set_cpuflags(vcpu, CPUSTAT_GED);
a4a4f191 2501
91520f1a
MM
2502 kvm_s390_vcpu_setup_model(vcpu);
2503
bdab09f3
DH
2504 /* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
2505 if (MACHINE_HAS_ESOP)
0c9d8683 2506 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
bd50e8ec 2507 if (test_kvm_facility(vcpu->kvm, 9))
0c9d8683 2508 vcpu->arch.sie_block->ecb |= ECB_SRSI;
f597d24e 2509 if (test_kvm_facility(vcpu->kvm, 73))
0c9d8683 2510 vcpu->arch.sie_block->ecb |= ECB_TE;
7feb6bb8 2511
c9f0a2b8 2512 if (test_kvm_facility(vcpu->kvm, 8) && vcpu->kvm->arch.use_pfmfi)
0c9d8683 2513 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
cd1836f5 2514 if (test_kvm_facility(vcpu->kvm, 130))
0c9d8683
DH
2515 vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
2516 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
48ee7d3a 2517 if (sclp.has_cei)
0c9d8683 2518 vcpu->arch.sie_block->eca |= ECA_CEI;
11ad65b7 2519 if (sclp.has_ib)
0c9d8683 2520 vcpu->arch.sie_block->eca |= ECA_IB;
37c5f6c8 2521 if (sclp.has_siif)
0c9d8683 2522 vcpu->arch.sie_block->eca |= ECA_SII;
37c5f6c8 2523 if (sclp.has_sigpif)
0c9d8683 2524 vcpu->arch.sie_block->eca |= ECA_SIGPI;
18280d8b 2525 if (test_kvm_facility(vcpu->kvm, 129)) {
0c9d8683
DH
2526 vcpu->arch.sie_block->eca |= ECA_VX;
2527 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
13211ea7 2528 }
8fa1696e
CW
2529 if (test_kvm_facility(vcpu->kvm, 139))
2530 vcpu->arch.sie_block->ecd |= ECD_MEF;
2531
d7c5cb01
MM
2532 if (vcpu->arch.sie_block->gd) {
2533 vcpu->arch.sie_block->eca |= ECA_AIV;
2534 VCPU_EVENT(vcpu, 3, "AIV gisa format-%u enabled for cpu %03u",
2535 vcpu->arch.sie_block->gd & 0x3, vcpu->vcpu_id);
2536 }
4e0b1ab7
FZ
2537 vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
2538 | SDNXC;
c6e5f166 2539 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
730cd632
FA
2540
2541 if (sclp.has_kss)
ef8f4f49 2542 kvm_s390_set_cpuflags(vcpu, CPUSTAT_KSS);
730cd632
FA
2543 else
2544 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
5a5e6536 2545
e6db1d61 2546 if (vcpu->kvm->arch.use_cmma) {
b31605c1
DD
2547 rc = kvm_s390_vcpu_setup_cmma(vcpu);
2548 if (rc)
2549 return rc;
b31288fa 2550 }
0ac96caf 2551 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
ca872302 2552 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
9d8d5786 2553
5102ee87
TK
2554 kvm_s390_vcpu_crypto_setup(vcpu);
2555
b31605c1 2556 return rc;
b0c632db
HC
2557}
2558
2559struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
2560 unsigned int id)
2561{
4d47555a 2562 struct kvm_vcpu *vcpu;
7feb6bb8 2563 struct sie_page *sie_page;
4d47555a
CO
2564 int rc = -EINVAL;
2565
4215825e 2566 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
4d47555a
CO
2567 goto out;
2568
2569 rc = -ENOMEM;
b0c632db 2570
b110feaf 2571 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
b0c632db 2572 if (!vcpu)
4d47555a 2573 goto out;
b0c632db 2574
da72ca4d 2575 BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
7feb6bb8
MM
2576 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
2577 if (!sie_page)
b0c632db
HC
2578 goto out_free_cpu;
2579
7feb6bb8
MM
2580 vcpu->arch.sie_block = &sie_page->sie_block;
2581 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
2582
efed1104
DH
2583 /* the real guest size will always be smaller than msl */
2584 vcpu->arch.sie_block->mso = 0;
2585 vcpu->arch.sie_block->msl = sclp.hamax;
2586
b0c632db 2587 vcpu->arch.sie_block->icpua = id;
ba5c1e9b 2588 spin_lock_init(&vcpu->arch.local_int.lock);
d7c5cb01 2589 vcpu->arch.sie_block->gd = (u32)(u64)kvm->arch.gisa;
4b9f9525
MM
2590 if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
2591 vcpu->arch.sie_block->gd |= GISA_FORMAT1;
9c23a131 2592 seqcount_init(&vcpu->arch.cputm_seqcount);
ba5c1e9b 2593
b0c632db
HC
2594 rc = kvm_vcpu_init(vcpu, kvm, id);
2595 if (rc)
9abc2a08 2596 goto out_free_sie_block;
8335713a 2597 VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
b0c632db 2598 vcpu->arch.sie_block);
ade38c31 2599 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
b0c632db 2600
b0c632db 2601 return vcpu;
7b06bf2f
WY
2602out_free_sie_block:
2603 free_page((unsigned long)(vcpu->arch.sie_block));
b0c632db 2604out_free_cpu:
b110feaf 2605 kmem_cache_free(kvm_vcpu_cache, vcpu);
4d47555a 2606out:
b0c632db
HC
2607 return ERR_PTR(rc);
2608}
2609
b0c632db
HC
2610int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
2611{
9a022067 2612 return kvm_s390_vcpu_has_irq(vcpu, 0);
b0c632db
HC
2613}
2614
199b5763
LM
2615bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
2616{
0546c63d 2617 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
199b5763
LM
2618}
2619
27406cd5 2620void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
49b99e1e 2621{
805de8f4 2622 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
61a6df54 2623 exit_sie(vcpu);
49b99e1e
CB
2624}
2625
27406cd5 2626void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
49b99e1e 2627{
805de8f4 2628 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
49b99e1e
CB
2629}
2630
8e236546
CB
2631static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
2632{
805de8f4 2633 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
61a6df54 2634 exit_sie(vcpu);
8e236546
CB
2635}
2636
2637static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
2638{
9bf9fde2 2639 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
8e236546
CB
2640}
2641
49b99e1e
CB
2642/*
2643 * Kick a guest cpu out of SIE and wait until SIE is not running.
2644 * If the CPU is not running (e.g. waiting as idle) the function will
2645 * return immediately. */
2646void exit_sie(struct kvm_vcpu *vcpu)
2647{
ef8f4f49 2648 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
49b99e1e
CB
2649 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
2650 cpu_relax();
2651}
2652
8e236546
CB
2653/* Kick a guest cpu out of SIE to process a request synchronously */
2654void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
49b99e1e 2655{
8e236546
CB
2656 kvm_make_request(req, vcpu);
2657 kvm_s390_vcpu_request(vcpu);
49b99e1e
CB
2658}
2659
414d3b07
MS
2660static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
2661 unsigned long end)
2c70fe44 2662{
2c70fe44
CB
2663 struct kvm *kvm = gmap->private;
2664 struct kvm_vcpu *vcpu;
414d3b07
MS
2665 unsigned long prefix;
2666 int i;
2c70fe44 2667
65d0b0d4
DH
2668 if (gmap_is_shadow(gmap))
2669 return;
414d3b07
MS
2670 if (start >= 1UL << 31)
2671 /* We are only interested in prefix pages */
2672 return;
2c70fe44
CB
2673 kvm_for_each_vcpu(i, vcpu, kvm) {
2674 /* match against both prefix pages */
414d3b07
MS
2675 prefix = kvm_s390_get_prefix(vcpu);
2676 if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
2677 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
2678 start, end);
8e236546 2679 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
2c70fe44
CB
2680 }
2681 }
2682}
2683
b6d33834
CD
2684int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
2685{
2686 /* kvm common code refers to this, but never calls it */
2687 BUG();
2688 return 0;
2689}
2690
14eebd91
CO
2691static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
2692 struct kvm_one_reg *reg)
2693{
2694 int r = -EINVAL;
2695
2696 switch (reg->id) {
29b7c71b
CO
2697 case KVM_REG_S390_TODPR:
2698 r = put_user(vcpu->arch.sie_block->todpr,
2699 (u32 __user *)reg->addr);
2700 break;
2701 case KVM_REG_S390_EPOCHDIFF:
2702 r = put_user(vcpu->arch.sie_block->epoch,
2703 (u64 __user *)reg->addr);
2704 break;
46a6dd1c 2705 case KVM_REG_S390_CPU_TIMER:
4287f247 2706 r = put_user(kvm_s390_get_cpu_timer(vcpu),
46a6dd1c
J
2707 (u64 __user *)reg->addr);
2708 break;
2709 case KVM_REG_S390_CLOCK_COMP:
2710 r = put_user(vcpu->arch.sie_block->ckc,
2711 (u64 __user *)reg->addr);
2712 break;
536336c2
DD
2713 case KVM_REG_S390_PFTOKEN:
2714 r = put_user(vcpu->arch.pfault_token,
2715 (u64 __user *)reg->addr);
2716 break;
2717 case KVM_REG_S390_PFCOMPARE:
2718 r = put_user(vcpu->arch.pfault_compare,
2719 (u64 __user *)reg->addr);
2720 break;
2721 case KVM_REG_S390_PFSELECT:
2722 r = put_user(vcpu->arch.pfault_select,
2723 (u64 __user *)reg->addr);
2724 break;
672550fb
CB
2725 case KVM_REG_S390_PP:
2726 r = put_user(vcpu->arch.sie_block->pp,
2727 (u64 __user *)reg->addr);
2728 break;
afa45ff5
CB
2729 case KVM_REG_S390_GBEA:
2730 r = put_user(vcpu->arch.sie_block->gbea,
2731 (u64 __user *)reg->addr);
2732 break;
14eebd91
CO
2733 default:
2734 break;
2735 }
2736
2737 return r;
2738}
2739
2740static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
2741 struct kvm_one_reg *reg)
2742{
2743 int r = -EINVAL;
4287f247 2744 __u64 val;
14eebd91
CO
2745
2746 switch (reg->id) {
29b7c71b
CO
2747 case KVM_REG_S390_TODPR:
2748 r = get_user(vcpu->arch.sie_block->todpr,
2749 (u32 __user *)reg->addr);
2750 break;
2751 case KVM_REG_S390_EPOCHDIFF:
2752 r = get_user(vcpu->arch.sie_block->epoch,
2753 (u64 __user *)reg->addr);
2754 break;
46a6dd1c 2755 case KVM_REG_S390_CPU_TIMER:
4287f247
DH
2756 r = get_user(val, (u64 __user *)reg->addr);
2757 if (!r)
2758 kvm_s390_set_cpu_timer(vcpu, val);
46a6dd1c
J
2759 break;
2760 case KVM_REG_S390_CLOCK_COMP:
2761 r = get_user(vcpu->arch.sie_block->ckc,
2762 (u64 __user *)reg->addr);
2763 break;
536336c2
DD
2764 case KVM_REG_S390_PFTOKEN:
2765 r = get_user(vcpu->arch.pfault_token,
2766 (u64 __user *)reg->addr);
9fbd8082
DH
2767 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2768 kvm_clear_async_pf_completion_queue(vcpu);
536336c2
DD
2769 break;
2770 case KVM_REG_S390_PFCOMPARE:
2771 r = get_user(vcpu->arch.pfault_compare,
2772 (u64 __user *)reg->addr);
2773 break;
2774 case KVM_REG_S390_PFSELECT:
2775 r = get_user(vcpu->arch.pfault_select,
2776 (u64 __user *)reg->addr);
2777 break;
672550fb
CB
2778 case KVM_REG_S390_PP:
2779 r = get_user(vcpu->arch.sie_block->pp,
2780 (u64 __user *)reg->addr);
2781 break;
afa45ff5
CB
2782 case KVM_REG_S390_GBEA:
2783 r = get_user(vcpu->arch.sie_block->gbea,
2784 (u64 __user *)reg->addr);
2785 break;
14eebd91
CO
2786 default:
2787 break;
2788 }
2789
2790 return r;
2791}
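
Editor's sketch: these two handlers back the generic KVM_GET_ONE_REG / KVM_SET_ONE_REG vcpu ioctls. Reading the guest CPU timer from userspace, for example (vcpu_fd and get_guest_cpu_timer() are assumptions; an s390 host is assumed for the register id):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int get_guest_cpu_timer(int vcpu_fd, uint64_t *val)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_S390_CPU_TIMER,
		.addr = (uint64_t)(uintptr_t)val,	/* kernel writes here */
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}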
b6d33834 2792
b0c632db
HC
2793static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
2794{
b0c632db 2795 kvm_s390_vcpu_initial_reset(vcpu);
b0c632db
HC
2796 return 0;
2797}
2798
2799int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2800{
875656fe 2801 vcpu_load(vcpu);
5a32c1af 2802 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
875656fe 2803 vcpu_put(vcpu);
b0c632db
HC
2804 return 0;
2805}
2806
2807int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2808{
1fc9b76b 2809 vcpu_load(vcpu);
5a32c1af 2810 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
1fc9b76b 2811 vcpu_put(vcpu);
b0c632db
HC
2812 return 0;
2813}
2814
2815int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
2816 struct kvm_sregs *sregs)
2817{
b4ef9d4e
CD
2818 vcpu_load(vcpu);
2819
59674c1a 2820 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
b0c632db 2821 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
b4ef9d4e
CD
2822
2823 vcpu_put(vcpu);
b0c632db
HC
2824 return 0;
2825}
2826
2827int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
2828 struct kvm_sregs *sregs)
2829{
bcdec41c
CD
2830 vcpu_load(vcpu);
2831
59674c1a 2832 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
b0c632db 2833 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
bcdec41c
CD
2834
2835 vcpu_put(vcpu);
b0c632db
HC
2836 return 0;
2837}
2838
2839int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2840{
6a96bc7f
CD
2841 int ret = 0;
2842
2843 vcpu_load(vcpu);
2844
2845 if (test_fp_ctl(fpu->fpc)) {
2846 ret = -EINVAL;
2847 goto out;
2848 }
e1788bb9 2849 vcpu->run->s.regs.fpc = fpu->fpc;
9abc2a08 2850 if (MACHINE_HAS_VX)
a7d4b8f2
DH
2851 convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
2852 (freg_t *) fpu->fprs);
9abc2a08 2853 else
a7d4b8f2 2854 memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
6a96bc7f
CD
2855
2856out:
2857 vcpu_put(vcpu);
2858 return ret;
b0c632db
HC
2859}
2860
2861int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2862{
1393123e
CD
2863 vcpu_load(vcpu);
2864
9abc2a08
DH
2865 /* make sure we have the latest values */
2866 save_fpu_regs();
2867 if (MACHINE_HAS_VX)
a7d4b8f2
DH
2868 convert_vx_to_fp((freg_t *) fpu->fprs,
2869 (__vector128 *) vcpu->run->s.regs.vrs);
9abc2a08 2870 else
a7d4b8f2 2871 memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
e1788bb9 2872 fpu->fpc = vcpu->run->s.regs.fpc;
1393123e
CD
2873
2874 vcpu_put(vcpu);
b0c632db
HC
2875 return 0;
2876}
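
Editor's sketch of the register overlap that convert_fp_to_vx() and convert_vx_to_fp() above rely on: with the vector facility, floating-point register i is the leftmost doubleword of vector register i. The vec128 type and helper names below are stand-ins, not the kernel's own.

#include <stdint.h>

typedef struct { uint64_t hi, lo; } vec128;	/* stand-in for __vector128 */

static void fprs_from_vrs(uint64_t fprs[16], const vec128 vrs[16])
{
	int i;

	for (i = 0; i < 16; i++)
		fprs[i] = vrs[i].hi;	/* convert_vx_to_fp() direction */
}

static void fprs_to_vrs(vec128 vrs[16], const uint64_t fprs[16])
{
	int i;

	for (i = 0; i < 16; i++)
		vrs[i].hi = fprs[i];	/* convert_fp_to_vx() direction */
}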
2877
2878static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
2879{
2880 int rc = 0;
2881
7a42fdc2 2882 if (!is_vcpu_stopped(vcpu))
b0c632db 2883 rc = -EBUSY;
d7b0b5eb
CO
2884 else {
2885 vcpu->run->psw_mask = psw.mask;
2886 vcpu->run->psw_addr = psw.addr;
2887 }
b0c632db
HC
2888 return rc;
2889}
2890
2891int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
2892 struct kvm_translation *tr)
2893{
2894 return -EINVAL; /* not implemented yet */
2895}
2896
27291e21
DH
2897#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
2898 KVM_GUESTDBG_USE_HW_BP | \
2899 KVM_GUESTDBG_ENABLE)
2900
d0bfb940
JK
2901int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
2902 struct kvm_guest_debug *dbg)
b0c632db 2903{
27291e21
DH
2904 int rc = 0;
2905
66b56562
CD
2906 vcpu_load(vcpu);
2907
27291e21
DH
2908 vcpu->guest_debug = 0;
2909 kvm_s390_clear_bp_data(vcpu);
2910
66b56562
CD
2911 if (dbg->control & ~VALID_GUESTDBG_FLAGS) {
2912 rc = -EINVAL;
2913 goto out;
2914 }
2915 if (!sclp.has_gpere) {
2916 rc = -EINVAL;
2917 goto out;
2918 }
27291e21
DH
2919
2920 if (dbg->control & KVM_GUESTDBG_ENABLE) {
2921 vcpu->guest_debug = dbg->control;
2922 /* enforce guest PER */
ef8f4f49 2923 kvm_s390_set_cpuflags(vcpu, CPUSTAT_P);
27291e21
DH
2924
2925 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
2926 rc = kvm_s390_import_bp_data(vcpu, dbg);
2927 } else {
9daecfc6 2928 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
27291e21
DH
2929 vcpu->arch.guestdbg.last_bp = 0;
2930 }
2931
2932 if (rc) {
2933 vcpu->guest_debug = 0;
2934 kvm_s390_clear_bp_data(vcpu);
9daecfc6 2935 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_P);
27291e21
DH
2936 }
2937
66b56562
CD
2938out:
2939 vcpu_put(vcpu);
27291e21 2940 return rc;
b0c632db
HC
2941}
2942
62d9f0db
MT
2943int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
2944 struct kvm_mp_state *mp_state)
2945{
fd232561
CD
2946 int ret;
2947
2948 vcpu_load(vcpu);
2949
6352e4d2 2950 /* CHECK_STOP and LOAD are not supported yet */
fd232561
CD
2951 ret = is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
2952 KVM_MP_STATE_OPERATING;
2953
2954 vcpu_put(vcpu);
2955 return ret;
62d9f0db
MT
2956}
2957
2958int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
2959 struct kvm_mp_state *mp_state)
2960{
6352e4d2
DH
2961 int rc = 0;
2962
e83dff5e
CD
2963 vcpu_load(vcpu);
2964
6352e4d2
DH
2965 /* user space knows about this interface - let it control the state */
2966 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
2967
2968 switch (mp_state->mp_state) {
2969 case KVM_MP_STATE_STOPPED:
2970 kvm_s390_vcpu_stop(vcpu);
2971 break;
2972 case KVM_MP_STATE_OPERATING:
2973 kvm_s390_vcpu_start(vcpu);
2974 break;
2975 case KVM_MP_STATE_LOAD:
2976 case KVM_MP_STATE_CHECK_STOP:
2977 /* fall through - CHECK_STOP and LOAD are not supported yet */
2978 default:
2979 rc = -ENXIO;
2980 }
2981
e83dff5e 2982 vcpu_put(vcpu);
6352e4d2 2983 return rc;
62d9f0db
MT
2984}
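
Editor's sketch: stopping a vcpu through the mp_state interface. Note that, as the code above documents, the first KVM_SET_MP_STATE call permanently hands cpu state control to userspace for this VM. vcpu_fd and stop_vcpu() are assumptions.

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int stop_vcpu(int vcpu_fd)
{
	struct kvm_mp_state st = { .mp_state = KVM_MP_STATE_STOPPED };

	return ioctl(vcpu_fd, KVM_SET_MP_STATE, &st);
}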
2985
8ad35755
DH
2986static bool ibs_enabled(struct kvm_vcpu *vcpu)
2987{
8d5fb0dc 2988 return kvm_s390_test_cpuflags(vcpu, CPUSTAT_IBS);
8ad35755
DH
2989}
2990
2c70fe44
CB
2991static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
2992{
8ad35755 2993retry:
8e236546 2994 kvm_s390_vcpu_request_handled(vcpu);
2fa6e1e1 2995 if (!kvm_request_pending(vcpu))
586b7ccd 2996 return 0;
2c70fe44
CB
2997 /*
2998 * We use MMU_RELOAD just to re-arm the ipte notifier for the
b2d73b2a 2999 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
2c70fe44
CB
3000 * This ensures that the ipte instruction for this request has
3001 * already finished. We might race against a second unmapper that
3002 * wants to set the blocking bit. Let's just retry the request loop.
3003 */
8ad35755 3004 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
2c70fe44 3005 int rc;
b2d73b2a
MS
3006 rc = gmap_mprotect_notify(vcpu->arch.gmap,
3007 kvm_s390_get_prefix(vcpu),
3008 PAGE_SIZE * 2, PROT_WRITE);
aca411a4
JN
3009 if (rc) {
3010 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
2c70fe44 3011 return rc;
aca411a4 3012 }
8ad35755 3013 goto retry;
2c70fe44 3014 }
8ad35755 3015
d3d692c8
DH
3016 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
3017 vcpu->arch.sie_block->ihcpu = 0xffff;
3018 goto retry;
3019 }
3020
8ad35755
DH
3021 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
3022 if (!ibs_enabled(vcpu)) {
3023 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
ef8f4f49 3024 kvm_s390_set_cpuflags(vcpu, CPUSTAT_IBS);
8ad35755
DH
3025 }
3026 goto retry;
2c70fe44 3027 }
8ad35755
DH
3028
3029 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
3030 if (ibs_enabled(vcpu)) {
3031 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
9daecfc6 3032 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IBS);
8ad35755
DH
3033 }
3034 goto retry;
3035 }
3036
6502a34c
DH
3037 if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
3038 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
3039 goto retry;
3040 }
3041
190df4a2
CI
3042 if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
3043 /*
c9f0a2b8 3044 * Disable CMM virtualization; we will emulate the ESSA
190df4a2
CI
3045 * instruction manually, in order to provide additional
3046 * functionality needed for live migration.
3047 */
3048 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
3049 goto retry;
3050 }
3051
3052 if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
3053 /*
c9f0a2b8
JF
3054 * Re-enable CMM virtualization if CMMA is available and
3055 * CMM has been used.
190df4a2
CI
3056 */
3057 if ((vcpu->kvm->arch.use_cmma) &&
c9f0a2b8 3058 (vcpu->kvm->mm->context.uses_cmm))
190df4a2
CI
3059 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
3060 goto retry;
3061 }
3062
0759d068 3063 /* nothing to do, just clear the request */
72875d8a 3064 kvm_clear_request(KVM_REQ_UNHALT, vcpu);
0759d068 3065
2c70fe44
CB
3066 return 0;
3067}
3068
0e7def5f
DH
3069void kvm_s390_set_tod_clock(struct kvm *kvm,
3070 const struct kvm_s390_vm_tod_clock *gtod)
8fa1696e
CW
3071{
3072 struct kvm_vcpu *vcpu;
3073 struct kvm_s390_tod_clock_ext htod;
3074 int i;
3075
3076 mutex_lock(&kvm->lock);
3077 preempt_disable();
3078
3079 get_tod_clock_ext((char *)&htod);
3080
3081 kvm->arch.epoch = gtod->tod - htod.tod;
0e7def5f
DH
3082 kvm->arch.epdx = 0;
3083 if (test_kvm_facility(kvm, 139)) {
3084 kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
3085 if (kvm->arch.epoch > gtod->tod)
3086 kvm->arch.epdx -= 1;
3087 }
8fa1696e
CW
3088
3089 kvm_s390_vcpu_block_all(kvm);
3090 kvm_for_each_vcpu(i, vcpu, kvm) {
3091 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
3092 vcpu->arch.sie_block->epdx = kvm->arch.epdx;
3093 }
3094
3095 kvm_s390_vcpu_unblock_all(kvm);
3096 preempt_enable();
3097 mutex_unlock(&kvm->lock);
3098}
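
Editor's sketch of the 128-bit epoch arithmetic above: the (epoch_idx, tod) pair is treated as one wide number, so the 64-bit subtraction has to borrow from epoch_idx by hand when it wraps. The struct and function names are made up for illustration.

#include <stdint.h>

struct tod_ext {
	uint8_t epoch_idx;	/* high part (multiple-epoch facility) */
	uint64_t tod;		/* low 64 bits of the TOD clock */
};

static struct tod_ext epoch_diff(struct tod_ext guest, struct tod_ext host)
{
	struct tod_ext d;

	d.tod = guest.tod - host.tod;		/* may wrap around 2^64 */
	d.epoch_idx = guest.epoch_idx - host.epoch_idx;
	if (d.tod > guest.tod)			/* it wrapped: borrow one */
		d.epoch_idx -= 1;
	return d;
}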
3099
fa576c58
TH
3100/**
3101 * kvm_arch_fault_in_page - fault-in guest page if necessary
3102 * @vcpu: The corresponding virtual cpu
3103 * @gpa: Guest physical address
3104 * @writable: Whether the page should be writable or not
3105 *
3106 * Make sure that a guest page has been faulted-in on the host.
3107 *
3108 * Return: Zero on success, negative error code otherwise.
3109 */
3110long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
24eb3a82 3111{
527e30b4
MS
3112 return gmap_fault(vcpu->arch.gmap, gpa,
3113 writable ? FAULT_FLAG_WRITE : 0);
24eb3a82
DD
3114}
3115
3c038e6b
DD
3116static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
3117 unsigned long token)
3118{
3119 struct kvm_s390_interrupt inti;
383d0b05 3120 struct kvm_s390_irq irq;
3c038e6b
DD
3121
3122 if (start_token) {
383d0b05
JF
3123 irq.u.ext.ext_params2 = token;
3124 irq.type = KVM_S390_INT_PFAULT_INIT;
3125 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
3c038e6b
DD
3126 } else {
3127 inti.type = KVM_S390_INT_PFAULT_DONE;
383d0b05 3128 inti.parm64 = token;
3c038e6b
DD
3129 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
3130 }
3131}
3132
3133void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
3134 struct kvm_async_pf *work)
3135{
3136 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
3137 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
3138}
3139
3140void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
3141 struct kvm_async_pf *work)
3142{
3143 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
3144 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
3145}
3146
3147void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
3148 struct kvm_async_pf *work)
3149{
3150 /* s390 will always inject the page directly */
3151}
3152
3153bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
3154{
3155 /*
3156 * s390 will always inject the page directly,
3157 * but we still want check_async_completion to clean up
3158 */
3159 return true;
3160}
3161
3162static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
3163{
3164 hva_t hva;
3165 struct kvm_arch_async_pf arch;
3166 int rc;
3167
3168 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3169 return 0;
3170 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
3171 vcpu->arch.pfault_compare)
3172 return 0;
3173 if (psw_extint_disabled(vcpu))
3174 return 0;
9a022067 3175 if (kvm_s390_vcpu_has_irq(vcpu, 0))
3c038e6b
DD
3176 return 0;
3177 if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
3178 return 0;
3179 if (!vcpu->arch.gmap->pfault_enabled)
3180 return 0;
3181
81480cc1
HC
3182 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
3183 hva += current->thread.gmap_addr & ~PAGE_MASK;
3184 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
3c038e6b
DD
3185 return 0;
3186
3187 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
3188 return rc;
3189}
3190
3fb4c40f 3191static int vcpu_pre_run(struct kvm_vcpu *vcpu)
b0c632db 3192{
3fb4c40f 3193 int rc, cpuflags;
e168bf8d 3194
3c038e6b
DD
3195 /*
3196 * On s390 notifications for arriving pages will be delivered directly
3197 * to the guest, but the housekeeping for completed pfaults is
3198 * handled outside the worker.
3199 */
3200 kvm_check_async_pf_completion(vcpu);
3201
7ec7c8c7
CB
3202 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
3203 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
b0c632db
HC
3204
3205 if (need_resched())
3206 schedule();
3207
d3a73acb 3208 if (test_cpu_flag(CIF_MCCK_PENDING))
71cde587
CB
3209 s390_handle_mcck();
3210
79395031
JF
3211 if (!kvm_is_ucontrol(vcpu->kvm)) {
3212 rc = kvm_s390_deliver_pending_interrupts(vcpu);
3213 if (rc)
3214 return rc;
3215 }
0ff31867 3216
2c70fe44
CB
3217 rc = kvm_s390_handle_requests(vcpu);
3218 if (rc)
3219 return rc;
3220
27291e21
DH
3221 if (guestdbg_enabled(vcpu)) {
3222 kvm_s390_backup_guest_per_regs(vcpu);
3223 kvm_s390_patch_guest_per_regs(vcpu);
3224 }
3225
b0c632db 3226 vcpu->arch.sie_block->icptcode = 0;
3fb4c40f
TH
3227 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
3228 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
3229 trace_kvm_s390_sie_enter(vcpu, cpuflags);
2b29a9fd 3230
3fb4c40f
TH
3231 return 0;
3232}
3233
492d8642
TH
3234static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
3235{
56317920
DH
3236 struct kvm_s390_pgm_info pgm_info = {
3237 .code = PGM_ADDRESSING,
3238 };
3239 u8 opcode, ilen;
492d8642
TH
3240 int rc;
3241
3242 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
3243 trace_kvm_s390_sie_fault(vcpu);
3244
3245 /*
3246 * We want to inject an addressing exception, which is defined as a
3247 * suppressing or terminating exception. However, since we came here
3248 * by a DAT access exception, the PSW still points to the faulting
3249 * instruction since DAT exceptions are nullifying. So we've got
3250 * to look up the current opcode to get the length of the instruction
3251 * to be able to forward the PSW.
3252 */
3fa8cad7 3253 rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
56317920 3254 ilen = insn_length(opcode);
9b0d721a
DH
3255 if (rc < 0) {
3256 return rc;
3257 } else if (rc) {
3258 /* Instruction-Fetching Exceptions - we can't detect the ilen.
3259 * Forward by arbitrary ilc, injection will take care of
3260 * nullification if necessary.
3261 */
3262 pgm_info = vcpu->arch.pgm;
3263 ilen = 4;
3264 }
56317920
DH
3265 pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
3266 kvm_s390_forward_psw(vcpu, ilen);
3267 return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
492d8642
TH
3268}
3269
3fb4c40f
TH
3270static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
3271{
4d62fcc0
QH
3272 struct mcck_volatile_info *mcck_info;
3273 struct sie_page *sie_page;
3274
2b29a9fd
DD
3275 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
3276 vcpu->arch.sie_block->icptcode);
3277 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
3278
27291e21
DH
3279 if (guestdbg_enabled(vcpu))
3280 kvm_s390_restore_guest_per_regs(vcpu);
3281
7ec7c8c7
CB
3282 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
3283 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
71f116bf 3284
4d62fcc0
QH
3285 if (exit_reason == -EINTR) {
3286 VCPU_EVENT(vcpu, 3, "%s", "machine check");
3287 sie_page = container_of(vcpu->arch.sie_block,
3288 struct sie_page, sie_block);
3289 mcck_info = &sie_page->mcck_info;
3290 kvm_s390_reinject_machine_check(vcpu, mcck_info);
3291 return 0;
3292 }
3293
71f116bf
DH
3294 if (vcpu->arch.sie_block->icptcode > 0) {
3295 int rc = kvm_handle_sie_intercept(vcpu);
3296
3297 if (rc != -EOPNOTSUPP)
3298 return rc;
3299 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
3300 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
3301 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
3302 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
3303 return -EREMOTE;
3304 } else if (exit_reason != -EFAULT) {
3305 vcpu->stat.exit_null++;
3306 return 0;
210b1607
TH
3307 } else if (kvm_is_ucontrol(vcpu->kvm)) {
3308 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
3309 vcpu->run->s390_ucontrol.trans_exc_code =
3310 current->thread.gmap_addr;
3311 vcpu->run->s390_ucontrol.pgm_code = 0x10;
71f116bf 3312 return -EREMOTE;
24eb3a82 3313 } else if (current->thread.gmap_pfault) {
3c038e6b 3314 trace_kvm_s390_major_guest_pfault(vcpu);
24eb3a82 3315 current->thread.gmap_pfault = 0;
71f116bf
DH
3316 if (kvm_arch_setup_async_pf(vcpu))
3317 return 0;
3318 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
a76ccff6 3319 }
71f116bf 3320 return vcpu_post_run_fault_in_sie(vcpu);
3fb4c40f
TH
3321}
3322
3323static int __vcpu_run(struct kvm_vcpu *vcpu)
3324{
3325 int rc, exit_reason;
3326
800c1065
TH
3327 /*
3328 * We try to hold kvm->srcu during most of vcpu_run (except when run-
3329 * ning the guest), so that memslots (and other stuff) are protected
3330 */
3331 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
3332
a76ccff6
TH
3333 do {
3334 rc = vcpu_pre_run(vcpu);
3335 if (rc)
3336 break;
3fb4c40f 3337
800c1065 3338 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
a76ccff6
TH
3339 /*
3340 * As PF_VCPU will be used in the fault handler, there
3341 * must be no uaccess between guest_enter and guest_exit.
3342 */
0097d12e 3343 local_irq_disable();
6edaa530 3344 guest_enter_irqoff();
db0758b2 3345 __disable_cpu_timer_accounting(vcpu);
0097d12e 3346 local_irq_enable();
a76ccff6
TH
3347 exit_reason = sie64a(vcpu->arch.sie_block,
3348 vcpu->run->s.regs.gprs);
0097d12e 3349 local_irq_disable();
db0758b2 3350 __enable_cpu_timer_accounting(vcpu);
6edaa530 3351 guest_exit_irqoff();
0097d12e 3352 local_irq_enable();
800c1065 3353 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
a76ccff6
TH
3354
3355 rc = vcpu_post_run(vcpu, exit_reason);
27291e21 3356 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
3fb4c40f 3357
800c1065 3358 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
e168bf8d 3359 return rc;
b0c632db
HC
3360}
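
Editor's sketch: the -EREMOTE paths in vcpu_post_run() above surface in userspace as KVM_RUN exit reasons. A minimal dispatch loop might look like this; vcpu_fd, run_loop() and the mmap'ed run structure are assumptions, and most exit reasons are left unhandled.

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int run_loop(int vcpu_fd, struct kvm_run *run)
{
	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
			return -1;	/* e.g. interrupted by a signal */
		switch (run->exit_reason) {
		case KVM_EXIT_S390_SIEIC:
			/* an intercept the kernel could not handle itself */
			printf("icptcode %u ipa %04x ipb %08x\n",
			       run->s390_sieic.icptcode,
			       run->s390_sieic.ipa, run->s390_sieic.ipb);
			return 0;
		case KVM_EXIT_S390_UCONTROL:
			/* ucontrol VM: fault in the page at
			 * run->s390_ucontrol.trans_exc_code, then re-run */
			break;
		default:
			return 0;
		}
	}
}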
3361
b028ee3e
DH
3362static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3363{
4d5f2c04 3364 struct runtime_instr_cb *riccb;
4e0b1ab7 3365 struct gs_cb *gscb;
4d5f2c04
CB
3366
3367 riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
4e0b1ab7 3368 gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
b028ee3e
DH
3369 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
3370 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
3371 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
3372 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
3373 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
3374 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
d3d692c8
DH
3375 /* some control register changes require a tlb flush */
3376 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
b028ee3e
DH
3377 }
3378 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
4287f247 3379 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
b028ee3e
DH
3380 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
3381 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
3382 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
3383 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
3384 }
3385 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
3386 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
3387 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
3388 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
9fbd8082
DH
3389 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3390 kvm_clear_async_pf_completion_queue(vcpu);
b028ee3e 3391 }
80cd8763
FZ
3392 /*
3393 * If userspace sets the riccb (e.g. after migration) to a valid state,
3394 * we should enable RI here instead of doing the lazy enablement.
3395 */
3396 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
4d5f2c04 3397 test_kvm_facility(vcpu->kvm, 64) &&
bb59c2da 3398 riccb->v &&
0c9d8683 3399 !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
4d5f2c04 3400 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
0c9d8683 3401 vcpu->arch.sie_block->ecb3 |= ECB3_RI;
80cd8763 3402 }
4e0b1ab7
FZ
3403 /*
3404 * If userspace sets the gscb (e.g. after migration) to non-zero,
3405 * we should enable GS here instead of doing the lazy enablement.
3406 */
3407 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
3408 test_kvm_facility(vcpu->kvm, 133) &&
3409 gscb->gssm &&
3410 !vcpu->arch.gs_enabled) {
3411 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
3412 vcpu->arch.sie_block->ecb |= ECB_GS;
3413 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
3414 vcpu->arch.gs_enabled = 1;
80cd8763 3415 }
35b3fde6
CB
3416 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
3417 test_kvm_facility(vcpu->kvm, 82)) {
3418 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
3419 vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
3420 }
31d8b8d4
CB
3421 save_access_regs(vcpu->arch.host_acrs);
3422 restore_access_regs(vcpu->run->s.regs.acrs);
e1788bb9
CB
3423 /* save host (userspace) fprs/vrs */
3424 save_fpu_regs();
3425 vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
3426 vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
3427 if (MACHINE_HAS_VX)
3428 current->thread.fpu.regs = vcpu->run->s.regs.vrs;
3429 else
3430 current->thread.fpu.regs = vcpu->run->s.regs.fprs;
3431 current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
3432 if (test_fp_ctl(current->thread.fpu.fpc))
3433 /* User space provided an invalid FPC, let's clear it */
3434 current->thread.fpu.fpc = 0;
4e0b1ab7
FZ
3435 if (MACHINE_HAS_GS) {
3436 preempt_disable();
3437 __ctl_set_bit(2, 4);
3438 if (current->thread.gs_cb) {
3439 vcpu->arch.host_gscb = current->thread.gs_cb;
3440 save_gs_cb(vcpu->arch.host_gscb);
3441 }
3442 if (vcpu->arch.gs_enabled) {
3443 current->thread.gs_cb = (struct gs_cb *)
3444 &vcpu->run->s.regs.gscb;
3445 restore_gs_cb(current->thread.gs_cb);
3446 }
3447 preempt_enable();
3448 }
80cd8763 3449
b028ee3e
DH
3450 kvm_run->kvm_dirty_regs = 0;
3451}
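
/*
 * Userspace side of the kvm_dirty_regs protocol that sync_regs()
 * implements: the VMM marks which pieces of kvm_run->s.regs it has
 * modified before calling KVM_RUN. A hypothetical sketch (vcpu_fd and
 * the mmap'ed struct kvm_run *run are assumed to be set up already):
 *
 *	run->s.regs.prefix = new_prefix;
 *	run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 *	// sync_regs() applied the prefix and cleared kvm_dirty_regs;
 *	// control register updates (KVM_SYNC_CRS) additionally raise
 *	// a KVM_REQ_TLB_FLUSH request.
 */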
3452
3453static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3454{
3455 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
3456 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
3457 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
3458 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
4287f247 3459 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
b028ee3e
DH
3460 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
3461 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
3462 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
3463 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
3464 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
3465 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
3466 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
35b3fde6 3467 kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
31d8b8d4
CB
3468 save_access_regs(vcpu->run->s.regs.acrs);
3469 restore_access_regs(vcpu->arch.host_acrs);
e1788bb9
CB
3470 /* Save guest register state */
3471 save_fpu_regs();
3472 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
3473 /* Restore will be done lazily at return */
3474 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
3475 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
4e0b1ab7
FZ
3476 if (MACHINE_HAS_GS) {
3477 __ctl_set_bit(2, 4);
3478 if (vcpu->arch.gs_enabled)
3479 save_gs_cb(current->thread.gs_cb);
3480 preempt_disable();
3481 current->thread.gs_cb = vcpu->arch.host_gscb;
3482 restore_gs_cb(vcpu->arch.host_gscb);
3483 preempt_enable();
3484 if (!vcpu->arch.host_gscb)
3485 __ctl_clear_bit(2, 4);
3486 vcpu->arch.host_gscb = NULL;
3487 }
e1788bb9 3488
b028ee3e
DH
3489}
3490
b0c632db
HC
3491int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3492{
8f2abe6a 3493 int rc;
b0c632db 3494
460df4c1
PB
3495 if (kvm_run->immediate_exit)
3496 return -EINTR;
3497
accb757d
CD
3498 vcpu_load(vcpu);
3499
27291e21
DH
3500 if (guestdbg_exit_pending(vcpu)) {
3501 kvm_s390_prepare_debug_exit(vcpu);
accb757d
CD
3502 rc = 0;
3503 goto out;
27291e21
DH
3504 }
3505
20b7035c 3506 kvm_sigset_activate(vcpu);
b0c632db 3507
6352e4d2
DH
3508 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
3509 kvm_s390_vcpu_start(vcpu);
3510 } else if (is_vcpu_stopped(vcpu)) {
ea2cdd27 3511 pr_err_ratelimited("can't run stopped vcpu %d\n",
6352e4d2 3512 vcpu->vcpu_id);
accb757d
CD
3513 rc = -EINVAL;
3514 goto out;
6352e4d2 3515 }
b0c632db 3516
b028ee3e 3517 sync_regs(vcpu, kvm_run);
db0758b2 3518 enable_cpu_timer_accounting(vcpu);
d7b0b5eb 3519
dab4079d 3520 might_fault();
a76ccff6 3521 rc = __vcpu_run(vcpu);
9ace903d 3522
b1d16c49
CE
3523 if (signal_pending(current) && !rc) {
3524 kvm_run->exit_reason = KVM_EXIT_INTR;
8f2abe6a 3525 rc = -EINTR;
b1d16c49 3526 }
8f2abe6a 3527
27291e21
DH
3528 if (guestdbg_exit_pending(vcpu) && !rc) {
3529 kvm_s390_prepare_debug_exit(vcpu);
3530 rc = 0;
3531 }
3532
8f2abe6a 3533 if (rc == -EREMOTE) {
71f116bf 3534 /* userspace support is needed, kvm_run has been prepared */
8f2abe6a
CB
3535 rc = 0;
3536 }
b0c632db 3537
db0758b2 3538 disable_cpu_timer_accounting(vcpu);
b028ee3e 3539 store_regs(vcpu, kvm_run);
d7b0b5eb 3540
20b7035c 3541 kvm_sigset_deactivate(vcpu);
b0c632db 3542
b0c632db 3543 vcpu->stat.exit_userspace++;
accb757d
CD
3544out:
3545 vcpu_put(vcpu);
7e8e6ab4 3546 return rc;
b0c632db
HC
3547}
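
/*
 * A hypothetical userspace run loop against the ioctl above (error
 * handling elided; vcpu_fd and the mmap'ed run area are assumed):
 *
 *	for (;;) {
 *		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0) {
 *			if (errno == EINTR)	// signal or immediate_exit
 *				continue;
 *			break;
 *		}
 *		switch (run->exit_reason) {
 *		case KVM_EXIT_S390_SIEIC:	// intercept for userspace
 *		default:
 *			break;
 *		}
 *	}
 */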
3548
b0c632db
HC
3549/*
3550 * store status at address
 3551 * we have two special cases:
3552 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
3553 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
3554 */
d0bce605 3555int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
b0c632db 3556{
092670cd 3557 unsigned char archmode = 1;
9abc2a08 3558 freg_t fprs[NUM_FPRS];
fda902cb 3559 unsigned int px;
4287f247 3560 u64 clkcomp, cputm;
d0bce605 3561 int rc;
b0c632db 3562
d9a3a09a 3563 px = kvm_s390_get_prefix(vcpu);
d0bce605
HC
3564 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
3565 if (write_guest_abs(vcpu, 163, &archmode, 1))
b0c632db 3566 return -EFAULT;
d9a3a09a 3567 gpa = 0;
d0bce605
HC
3568 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
3569 if (write_guest_real(vcpu, 163, &archmode, 1))
b0c632db 3570 return -EFAULT;
d9a3a09a
MS
3571 gpa = px;
3572 } else
3573 gpa -= __LC_FPREGS_SAVE_AREA;
9abc2a08
DH
3574
3575 /* manually convert vector registers if necessary */
3576 if (MACHINE_HAS_VX) {
9522b37f 3577 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
9abc2a08
DH
3578 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
3579 fprs, 128);
3580 } else {
3581 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
6fd8e67d 3582 vcpu->run->s.regs.fprs, 128);
9abc2a08 3583 }
d9a3a09a 3584 rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
d0bce605 3585 vcpu->run->s.regs.gprs, 128);
d9a3a09a 3586 rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
d0bce605 3587 &vcpu->arch.sie_block->gpsw, 16);
d9a3a09a 3588 rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
fda902cb 3589 &px, 4);
d9a3a09a 3590 rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
9abc2a08 3591 &vcpu->run->s.regs.fpc, 4);
d9a3a09a 3592 rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
d0bce605 3593 &vcpu->arch.sie_block->todpr, 4);
4287f247 3594 cputm = kvm_s390_get_cpu_timer(vcpu);
d9a3a09a 3595 rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
4287f247 3596 &cputm, 8);
178bd789 3597 clkcomp = vcpu->arch.sie_block->ckc >> 8;
d9a3a09a 3598 rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
d0bce605 3599 &clkcomp, 8);
d9a3a09a 3600 rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
d0bce605 3601 &vcpu->run->s.regs.acrs, 64);
d9a3a09a 3602 rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
d0bce605
HC
3603 &vcpu->arch.sie_block->gcr, 128);
3604 return rc ? -EFAULT : 0;
b0c632db
HC
3605}
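
/*
 * Save area layout written above, relative to gpa once the
 * __LC_FPREGS_SAVE_AREA adjustment has been applied (sizes in bytes):
 * fprs or converted vrs (128), gprs (128), psw (16), prefix (4),
 * fpc (4), todpr (4), cpu timer (8), clock comparator >> 8 (8),
 * acrs (64), crs (128). The __LC_* constants give the architected
 * offsets within the save area.
 */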
3606
e879892c
TH
3607int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
3608{
3609 /*
3610 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
31d8b8d4 3611 * switch in the run ioctl. Let's update our copies before we save
e879892c
TH
 3612 * them into the save area
3613 */
d0164ee2 3614 save_fpu_regs();
9abc2a08 3615 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
e879892c
TH
3616 save_access_regs(vcpu->run->s.regs.acrs);
3617
3618 return kvm_s390_store_status_unloaded(vcpu, addr);
3619}
3620
8ad35755
DH
3621static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
3622{
3623 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
8e236546 3624 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
8ad35755
DH
3625}
3626
3627static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
3628{
3629 unsigned int i;
3630 struct kvm_vcpu *vcpu;
3631
3632 kvm_for_each_vcpu(i, vcpu, kvm) {
3633 __disable_ibs_on_vcpu(vcpu);
3634 }
3635}
3636
3637static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
3638{
09a400e7
DH
3639 if (!sclp.has_ibs)
3640 return;
8ad35755 3641 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
8e236546 3642 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
8ad35755
DH
3643}
3644
6852d7b6
DH
3645void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
3646{
8ad35755
DH
3647 int i, online_vcpus, started_vcpus = 0;
3648
3649 if (!is_vcpu_stopped(vcpu))
3650 return;
3651
6852d7b6 3652 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
8ad35755 3653 /* Only one cpu at a time may enter/leave the STOPPED state. */
433b9ee4 3654 spin_lock(&vcpu->kvm->arch.start_stop_lock);
8ad35755
DH
3655 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
3656
3657 for (i = 0; i < online_vcpus; i++) {
3658 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
3659 started_vcpus++;
3660 }
3661
3662 if (started_vcpus == 0) {
3663 /* we're the only active VCPU -> speed it up */
3664 __enable_ibs_on_vcpu(vcpu);
3665 } else if (started_vcpus == 1) {
3666 /*
3667 * As we are starting a second VCPU, we have to disable
3668 * the IBS facility on all VCPUs to remove potentially
 3669 * outstanding ENABLE requests.
3670 */
3671 __disable_ibs_on_all_vcpus(vcpu->kvm);
3672 }
3673
9daecfc6 3674 kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED);
8ad35755
DH
3675 /*
3676 * Another VCPU might have used IBS while we were offline.
3677 * Let's play safe and flush the VCPU at startup.
3678 */
d3d692c8 3679 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
433b9ee4 3680 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
8ad35755 3681 return;
6852d7b6
DH
3682}
3683
3684void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
3685{
8ad35755
DH
3686 int i, online_vcpus, started_vcpus = 0;
3687 struct kvm_vcpu *started_vcpu = NULL;
3688
3689 if (is_vcpu_stopped(vcpu))
3690 return;
3691
6852d7b6 3692 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
8ad35755 3693 /* Only one cpu at a time may enter/leave the STOPPED state. */
433b9ee4 3694 spin_lock(&vcpu->kvm->arch.start_stop_lock);
8ad35755
DH
3695 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
3696
32f5ff63 3697 /* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
6cddd432 3698 kvm_s390_clear_stop_irq(vcpu);
32f5ff63 3699
ef8f4f49 3700 kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
8ad35755
DH
3701 __disable_ibs_on_vcpu(vcpu);
3702
3703 for (i = 0; i < online_vcpus; i++) {
3704 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
3705 started_vcpus++;
3706 started_vcpu = vcpu->kvm->vcpus[i];
3707 }
3708 }
3709
3710 if (started_vcpus == 1) {
3711 /*
3712 * As we only have one VCPU left, we want to enable the
3713 * IBS facility for that VCPU to speed it up.
3714 */
3715 __enable_ibs_on_vcpu(started_vcpu);
3716 }
3717
433b9ee4 3718 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
8ad35755 3719 return;
6852d7b6
DH
3720}
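
/*
 * Summary of the IBS bookkeeping in the two functions above: the IBS
 * facility is only kept enabled while exactly one VCPU is started.
 *	0 -> 1 started VCPUs: enable IBS on the starting VCPU
 *	1 -> 2 started VCPUs: disable IBS on all VCPUs
 *	2 -> 1 started VCPUs: re-enable IBS on the remaining VCPU
 */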
3721
d6712df9
CH
3722static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
3723 struct kvm_enable_cap *cap)
3724{
3725 int r;
3726
3727 if (cap->flags)
3728 return -EINVAL;
3729
3730 switch (cap->cap) {
fa6b7fe9
CH
3731 case KVM_CAP_S390_CSS_SUPPORT:
3732 if (!vcpu->kvm->arch.css_support) {
3733 vcpu->kvm->arch.css_support = 1;
c92ea7b9 3734 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
fa6b7fe9
CH
3735 trace_kvm_s390_enable_css(vcpu->kvm);
3736 }
3737 r = 0;
3738 break;
d6712df9
CH
3739 default:
3740 r = -EINVAL;
3741 break;
3742 }
3743 return r;
3744}
3745
41408c28
TH
3746static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
3747 struct kvm_s390_mem_op *mop)
3748{
3749 void __user *uaddr = (void __user *)mop->buf;
3750 void *tmpbuf = NULL;
3751 int r, srcu_idx;
3752 const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
3753 | KVM_S390_MEMOP_F_CHECK_ONLY;
3754
3755 if (mop->flags & ~supported_flags)
3756 return -EINVAL;
3757
3758 if (mop->size > MEM_OP_MAX_SIZE)
3759 return -E2BIG;
3760
3761 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
3762 tmpbuf = vmalloc(mop->size);
3763 if (!tmpbuf)
3764 return -ENOMEM;
3765 }
3766
3767 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
3768
3769 switch (mop->op) {
3770 case KVM_S390_MEMOP_LOGICAL_READ:
3771 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
92c96321
DH
3772 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
3773 mop->size, GACC_FETCH);
41408c28
TH
3774 break;
3775 }
3776 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
3777 if (r == 0) {
3778 if (copy_to_user(uaddr, tmpbuf, mop->size))
3779 r = -EFAULT;
3780 }
3781 break;
3782 case KVM_S390_MEMOP_LOGICAL_WRITE:
3783 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
92c96321
DH
3784 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
3785 mop->size, GACC_STORE);
41408c28
TH
3786 break;
3787 }
3788 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
3789 r = -EFAULT;
3790 break;
3791 }
3792 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
3793 break;
3794 default:
3795 r = -EINVAL;
3796 }
3797
3798 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
3799
3800 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
3801 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
3802
3803 vfree(tmpbuf);
3804 return r;
3805}
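
/*
 * Hypothetical userspace use of the handler above, reading 256 bytes
 * from a guest logical address (vcpu_fd is assumed; fields follow the
 * uapi struct kvm_s390_mem_op):
 *
 *	char buf[256];
 *	struct kvm_s390_mem_op op = {
 *		.gaddr = 0x1000,
 *		.size = sizeof(buf),
 *		.op = KVM_S390_MEMOP_LOGICAL_READ,
 *		.buf = (__u64)(uintptr_t)buf,
 *		.ar = 0,
 *	};
 *	int r = ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
 *	// r < 0: errno-style error (-E2BIG above 64k, -EFAULT, ...);
 *	// r > 0: guest access exception, optionally injected when
 *	// KVM_S390_MEMOP_F_INJECT_EXCEPTION is set in .flags.
 */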
3806
5cb0944c
PB
3807long kvm_arch_vcpu_async_ioctl(struct file *filp,
3808 unsigned int ioctl, unsigned long arg)
b0c632db
HC
3809{
3810 struct kvm_vcpu *vcpu = filp->private_data;
3811 void __user *argp = (void __user *)arg;
3812
93736624 3813 switch (ioctl) {
47b43c52
JF
3814 case KVM_S390_IRQ: {
3815 struct kvm_s390_irq s390irq;
3816
47b43c52 3817 if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
9b062471
CD
3818 return -EFAULT;
3819 return kvm_s390_inject_vcpu(vcpu, &s390irq);
47b43c52 3820 }
93736624 3821 case KVM_S390_INTERRUPT: {
ba5c1e9b 3822 struct kvm_s390_interrupt s390int;
383d0b05 3823 struct kvm_s390_irq s390irq;
ba5c1e9b
CO
3824
3825 if (copy_from_user(&s390int, argp, sizeof(s390int)))
9b062471 3826 return -EFAULT;
383d0b05
JF
3827 if (s390int_to_s390irq(&s390int, &s390irq))
3828 return -EINVAL;
9b062471 3829 return kvm_s390_inject_vcpu(vcpu, &s390irq);
ba5c1e9b 3830 }
9b062471 3831 }
5cb0944c
PB
3832 return -ENOIOCTLCMD;
3833}
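
/*
 * Userspace sketch for the async KVM_S390_IRQ path above, injecting
 * an emergency signal into the vcpu (vcpu_fd assumed; layout per the
 * uapi struct kvm_s390_irq):
 *
 *	struct kvm_s390_irq irq = {
 *		.type = KVM_S390_INT_EMERGENCY,
 *		.u.emerg.code = 0,
 *	};
 *	ioctl(vcpu_fd, KVM_S390_IRQ, &irq);
 */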
3834
3835long kvm_arch_vcpu_ioctl(struct file *filp,
3836 unsigned int ioctl, unsigned long arg)
3837{
3838 struct kvm_vcpu *vcpu = filp->private_data;
3839 void __user *argp = (void __user *)arg;
3840 int idx;
3841 long r;
9b062471
CD
3842
3843 vcpu_load(vcpu);
3844
3845 switch (ioctl) {
b0c632db 3846 case KVM_S390_STORE_STATUS:
800c1065 3847 idx = srcu_read_lock(&vcpu->kvm->srcu);
bc923cc9 3848 r = kvm_s390_vcpu_store_status(vcpu, arg);
800c1065 3849 srcu_read_unlock(&vcpu->kvm->srcu, idx);
bc923cc9 3850 break;
b0c632db
HC
3851 case KVM_S390_SET_INITIAL_PSW: {
3852 psw_t psw;
3853
bc923cc9 3854 r = -EFAULT;
b0c632db 3855 if (copy_from_user(&psw, argp, sizeof(psw)))
bc923cc9
AK
3856 break;
3857 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
3858 break;
b0c632db
HC
3859 }
3860 case KVM_S390_INITIAL_RESET:
bc923cc9
AK
3861 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
3862 break;
14eebd91
CO
3863 case KVM_SET_ONE_REG:
3864 case KVM_GET_ONE_REG: {
3865 struct kvm_one_reg reg;
3866 r = -EFAULT;
3867 if (copy_from_user(&reg, argp, sizeof(reg)))
3868 break;
3869 if (ioctl == KVM_SET_ONE_REG)
3870 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
3871 else
3872 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
3873 break;
3874 }
27e0393f
CO
3875#ifdef CONFIG_KVM_S390_UCONTROL
3876 case KVM_S390_UCAS_MAP: {
3877 struct kvm_s390_ucas_mapping ucasmap;
3878
3879 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
3880 r = -EFAULT;
3881 break;
3882 }
3883
3884 if (!kvm_is_ucontrol(vcpu->kvm)) {
3885 r = -EINVAL;
3886 break;
3887 }
3888
3889 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
3890 ucasmap.vcpu_addr, ucasmap.length);
3891 break;
3892 }
3893 case KVM_S390_UCAS_UNMAP: {
3894 struct kvm_s390_ucas_mapping ucasmap;
3895
3896 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
3897 r = -EFAULT;
3898 break;
3899 }
3900
3901 if (!kvm_is_ucontrol(vcpu->kvm)) {
3902 r = -EINVAL;
3903 break;
3904 }
3905
3906 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
3907 ucasmap.length);
3908 break;
3909 }
3910#endif
ccc7910f 3911 case KVM_S390_VCPU_FAULT: {
527e30b4 3912 r = gmap_fault(vcpu->arch.gmap, arg, 0);
ccc7910f
CO
3913 break;
3914 }
d6712df9
CH
3915 case KVM_ENABLE_CAP:
3916 {
3917 struct kvm_enable_cap cap;
3918 r = -EFAULT;
3919 if (copy_from_user(&cap, argp, sizeof(cap)))
3920 break;
3921 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
3922 break;
3923 }
41408c28
TH
3924 case KVM_S390_MEM_OP: {
3925 struct kvm_s390_mem_op mem_op;
3926
3927 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
3928 r = kvm_s390_guest_mem_op(vcpu, &mem_op);
3929 else
3930 r = -EFAULT;
3931 break;
3932 }
816c7667
JF
3933 case KVM_S390_SET_IRQ_STATE: {
3934 struct kvm_s390_irq_state irq_state;
3935
3936 r = -EFAULT;
3937 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
3938 break;
3939 if (irq_state.len > VCPU_IRQS_MAX_BUF ||
3940 irq_state.len == 0 ||
3941 irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
3942 r = -EINVAL;
3943 break;
3944 }
bb64da9a 3945 /* do not use irq_state.flags, it will break old QEMUs */
816c7667
JF
3946 r = kvm_s390_set_irq_state(vcpu,
3947 (void __user *) irq_state.buf,
3948 irq_state.len);
3949 break;
3950 }
3951 case KVM_S390_GET_IRQ_STATE: {
3952 struct kvm_s390_irq_state irq_state;
3953
3954 r = -EFAULT;
3955 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
3956 break;
3957 if (irq_state.len == 0) {
3958 r = -EINVAL;
3959 break;
3960 }
bb64da9a 3961 /* do not use irq_state.flags, it will break old QEMUs */
816c7667
JF
3962 r = kvm_s390_get_irq_state(vcpu,
3963 (__u8 __user *) irq_state.buf,
3964 irq_state.len);
3965 break;
3966 }
b0c632db 3967 default:
3e6afcf1 3968 r = -ENOTTY;
b0c632db 3969 }
9b062471
CD
3970
3971 vcpu_put(vcpu);
bc923cc9 3972 return r;
b0c632db
HC
3973}
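
/*
 * Hypothetical migration-style use of KVM_S390_GET_IRQ_STATE handled
 * above (vcpu_fd assumed; the buffer length is kept a multiple of
 * sizeof(struct kvm_s390_irq), matching the SET path's checks):
 *
 *	__u8 buf[16 * sizeof(struct kvm_s390_irq)];
 *	struct kvm_s390_irq_state irq_state = {
 *		.buf = (__u64)(uintptr_t)buf,
 *		.len = sizeof(buf),
 *	};
 *	int r = ioctl(vcpu_fd, KVM_S390_GET_IRQ_STATE, &irq_state);
 *	// r < 0: error (e.g. buffer too small); otherwise replay the
 *	// saved state on the target vcpu via KVM_S390_SET_IRQ_STATE.
 */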
3974
5b1c1493
CO
3975int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
3976{
3977#ifdef CONFIG_KVM_S390_UCONTROL
3978 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
3979 && (kvm_is_ucontrol(vcpu->kvm))) {
3980 vmf->page = virt_to_page(vcpu->arch.sie_block);
3981 get_page(vmf->page);
3982 return 0;
3983 }
3984#endif
3985 return VM_FAULT_SIGBUS;
3986}
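
/*
 * For ucontrol guests the fault handler above lets userspace map the
 * SIE control block of a vcpu. Hypothetical sketch (vcpu_fd and
 * page_size = sysconf(_SC_PAGESIZE) assumed):
 *
 *	struct kvm_s390_sie_block *sie =
 *		mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		     vcpu_fd, KVM_S390_SIE_PAGE_OFFSET * page_size);
 */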
3987
5587027c
AK
3988int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
3989 unsigned long npages)
db3fe4eb
TY
3990{
3991 return 0;
3992}
3993
b0c632db 3994/* Section: memory related */
f7784b8e
MT
3995int kvm_arch_prepare_memory_region(struct kvm *kvm,
3996 struct kvm_memory_slot *memslot,
09170a49 3997 const struct kvm_userspace_memory_region *mem,
7b6195a9 3998 enum kvm_mr_change change)
b0c632db 3999{
dd2887e7
NW
 4000 /* A few sanity checks. Memory slots have to start and end on a
 4001 segment boundary (1MB). The memory in userland may be fragmented
 4002 into various different vmas. It is okay to mmap() and munmap()
 4003 parts of this slot at any time after this call */
b0c632db 4004
598841ca 4005 if (mem->userspace_addr & 0xffffful)
b0c632db
HC
4006 return -EINVAL;
4007
598841ca 4008 if (mem->memory_size & 0xffffful)
b0c632db
HC
4009 return -EINVAL;
4010
a3a92c31
DD
4011 if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
4012 return -EINVAL;
4013
f7784b8e
MT
4014 return 0;
4015}
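
/*
 * Userspace consequence of the checks above: userspace_addr and
 * memory_size must both be 1MB aligned, and the slot must fit below
 * the configured mem_limit. Hypothetical sketch (vm_fd and a 1MB
 * aligned host_mem mapping assumed):
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size = 256 << 20,		// multiple of 1MB
 *		.userspace_addr = (__u64)host_mem,	// 1MB aligned
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 */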
4016
4017void kvm_arch_commit_memory_region(struct kvm *kvm,
09170a49 4018 const struct kvm_userspace_memory_region *mem,
8482644a 4019 const struct kvm_memory_slot *old,
f36f3f28 4020 const struct kvm_memory_slot *new,
8482644a 4021 enum kvm_mr_change change)
f7784b8e 4022{
f7850c92 4023 int rc;
f7784b8e 4024
2cef4deb
CB
4025 /* If the basics of the memslot do not change, we do not want
4026 * to update the gmap. Every update causes several unnecessary
4027 * segment translation exceptions. This is usually handled just
4028 * fine by the normal fault handler + gmap, but it will also
4029 * cause faults on the prefix page of running guest CPUs.
4030 */
4031 if (old->userspace_addr == mem->userspace_addr &&
4032 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
4033 old->npages * PAGE_SIZE == mem->memory_size)
4034 return;
598841ca
CO
4035
4036 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
4037 mem->guest_phys_addr, mem->memory_size);
4038 if (rc)
ea2cdd27 4039 pr_warn("failed to commit memory region\n");
598841ca 4040 return;
b0c632db
HC
4041}
4042
60a37709
AY
4043static inline unsigned long nonhyp_mask(int i)
4044{
4045 unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
4046
4047 return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
4048}
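
/*
 * Mechanics of nonhyp_mask() above: bits 2i..2i+1 (counted from the
 * MSB) of sclp.hmfai yield a value n in 0..3 for facility doubleword
 * i; the returned mask 0x0000ffffffffffffUL >> (n * 16) clears the
 * first (n + 1) * 16 facility bits of that doubleword. In
 * kvm_s390_init() this keeps only the host facility bits that need
 * no hypervisor support (an interpretation of "nonhyp", not a
 * statement from this file).
 */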
4049
3491caf2
CB
4050void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
4051{
4052 vcpu->valid_wakeup = false;
4053}
4054
b0c632db
HC
4055static int __init kvm_s390_init(void)
4056{
60a37709
AY
4057 int i;
4058
07197fd0
DH
4059 if (!sclp.has_sief2) {
4060 pr_info("SIE not available\n");
4061 return -ENODEV;
4062 }
4063
60a37709 4064 for (i = 0; i < 16; i++)
c3b9e3e1 4065 kvm_s390_fac_base[i] |=
60a37709
AY
4066 S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);
4067
9d8d5786 4068 return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
b0c632db
HC
4069}
4070
4071static void __exit kvm_s390_exit(void)
4072{
4073 kvm_exit();
4074}
4075
4076module_init(kvm_s390_init);
4077module_exit(kvm_s390_exit);
566af940
CH
4078
4079/*
4080 * Enable autoloading of the kvm module.
4081 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
4082 * since x86 takes a different approach.
4083 */
4084#include <linux/miscdevice.h>
4085MODULE_ALIAS_MISCDEV(KVM_MINOR);
4086MODULE_ALIAS("devname:kvm");