arch/s390/kvm/kvm-s390.c

/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 *            Christian Borntraeger <borntraeger@de.ibm.com>
 *            Heiko Carstens <heiko.carstens@de.ibm.com>
 *            Christian Ehrhardt <ehrhardt@de.ibm.com>
 *            Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/etr.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define KMSG_COMPONENT "kvm-s390"
#undef pr_fmt
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

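/* per-VCPU event counters below are exported by the generic KVM code via debugfs */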
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
	{ "instruction_sie", VCPU_STAT(instruction_sie) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ "diagnose_258", VCPU_STAT(diagnose_258) },
	{ "diagnose_308", VCPU_STAT(diagnose_308) },
	{ "diagnose_500", VCPU_STAT(diagnose_500) },
	{ NULL }
};

/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[16] = {
	0xffe6000000000000UL,
	0x005e000000000000UL,
};

unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm->arch.epoch -= *delta;
		kvm_for_each_vcpu(i, vcpu, kvm) {
			vcpu->arch.sie_block->epoch -= *delta;
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

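/*
 * Probe a single PERFORM LOCKED OPERATION subfunction. Bit 0x100 in the
 * function code register selects "test bit" mode, so the parameter
 * registers are not accessed; cc 0 means the subfunction is available.
 */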
static inline int plo_test_bit(unsigned char nr)
{
	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
	int cc = 3; /* subfunction not available */

	asm volatile(
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (r0)
		: "cc");
	return cc == 0;
}

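/*
 * Probe which optional facilities and query subfunctions the host
 * provides (PLO, PTFF, the CPACF crypto functions) and derive the set
 * of CPU features that can be offered to guests, including the ones
 * required for nested (vSIE) execution.
 */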
static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		etr_ptff(kvm_s390_available_subfunc.ptff, ETR_PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PPNO, kvm_s390_available_subfunc.ppno);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3))
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
}

int kvm_arch_init(void *opaque)
{
	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
		debug_unregister(kvm_s390_dbf);
		return -ENOMEM;
	}

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

void kvm_arch_exit(void)
{
	debug_unregister(kvm_s390_dbf);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	default:
		r = 0;
	}
	return r;
}

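/*
 * Transfer collected dirty bits from the gmap to the memslot's dirty
 * bitmap so that KVM_GET_DIRTY_LOG can report them to user space.
 */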
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (test_and_clear_guest_dirty(gmap->mm, address))
			mark_page_dirty(kvm, cur_gfn);
		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		ret = -EBUSY;
		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

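/*
 * Toggle AES/DEA protected-key wrapping for the whole VM. Enabling
 * generates fresh wrapping key masks; all VCPUs are kicked out of SIE
 * afterwards so they pick up the updated crypto control block.
 */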
static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

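/*
 * Set the guest CPU model (cpuid, IBC, facility list). Only possible
 * before the first VCPU has been created; the requested IBC value is
 * clamped to the range the machine actually supports.
 */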
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		if (lowest_ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;
	int ret = -EBUSY;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (!atomic_read(&kvm->online_vcpus)) {
		bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
			    KVM_S390_VM_CPU_FEAT_NR_BITS);
		ret = 0;
	}
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	/*
	 * Once supported by kernel + hw, we have to store the subfunctions
	 * in kvm->arch and remember that user space configured them.
	 */
	return -ENXIO;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_set_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_set_processor_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	proc->cpuid = kvm->arch.model.cpuid;
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_get_machine_feat(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat,
		    kvm_s390_available_cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	/*
	 * Once we can actually configure subfunctions (kernel + hw support),
	 * we have to check if they were already set by user space, if so copy
	 * them from kvm->arch.
	 */
	return -ENXIO;
}

static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
					struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_get_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_FEAT:
		ret = kvm_s390_get_machine_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_get_processor_subfunc(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		ret = kvm_s390_get_machine_subfunc(kvm, attr);
		break;
	}
	return ret;
}

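/* dispatchers for the KVM_{SET,GET,HAS}_DEVICE_ATTR ioctls on the VM fd */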
static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			ret = sclp.has_cmma ? 0 : -ENXIO;
			break;
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		case KVM_S390_VM_CPU_MACHINE_FEAT:
		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
			ret = 0;
			break;
		/* configuring subfunctions is not supported yet */
		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

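/*
 * Read guest storage keys into a user buffer. Returns
 * KVM_S390_GET_SKEYS_NONE when the guest is not using storage keys at
 * all, so user space can skip the transfer entirely.
 */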
static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_use_skey(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	down_read(&current->mm->mmap_sem);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		r = get_guest_storage_key(current->mm, hva, &keys[i]);
		if (r)
			break;
	}
	up_read(&current->mm->mmap_sem);

	if (!r) {
		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
				 sizeof(uint8_t) * args->count);
		if (r)
			r = -EFAULT;
	}

	kvfree(keys);
	return r;
}

static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	down_read(&current->mm->mmap_sem);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			break;
		}

		r = set_guest_storage_key(current->mm, hva, keys[i], 0);
		if (r)
			break;
	}
	up_read(&current->mm->mmap_sem);
out:
	kvfree(keys);
	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

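/*
 * Query the Adjunct Processor configuration via PQAP(QCI). The
 * instruction may not be available, so a fixup section turns a
 * potential exception into a non-zero condition code.
 */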
static int kvm_s390_query_ap_config(u8 *config)
{
	u32 fcn_code = 0x04000000UL;
	u32 cc = 0;

	memset(config, 0, 128);
	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: ipm %0\n"
		"srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}

static int kvm_s390_apxa_installed(void)
{
	u8 config[128];
	int cc;

	if (test_facility(12)) {
		cc = kvm_s390_query_ap_config(config);

		if (cc)
			pr_err("PQAP(QCI) failed with cc=%d", cc);
		else
			return config[0] & 0x40;
	}

	return 0;
}

static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

static u64 kvm_s390_get_initial_cpuid(void)
{
	struct cpuid cpuid;

	get_cpu_id(&cpuid);
	cpuid.version = 0xff;
	return *((u64 *) &cpuid);
}

static void kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return;

	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
	kvm_s390_set_crycb_format(kvm);

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
}

static void sca_dispose(struct kvm *kvm)
{
	if (kvm->arch.use_esca)
		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
	else
		free_page((unsigned long)(kvm->arch.sca));
	kvm->arch.sca = NULL;
}

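/*
 * Create and initialize a new VM: allocate the basic SCA, the debug
 * feature, facility mask/list, the crypto control block and - unless
 * this is a ucontrol VM - the guest address space (gmap). The static
 * sca_offset staggers SCA placement within the allocated page across
 * subsequent VM creations.
 */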
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	gfp_t alloc_flags = GFP_KERNEL;
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	ratelimit_state_init(&kvm->arch.sthyi_limit, 5 * HZ, 500);

	kvm->arch.use_esca = 0; /* start with basic SCA */
	if (!sclp.has_64bscao)
		alloc_flags |= GFP_DMA;
	rwlock_init(&kvm->arch.sca_lock);
	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	sca_offset += 16;
	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
		sca_offset = 0;
	kvm->arch.sca = (struct bsca_block *)
			((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_err;

	kvm->arch.sie_page2 =
	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.sie_page2)
		goto out_err;

	/* Populate the facility mask initially. */
	memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac_mask[i] = 0UL;
	}

	/* Populate the facility list initially. */
	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
	memcpy(kvm->arch.model.fac_list, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

	set_kvm_facility(kvm->arch.model.fac_mask, 74);
	set_kvm_facility(kvm->arch.model.fac_list, 74);

	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
	kvm->arch.model.ibc = sclp.ibc & 0x0fff;

	kvm_s390_crypto_init(kvm);

	spin_lock_init(&kvm->arch.float_int.lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "vm created with type %lu", type);

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
	} else {
		if (sclp.hamax == U64_MAX)
			kvm->arch.mem_limit = TASK_MAX_SIZE;
		else
			kvm->arch.mem_limit = min_t(unsigned long, TASK_MAX_SIZE,
						    sclp.hamax + 1);
		kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
		if (!kvm->arch.gmap)
			goto out_err;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);
	kvm_s390_vsie_init(kvm);
	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);

	return 0;
out_err:
	free_page((unsigned long)kvm->arch.sie_page2);
	debug_unregister(kvm->arch.dbf);
	sca_dispose(kvm);
	KVM_EVENT(3, "creation of vm failed: %d", rc);
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm))
		sca_del_vcpu(vcpu);

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_remove(vcpu->arch.gmap);

	if (vcpu->kvm->arch.use_cmma)
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	sca_dispose(kvm);
	debug_unregister(kvm->arch.dbf);
	free_page((unsigned long)kvm->arch.sie_page2);
	if (!kvm_is_ucontrol(kvm))
		gmap_remove(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
	kvm_s390_vsie_destroy(kvm);
	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_create(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

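/*
 * Remove/insert a VCPU's SIE block from/into the system control area.
 * The SCA is read-mostly: only the switch from the basic to the
 * extended SCA takes the lock for writing.
 */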
static void sca_del_vcpu(struct kvm_vcpu *vcpu)
{
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

static void sca_add_vcpu(struct kvm_vcpu *vcpu)
{
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
		vcpu->arch.sie_block->ecb2 |= 0x04U;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

/* Basic SCA to Extended SCA data copy routines */
static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
{
	d->sda = s->sda;
	d->sigp_ctrl.c = s->sigp_ctrl.c;
	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
}

static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
{
	int i;

	d->ipte_control = s->ipte_control;
	d->mcn[0] = s->mcn;
	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
}

static int sca_switch_to_extended(struct kvm *kvm)
{
	struct bsca_block *old_sca = kvm->arch.sca;
	struct esca_block *new_sca;
	struct kvm_vcpu *vcpu;
	unsigned int vcpu_idx;
	u32 scaol, scaoh;

	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
	if (!new_sca)
		return -ENOMEM;

	scaoh = (u32)((u64)(new_sca) >> 32);
	scaol = (u32)(u64)(new_sca) & ~0x3fU;

	kvm_s390_vcpu_block_all(kvm);
	write_lock(&kvm->arch.sca_lock);

	sca_copy_b_to_e(new_sca, old_sca);

	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
		vcpu->arch.sie_block->scaoh = scaoh;
		vcpu->arch.sie_block->scaol = scaol;
		vcpu->arch.sie_block->ecb2 |= 0x04U;
	}
	kvm->arch.sca = new_sca;
	kvm->arch.use_esca = 1;

	write_unlock(&kvm->arch.sca_lock);
	kvm_s390_vcpu_unblock_all(kvm);

	free_page((unsigned long)old_sca);

	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
		 old_sca, kvm->arch.sca);
	return 0;
}

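/*
 * Check whether a VCPU id still fits into the SCA; if the basic SCA is
 * full and the machine supports it, transparently switch to the
 * extended SCA first.
 */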
static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
{
	int rc;

	if (id < KVM_S390_BSCA_CPU_SLOTS)
		return true;
	if (!sclp.has_esca || !sclp.has_64bscao)
		return false;

	mutex_lock(&kvm->lock);
	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
	mutex_unlock(&kvm->lock);

	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
	/* fprs can be synchronized via vrs, even if the guest has no vx. With
	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
	 */
	if (MACHINE_HAS_VX)
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
	else
		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}

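/*
 * The guest CPU timer is accounted while the VCPU is running: on load
 * the current TOD clock is sampled into cputm_start, and on put the
 * elapsed delta is subtracted from the SIE block's cputm. A seqcount
 * lets other threads read a consistent value concurrently.
 */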
/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.cputm_start = get_tod_clock_fast();
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	vcpu->arch.cputm_start = 0;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
	vcpu->arch.cputm_enabled = true;
	__start_cpu_timer_accounting(vcpu);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
	__stop_cpu_timer_accounting(vcpu);
	vcpu->arch.cputm_enabled = false;
}

static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__enable_cpu_timer_accounting(vcpu);
	preempt_enable();
}

static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__disable_cpu_timer_accounting(vcpu);
	preempt_enable();
}

/* set the cpu timer - may only be called from the VCPU thread itself */
void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	if (vcpu->arch.cputm_enabled)
		vcpu->arch.cputm_start = get_tod_clock_fast();
	vcpu->arch.sie_block->cputm = cputm;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
	preempt_enable();
}

/* update and get the cpu timer - can also be called from other VCPU threads */
__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
{
	unsigned int seq;
	__u64 value;

	if (unlikely(!vcpu->arch.cputm_enabled))
		return vcpu->arch.sie_block->cputm;

	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	do {
		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
		/*
		 * If the writer would ever execute a read in the critical
		 * section, e.g. in irq context, we have a deadlock.
		 */
		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
		value = vcpu->arch.sie_block->cputm;
		/* if cputm_start is 0, accounting is being started/stopped */
		if (likely(vcpu->arch.cputm_start))
			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
	preempt_enable();
	return value;
}

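/*
 * Context switch between host and guest register state: lazily swap
 * the FPU/vector registers and access registers, and (re)enable the
 * guest address space when the VCPU thread is scheduled in.
 */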
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	/* Save host register state */
	save_fpu_regs();
	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;

	if (MACHINE_HAS_VX)
		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
	else
		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
	if (test_fp_ctl(current->thread.fpu.fpc))
		/* User space provided an invalid FPC, let's clear it */
		current->thread.fpu.fpc = 0;

	save_access_regs(vcpu->arch.host_acrs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.enabled_gmap);
	atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__start_cpu_timer_accounting(vcpu);
	vcpu->cpu = cpu;
}

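/*
 * Note on the lazy register switch above: vcpu_load copies no FP/VX
 * register contents. save_fpu_regs() parks the host's live state in
 * current->thread.fpu, then current->thread.fpu.regs is re-pointed at
 * the guest save area (vrs or fprs inside kvm_run), so the next lazy
 * restore picks up the guest values without an extra memcpy; vcpu_put
 * below reverses the pointer swap the same way.
 */
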
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->cpu = -1;
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__stop_cpu_timer_accounting(vcpu);
	atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	vcpu->arch.enabled_gmap = gmap_get_enabled();
	gmap_disable(vcpu->arch.enabled_gmap);

	/* Save guest register state */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;

	/* Restore host register state */
	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;

	save_access_regs(vcpu->run->s.regs.acrs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	kvm_s390_set_cpu_timer(vcpu, 0);
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	/* make sure the new fpc will be lazily loaded */
	save_fpu_regs();
	current->thread.fpu.fpc = 0;
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	preempt_disable();
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	preempt_enable();
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
		sca_add_vcpu(vcpu);
	}
	/* make vcpu_load load the right gmap on the first trigger */
	vcpu->arch.enabled_gmap = vcpu->arch.gmap;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	if (!test_kvm_facility(vcpu->kvm, 76))
		return;

	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);

	if (vcpu->kvm->arch.crypto.aes_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}

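/*
 * Sketch of the ECB2 bits used above, as this file uses them (raw
 * values rather than symbolic names): 0x80 enables collaborative
 * memory management, with CBRL pointing at the zeroed page allocated
 * above; 0x08 is PFMF interpretation, cleared here and re-enabled in
 * kvm_arch_vcpu_setup() only when facility 8 and sclp.has_pfmfi are
 * both available.
 */
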
static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;

	vcpu->arch.sie_block->ibc = model->ibc;
	if (test_kvm_facility(vcpu->kvm, 7))
		vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);

	if (test_kvm_facility(vcpu->kvm, 78))
		atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
	else if (test_kvm_facility(vcpu->kvm, 8))
		atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);

	kvm_s390_vcpu_setup_model(vcpu);

	/* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
	if (MACHINE_HAS_ESOP)
		vcpu->arch.sie_block->ecb |= 0x02;
	if (test_kvm_facility(vcpu->kvm, 9))
		vcpu->arch.sie_block->ecb |= 0x04;
	if (test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= 0x10;

	if (test_kvm_facility(vcpu->kvm, 8) && sclp.has_pfmfi)
		vcpu->arch.sie_block->ecb2 |= 0x08;
	vcpu->arch.sie_block->eca = 0x1002000U;
	if (sclp.has_cei)
		vcpu->arch.sie_block->eca |= 0x80000000U;
	if (sclp.has_ib)
		vcpu->arch.sie_block->eca |= 0x40000000U;
	if (sclp.has_siif)
		vcpu->arch.sie_block->eca |= 1;
	if (sclp.has_sigpif)
		vcpu->arch.sie_block->eca |= 0x10000000U;
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->arch.sie_block->ecb3 |= 0x01;
	if (test_kvm_facility(vcpu->kvm, 129)) {
		vcpu->arch.sie_block->eca |= 0x00020000;
		vcpu->arch.sie_block->ecd |= 0x20000000;
	}
	vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
	if (test_kvm_facility(vcpu->kvm, 74))
		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;

	if (vcpu->kvm->arch.use_cmma) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	/* the real guest size will always be smaller than msl */
	vcpu->arch.sie_block->mso = 0;
	vcpu->arch.sie_block->msl = sclp.hamax;

	vcpu->arch.sie_block->icpua = id;
	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	seqcount_init(&vcpu->arch.cputm_seqcount);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}

void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
{
	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
{
	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE to process a request synchronously */
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
{
	kvm_make_request(req, vcpu);
	kvm_s390_vcpu_request(vcpu);
}

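/*
 * Usage sketch, as in the IBS handling further down:
 *
 *	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
 *
 * raises the request and kicks the target out of SIE; PROG_REQUEST
 * stays set in prog20 until kvm_s390_vcpu_request_handled() runs from
 * kvm_s390_handle_requests(), so the VCPU cannot slip back into SIE
 * before the request has been processed.
 */
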
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end)
{
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;
	unsigned long prefix;
	int i;

	if (gmap_is_shadow(gmap))
		return;
	if (start >= 1UL << 31)
		/* We are only interested in prefix pages */
		return;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		prefix = kvm_s390_get_prefix(vcpu);
		if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
				   start, end);
			kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
		}
	}
}

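/*
 * The condition above is a plain interval-overlap test against the two
 * 4K prefix pages. Worked example: with a prefix of 0x8000 the pages
 * cover [0x8000, 0x9fff]; a notifier range with start = 0x9000 and
 * end = 0x9fff satisfies prefix <= end as well as
 * start <= prefix + 2 * PAGE_SIZE - 1, so that VCPU gets a
 * KVM_REQ_MMU_RELOAD.
 */
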
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(kvm_s390_get_cpu_timer(vcpu),
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;
	__u64 val;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(val, (u64 __user *)reg->addr);
		if (!r)
			kvm_s390_set_cpu_timer(vcpu, val);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	/* make sure the new values will be lazily loaded */
	save_fpu_regs();
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	current->thread.fpu.fpc = fpu->fpc;
	if (MACHINE_HAS_VX)
		convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs);
	else
		memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	/* make sure we have the latest values */
	save_fpu_regs();
	if (MACHINE_HAS_VX)
		convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs);
	else
		memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs));
	fpu->fpc = current->thread.fpu.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!is_vcpu_stopped(vcpu))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;
	if (!sclp.has_gpere)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	/* CHECK_STOP and LOAD are not supported yet */
	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				       KVM_MP_STATE_OPERATING;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
	case KVM_MP_STATE_CHECK_STOP:
		/* fall through - CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	return rc;
}

static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	kvm_s390_vcpu_request_handled(vcpu);
	if (!vcpu->requests)
		return 0;
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Let's just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_mprotect_notify(vcpu->arch.gmap,
					  kvm_s390_get_prefix(vcpu),
					  PAGE_SIZE * 2, PROT_WRITE);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_or(CPUSTAT_IBS,
				  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_andnot(CPUSTAT_IBS,
				      &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}

void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
{
	struct kvm_vcpu *vcpu;
	int i;

	mutex_lock(&kvm->lock);
	preempt_disable();
	kvm->arch.epoch = tod - get_tod_clock();
	kvm_s390_vcpu_block_all(kvm);
	kvm_for_each_vcpu(i, vcpu, kvm)
		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
	kvm_s390_vcpu_unblock_all(kvm);
	preempt_enable();
	mutex_unlock(&kvm->lock);
}

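/*
 * Epoch arithmetic sketch: the epoch is the (wrapping) difference
 * between the requested guest TOD and the host TOD, and SIE adds it
 * back whenever the guest reads the clock:
 *
 *	kvm->arch.epoch = tod - get_tod_clock();
 *	guest_tod       = get_tod_clock() + kvm->arch.epoch;
 *
 * Blocking all VCPUs while the per-VCPU copies are updated keeps the
 * guest from observing two different epochs at the same time.
 */
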
/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}

static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to clean up
	 */
	return true;
}

static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}

static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the housekeeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
	vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}

static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_pgm_info pgm_info = {
		.code = PGM_ADDRESSING,
	};
	u8 opcode, ilen;
	int rc;

	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
	trace_kvm_s390_sie_fault(vcpu);

	/*
	 * We want to inject an addressing exception, which is defined as a
	 * suppressing or terminating exception. However, since we came here
	 * by a DAT access exception, the PSW still points to the faulting
	 * instruction since DAT exceptions are nullifying. So we've got
	 * to look up the current opcode to get the length of the instruction
	 * to be able to forward the PSW.
	 */
	rc = read_guest_instr(vcpu, &opcode, 1);
	ilen = insn_length(opcode);
	if (rc < 0) {
		return rc;
	} else if (rc) {
		/* Instruction-Fetching Exceptions - we can't detect the ilen.
		 * Forward by arbitrary ilc, injection will take care of
		 * nullification if necessary.
		 */
		pgm_info = vcpu->arch.pgm;
		ilen = 4;
	}
	pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
	kvm_s390_forward_psw(vcpu, ilen);
	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
}

static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
	vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;

	if (vcpu->arch.sie_block->icptcode > 0) {
		int rc = kvm_handle_sie_intercept(vcpu);

		if (rc != -EOPNOTSUPP)
			return rc;
		vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
		vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		return -EREMOTE;
	} else if (exit_reason != -EFAULT) {
		vcpu->stat.exit_null++;
		return 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		return -EREMOTE;
	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu))
			return 0;
		return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
	}
	return vcpu_post_run_fault_in_sie(vcpu);
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when
	 * running the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in the fault handler, there
		 * should be no uaccess between guest_enter and guest_exit.
		 */
		local_irq_disable();
		__kvm_guest_enter();
		__disable_cpu_timer_accounting(vcpu);
		local_irq_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		local_irq_disable();
		__enable_cpu_timer_accounting(vcpu);
		__kvm_guest_exit();
		local_irq_enable();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}

static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
	}
	kvm_run->kvm_dirty_regs = 0;
}

static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
}

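/*
 * Userspace view of this pairing (sketch; the usual KVM_RUN loop is
 * assumed, and new_prefix/vcpu_fd are placeholder names): a field is
 * pushed into the guest by setting it in run->s.regs and flagging it
 * before the next entry,
 *
 *	run->s.regs.prefix = new_prefix;
 *	run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 *
 * sync_regs() consumes the flag on entry, and store_regs() writes the
 * current values back on every exit to userspace.
 */
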
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		return 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		return -EINVAL;
	}

	sync_regs(vcpu, kvm_run);
	enable_cpu_timer_accounting(vcpu);

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* userspace support is needed, kvm_run has been prepared */
		rc = 0;
	}

	disable_cpu_timer_accounting(vcpu);
	store_regs(vcpu, kvm_run);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	freg_t fprs[NUM_FPRS];
	unsigned int px;
	u64 clkcomp, cputm;
	int rc;

	px = kvm_s390_get_prefix(vcpu);
	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = 0;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = px;
	} else
		gpa -= __LC_FPREGS_SAVE_AREA;

	/* manually convert vector registers if necessary */
	if (MACHINE_HAS_VX) {
		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     fprs, 128);
	} else {
		rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
				     vcpu->run->s.regs.fprs, 128);
	}
	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
			      &vcpu->arch.sie_block->gpsw, 16);
	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
			      &px, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
			      &vcpu->run->s.regs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
			      &vcpu->arch.sie_block->todpr, 4);
	cputm = kvm_s390_get_cpu_timer(vcpu);
	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
			      &cputm, 8);
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Let's update our copies before we save
	 * them into the save area.
	 */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}

/*
 * store additional status at address
 */
int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
					unsigned long gpa)
{
	/* Only bits 0-53 are used for address formation */
	if (!(gpa & ~0x3ff))
		return 0;

	return write_guest_abs(vcpu, gpa & ~0x3ff,
			       (void *)&vcpu->run->s.regs.vrs, 512);
}

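/*
 * Address formation sketch for the check above: only bits 0-53 of the
 * operand are used, i.e. the save area is 1K-aligned and the low 10
 * bits are ignored (gpa & ~0x3ff). A gpa of 0x12345 therefore stores
 * the 512-byte vector save area at 0x12000, while an operand whose
 * significant bits are all zero designates no save area, so the store
 * is skipped.
 */
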
int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (!test_kvm_facility(vcpu->kvm, 129))
		return 0;

	/*
	 * The guest VXRS are in the host VXRS due to the lazy
	 * copying in vcpu load/put. We can simply call save_fpu_regs()
	 * to save the current register state because we are in the
	 * middle of a load/put cycle.
	 *
	 * Let's update our copies before we save them into the save area.
	 */
	save_fpu_regs();

	return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
}

static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		__disable_ibs_on_vcpu(vcpu);
	}
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	if (!sclp.has_ibs)
		return;
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
}

void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
	kvm_s390_clear_stop_irq(vcpu);

	atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	__disable_ibs_on_vcpu(vcpu);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
				  struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	void *tmpbuf = NULL;
	int r, srcu_idx;
	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
				    | KVM_S390_MEMOP_F_CHECK_ONLY;

	if (mop->flags & ~supported_flags)
		return -EINVAL;

	if (mop->size > MEM_OP_MAX_SIZE)
		return -E2BIG;

	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
		tmpbuf = vmalloc(mop->size);
		if (!tmpbuf)
			return -ENOMEM;
	}

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_FETCH);
			break;
		}
		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		if (r == 0) {
			if (copy_to_user(uaddr, tmpbuf, mop->size))
				r = -EFAULT;
		}
		break;
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar,
					    mop->size, GACC_STORE);
			break;
		}
		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
			r = -EFAULT;
			break;
		}
		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		break;
	default:
		r = -EINVAL;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);

	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);

	vfree(tmpbuf);
	return r;
}

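/*
 * Userspace sketch for KVM_S390_MEM_OP (error handling omitted;
 * buffer and vcpu_fd are placeholder names): read 256 bytes from
 * guest logical address 0x2000 via access register 0:
 *
 *	struct kvm_s390_mem_op op = {
 *		.gaddr = 0x2000,
 *		.size  = 256,
 *		.op    = KVM_S390_MEMOP_LOGICAL_READ,
 *		.buf   = (__u64)(unsigned long)buffer,
 *		.ar    = 0,
 *	};
 *	ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
 *
 * Adding KVM_S390_MEMOP_F_CHECK_ONLY to .flags skips the copy and only
 * verifies that the access would succeed.
 */
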
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_IRQ: {
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_guest_mem_op(vcpu, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	case KVM_S390_SET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
		    irq_state.len == 0 ||
		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
			r = -EINVAL;
			break;
		}
		r = kvm_s390_set_irq_state(vcpu,
					   (void __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_GET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len == 0) {
			r = -EINVAL;
			break;
		}
		r = kvm_s390_get_irq_state(vcpu,
					   (__u8 __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/*
	 * A few sanity checks. Memory slots have to start and end at a
	 * segment boundary (1MB). The memory in userland may be fragmented
	 * into various different vmas. It is okay to mmap() and munmap()
	 * stuff in this slot after doing this call at any time.
	 */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
		return -EINVAL;

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	int rc;

	/*
	 * If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		pr_warn("failed to commit memory region\n");
	return;
}

static inline unsigned long nonhyp_mask(int i)
{
	unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;

	return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
}

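/*
 * nonhyp_mask() arithmetic sketch: each i selects a 2-bit field of
 * sclp.hmfai (nonhyp_fai in 0..3), and "nonhyp_fai << 4" drops 16
 * facility bits per step:
 *
 *	nonhyp_fai = 0  ->  0x0000ffffffffffffUL
 *	nonhyp_fai = 1  ->  0x00000000ffffffffUL
 *	nonhyp_fai = 2  ->  0x000000000000ffffUL
 *	nonhyp_fai = 3  ->  0x0000000000000000UL
 *
 * Only facility bits that survive this mask are merged into
 * kvm_s390_fac_list_mask in kvm_s390_init() below.
 */
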
void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
{
	vcpu->valid_wakeup = false;
}

static int __init kvm_s390_init(void)
{
	int i;

	if (!sclp.has_sief2) {
		pr_info("SIE not available\n");
		return -ENODEV;
	}

	for (i = 0; i < 16; i++)
		kvm_s390_fac_list_mask[i] |=
			S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);

	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");