KVM: s390: Use insn_length() to calculate length of instruction
[linux-2.6-block.git] arch/s390/kvm/kvm-s390.c
/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
        { "halt_wakeup", VCPU_STAT(halt_wakeup) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "instruction_stctl", VCPU_STAT(instruction_stctl) },
        { "instruction_stctg", VCPU_STAT(instruction_stctg) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_external_call", VCPU_STAT(deliver_external_call) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_essa", VCPU_STAT(instruction_essa) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_tprot", VCPU_STAT(instruction_tprot) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
        { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
        { "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
        { "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
        { "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
        { "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
        { "diagnose_10", VCPU_STAT(diagnose_10) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { "diagnose_9c", VCPU_STAT(diagnose_9c) },
        { NULL }
};

/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[] = {
        0xff82fffbf4fc2000UL,
        0x005c000000000000UL,
};

unsigned long kvm_s390_fac_list_mask_size(void)
{
        BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
        return ARRAY_SIZE(kvm_s390_fac_list_mask);
}

static struct gmap_notifier gmap_notifier;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
        /* every s390 is virtualization enabled ;-) */
        return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
        gmap_notifier.notifier_call = kvm_gmap_notifier;
        gmap_register_ipte_notifier(&gmap_notifier);
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
        gmap_unregister_ipte_notifier(&gmap_notifier);
}

int kvm_arch_init(void *opaque)
{
        /* Register floating interrupt controller interface. */
        return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_S390_PSW:
        case KVM_CAP_S390_GMAP:
        case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_CAP_S390_UCONTROL:
#endif
        case KVM_CAP_ASYNC_PF:
        case KVM_CAP_SYNC_REGS:
        case KVM_CAP_ONE_REG:
        case KVM_CAP_ENABLE_CAP:
        case KVM_CAP_S390_CSS_SUPPORT:
        case KVM_CAP_IRQFD:
        case KVM_CAP_IOEVENTFD:
        case KVM_CAP_DEVICE_CTRL:
        case KVM_CAP_ENABLE_CAP_VM:
        case KVM_CAP_S390_IRQCHIP:
        case KVM_CAP_VM_ATTRIBUTES:
        case KVM_CAP_MP_STATE:
        case KVM_CAP_S390_USER_SIGP:
                r = 1;
                break;
        case KVM_CAP_NR_VCPUS:
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
        case KVM_CAP_NR_MEMSLOTS:
                r = KVM_USER_MEM_SLOTS;
                break;
        case KVM_CAP_S390_COW:
                r = MACHINE_HAS_ESOP;
                break;
        default:
                r = 0;
        }
        return r;
}

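/*
 * Transfer dirty information from the gmap's host page tables into the
 * memslot's dirty bitmap: every guest frame whose host mapping carries a
 * (software) dirty bit gets marked dirty in KVM's log.
 */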
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
                                    struct kvm_memory_slot *memslot)
{
        gfn_t cur_gfn, last_gfn;
        unsigned long address;
        struct gmap *gmap = kvm->arch.gmap;

        down_read(&gmap->mm->mmap_sem);
        /* Loop over all guest pages */
        last_gfn = memslot->base_gfn + memslot->npages;
        for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
                address = gfn_to_hva_memslot(memslot, cur_gfn);

                if (gmap_test_and_clear_dirty(address, gmap))
                        mark_page_dirty(kvm, cur_gfn);
        }
        up_read(&gmap->mm->mmap_sem);
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        int r;
        unsigned long n;
        struct kvm_memory_slot *memslot;
        int is_dirty = 0;

        mutex_lock(&kvm->slots_lock);

        r = -EINVAL;
        if (log->slot >= KVM_USER_MEM_SLOTS)
                goto out;

        memslot = id_to_memslot(kvm->memslots, log->slot);
        r = -ENOENT;
        if (!memslot->dirty_bitmap)
                goto out;

        kvm_s390_sync_dirty_log(kvm, memslot);
        r = kvm_get_dirty_log(kvm, log, &is_dirty);
        if (r)
                goto out;

        /* Clear the dirty log */
        if (is_dirty) {
                n = kvm_dirty_bitmap_bytes(memslot);
                memset(memslot->dirty_bitmap, 0, n);
        }
        r = 0;
out:
        mutex_unlock(&kvm->slots_lock);
        return r;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
        case KVM_CAP_S390_IRQCHIP:
                kvm->arch.use_irqchip = 1;
                r = 0;
                break;
        case KVM_CAP_S390_USER_SIGP:
                kvm->arch.user_sigp = 1;
                r = 0;
                break;
        default:
                r = -EINVAL;
                break;
        }
        return r;
}

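/*
 * Handlers for the KVM_S390_VM_MEM_CTRL attribute group: report the
 * current guest memory limit, toggle/reset collaborative memory
 * management (CMMA), and replace the gmap when userspace lowers the
 * address space limit.
 */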
static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        switch (attr->attr) {
        case KVM_S390_VM_MEM_LIMIT_SIZE:
                ret = 0;
                if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
                        ret = -EFAULT;
                break;
        default:
                ret = -ENXIO;
                break;
        }
        return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;
        unsigned int idx;
        switch (attr->attr) {
        case KVM_S390_VM_MEM_ENABLE_CMMA:
                ret = -EBUSY;
                mutex_lock(&kvm->lock);
                if (atomic_read(&kvm->online_vcpus) == 0) {
                        kvm->arch.use_cmma = 1;
                        ret = 0;
                }
                mutex_unlock(&kvm->lock);
                break;
        case KVM_S390_VM_MEM_CLR_CMMA:
                mutex_lock(&kvm->lock);
                idx = srcu_read_lock(&kvm->srcu);
                s390_reset_cmma(kvm->arch.gmap->mm);
                srcu_read_unlock(&kvm->srcu, idx);
                mutex_unlock(&kvm->lock);
                ret = 0;
                break;
        case KVM_S390_VM_MEM_LIMIT_SIZE: {
                unsigned long new_limit;

                if (kvm_is_ucontrol(kvm))
                        return -EINVAL;

                if (get_user(new_limit, (u64 __user *)attr->addr))
                        return -EFAULT;

                if (new_limit > kvm->arch.gmap->asce_end)
                        return -E2BIG;

                ret = -EBUSY;
                mutex_lock(&kvm->lock);
                if (atomic_read(&kvm->online_vcpus) == 0) {
                        /* gmap_alloc will round the limit up */
                        struct gmap *new = gmap_alloc(current->mm, new_limit);

                        if (!new) {
                                ret = -ENOMEM;
                        } else {
                                gmap_free(kvm->arch.gmap);
                                new->private = kvm;
                                kvm->arch.gmap = new;
                                ret = 0;
                        }
                }
                mutex_unlock(&kvm->lock);
                break;
        }
        default:
                ret = -ENXIO;
                break;
        }
        return ret;
}

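/*
 * KVM_S390_VM_CRYPTO attributes: enabling AES/DEA key wrapping generates
 * fresh random wrapping key masks in the CRYCB, disabling clears them.
 * Every vcpu is then reconfigured and kicked out of SIE so the new
 * setup takes effect.
 */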
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
        struct kvm_vcpu *vcpu;
        int i;

        if (!test_kvm_facility(kvm, 76))
                return -EINVAL;

        mutex_lock(&kvm->lock);
        switch (attr->attr) {
        case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
                get_random_bytes(
                        kvm->arch.crypto.crycb->aes_wrapping_key_mask,
                        sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
                kvm->arch.crypto.aes_kw = 1;
                break;
        case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
                get_random_bytes(
                        kvm->arch.crypto.crycb->dea_wrapping_key_mask,
                        sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
                kvm->arch.crypto.dea_kw = 1;
                break;
        case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
                kvm->arch.crypto.aes_kw = 0;
                memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
                        sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
                break;
        case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
                kvm->arch.crypto.dea_kw = 0;
                memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
                        sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
                break;
        default:
                mutex_unlock(&kvm->lock);
                return -ENXIO;
        }

        kvm_for_each_vcpu(i, vcpu, kvm) {
                kvm_s390_vcpu_crypto_setup(vcpu);
                exit_sie(vcpu);
        }
        mutex_unlock(&kvm->lock);
        return 0;
}

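/*
 * The guest TOD clock is kept as an offset: arch.epoch = guest TOD -
 * host TOD, and every vcpu's SIE block carries that epoch difference.
 * Only the low 64 bits of the TOD are supported here, so the "high"
 * attribute must be zero.
 */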
static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
        u8 gtod_high;

        if (copy_from_user(&gtod_high, (void __user *)attr->addr,
                           sizeof(gtod_high)))
                return -EFAULT;

        if (gtod_high != 0)
                return -EINVAL;

        return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
        struct kvm_vcpu *cur_vcpu;
        unsigned int vcpu_idx;
        u64 host_tod, gtod;
        int r;

        if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
                return -EFAULT;

        r = store_tod_clock(&host_tod);
        if (r)
                return r;

        mutex_lock(&kvm->lock);
        kvm->arch.epoch = gtod - host_tod;
        kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) {
                cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
                exit_sie(cur_vcpu);
        }
        mutex_unlock(&kvm->lock);
        return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        if (attr->flags)
                return -EINVAL;

        switch (attr->attr) {
        case KVM_S390_VM_TOD_HIGH:
                ret = kvm_s390_set_tod_high(kvm, attr);
                break;
        case KVM_S390_VM_TOD_LOW:
                ret = kvm_s390_set_tod_low(kvm, attr);
                break;
        default:
                ret = -ENXIO;
                break;
        }
        return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
        u8 gtod_high = 0;

        if (copy_to_user((void __user *)attr->addr, &gtod_high,
                         sizeof(gtod_high)))
                return -EFAULT;

        return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
        u64 host_tod, gtod;
        int r;

        r = store_tod_clock(&host_tod);
        if (r)
                return r;

        gtod = host_tod + kvm->arch.epoch;
        if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
                return -EFAULT;

        return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        if (attr->flags)
                return -EINVAL;

        switch (attr->attr) {
        case KVM_S390_VM_TOD_HIGH:
                ret = kvm_s390_get_tod_high(kvm, attr);
                break;
        case KVM_S390_VM_TOD_LOW:
                ret = kvm_s390_get_tod_low(kvm, attr);
                break;
        default:
                ret = -ENXIO;
                break;
        }
        return ret;
}

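/*
 * KVM_S390_VM_CPU_MODEL attributes: the "processor" variant describes
 * what is presented to the guest (cpuid, ibc, facility list) and may be
 * changed by userspace as long as no vcpu has been created yet; the
 * "machine" variant reports the host's capabilities and facility mask.
 */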
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
        struct kvm_s390_vm_cpu_processor *proc;
        int ret = 0;

        mutex_lock(&kvm->lock);
        if (atomic_read(&kvm->online_vcpus)) {
                ret = -EBUSY;
                goto out;
        }
        proc = kzalloc(sizeof(*proc), GFP_KERNEL);
        if (!proc) {
                ret = -ENOMEM;
                goto out;
        }
        if (!copy_from_user(proc, (void __user *)attr->addr,
                            sizeof(*proc))) {
                memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
                       sizeof(struct cpuid));
                kvm->arch.model.ibc = proc->ibc;
                memcpy(kvm->arch.model.fac->list, proc->fac_list,
                       S390_ARCH_FAC_LIST_SIZE_BYTE);
        } else
                ret = -EFAULT;
        kfree(proc);
out:
        mutex_unlock(&kvm->lock);
        return ret;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret = -ENXIO;

        switch (attr->attr) {
        case KVM_S390_VM_CPU_PROCESSOR:
                ret = kvm_s390_set_processor(kvm, attr);
                break;
        }
        return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
        struct kvm_s390_vm_cpu_processor *proc;
        int ret = 0;

        proc = kzalloc(sizeof(*proc), GFP_KERNEL);
        if (!proc) {
                ret = -ENOMEM;
                goto out;
        }
        memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
        proc->ibc = kvm->arch.model.ibc;
        memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
        if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
                ret = -EFAULT;
        kfree(proc);
out:
        return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
        struct kvm_s390_vm_cpu_machine *mach;
        int ret = 0;

        mach = kzalloc(sizeof(*mach), GFP_KERNEL);
        if (!mach) {
                ret = -ENOMEM;
                goto out;
        }
        get_cpu_id((struct cpuid *) &mach->cpuid);
        mach->ibc = sclp_get_ibc();
        memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
               S390_ARCH_FAC_LIST_SIZE_BYTE);
        memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
               S390_ARCH_FAC_LIST_SIZE_BYTE);
        if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
                ret = -EFAULT;
        kfree(mach);
out:
        return ret;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret = -ENXIO;

        switch (attr->attr) {
        case KVM_S390_VM_CPU_PROCESSOR:
                ret = kvm_s390_get_processor(kvm, attr);
                break;
        case KVM_S390_VM_CPU_MACHINE:
                ret = kvm_s390_get_machine(kvm, attr);
                break;
        }
        return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        switch (attr->group) {
        case KVM_S390_VM_MEM_CTRL:
                ret = kvm_s390_set_mem_control(kvm, attr);
                break;
        case KVM_S390_VM_TOD:
                ret = kvm_s390_set_tod(kvm, attr);
                break;
        case KVM_S390_VM_CPU_MODEL:
                ret = kvm_s390_set_cpu_model(kvm, attr);
                break;
        case KVM_S390_VM_CRYPTO:
                ret = kvm_s390_vm_set_crypto(kvm, attr);
                break;
        default:
                ret = -ENXIO;
                break;
        }

        return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        switch (attr->group) {
        case KVM_S390_VM_MEM_CTRL:
                ret = kvm_s390_get_mem_control(kvm, attr);
                break;
        case KVM_S390_VM_TOD:
                ret = kvm_s390_get_tod(kvm, attr);
                break;
        case KVM_S390_VM_CPU_MODEL:
                ret = kvm_s390_get_cpu_model(kvm, attr);
                break;
        default:
                ret = -ENXIO;
                break;
        }

        return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        switch (attr->group) {
        case KVM_S390_VM_MEM_CTRL:
                switch (attr->attr) {
                case KVM_S390_VM_MEM_ENABLE_CMMA:
                case KVM_S390_VM_MEM_CLR_CMMA:
                case KVM_S390_VM_MEM_LIMIT_SIZE:
                        ret = 0;
                        break;
                default:
                        ret = -ENXIO;
                        break;
                }
                break;
        case KVM_S390_VM_TOD:
                switch (attr->attr) {
                case KVM_S390_VM_TOD_LOW:
                case KVM_S390_VM_TOD_HIGH:
                        ret = 0;
                        break;
                default:
                        ret = -ENXIO;
                        break;
                }
                break;
        case KVM_S390_VM_CPU_MODEL:
                switch (attr->attr) {
                case KVM_S390_VM_CPU_PROCESSOR:
                case KVM_S390_VM_CPU_MACHINE:
                        ret = 0;
                        break;
                default:
                        ret = -ENXIO;
                        break;
                }
                break;
        case KVM_S390_VM_CRYPTO:
                switch (attr->attr) {
                case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
                case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
                case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
                case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
                        ret = 0;
                        break;
                default:
                        ret = -ENXIO;
                        break;
                }
                break;
        default:
                ret = -ENXIO;
                break;
        }

        return ret;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        struct kvm_device_attr attr;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        case KVM_ENABLE_CAP: {
                struct kvm_enable_cap cap;
                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        break;
                r = kvm_vm_ioctl_enable_cap(kvm, &cap);
                break;
        }
        case KVM_CREATE_IRQCHIP: {
                struct kvm_irq_routing_entry routing;

                r = -EINVAL;
                if (kvm->arch.use_irqchip) {
                        /* Set up dummy routing. */
                        memset(&routing, 0, sizeof(routing));
                        kvm_set_irq_routing(kvm, &routing, 0, 0);
                        r = 0;
                }
                break;
        }
        case KVM_SET_DEVICE_ATTR: {
                r = -EFAULT;
                if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
                        break;
                r = kvm_s390_vm_set_attr(kvm, &attr);
                break;
        }
        case KVM_GET_DEVICE_ATTR: {
                r = -EFAULT;
                if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
                        break;
                r = kvm_s390_vm_get_attr(kvm, &attr);
                break;
        }
        case KVM_HAS_DEVICE_ATTR: {
                r = -EFAULT;
                if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
                        break;
                r = kvm_s390_vm_has_attr(kvm, &attr);
                break;
        }
        default:
                r = -ENOTTY;
        }

        return r;
}

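/*
 * Query the AP configuration with PQAP(QCI). The instruction is emitted
 * as a raw .long; the EX_TABLE entry catches a possible exception from
 * the instruction, in which case the zeroed config buffer simply reports
 * no capabilities. kvm_s390_apxa_installed() uses this below to choose
 * the CRYCB format.
 */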
static int kvm_s390_query_ap_config(u8 *config)
{
        u32 fcn_code = 0x04000000UL;
        u32 cc = 0;

        memset(config, 0, 128);
        asm volatile(
                "lgr 0,%1\n"
                "lgr 2,%2\n"
                ".long 0xb2af0000\n"            /* PQAP(QCI) */
                "0: ipm %0\n"
                "srl %0,28\n"
                "1:\n"
                EX_TABLE(0b, 1b)
                : "+r" (cc)
                : "r" (fcn_code), "r" (config)
                : "cc", "0", "2", "memory"
        );

        return cc;
}

static int kvm_s390_apxa_installed(void)
{
        u8 config[128];
        int cc;

        if (test_facility(2) && test_facility(12)) {
                cc = kvm_s390_query_ap_config(config);

                if (cc)
                        pr_err("PQAP(QCI) failed with cc=%d", cc);
                else
                        return config[0] & 0x40;
        }

        return 0;
}

static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
        kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

        if (kvm_s390_apxa_installed())
                kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
        else
                kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
{
        get_cpu_id(cpu_id);
        cpu_id->version = 0xff;
}

static int kvm_s390_crypto_init(struct kvm *kvm)
{
        if (!test_kvm_facility(kvm, 76))
                return 0;

        kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
                                         GFP_KERNEL | GFP_DMA);
        if (!kvm->arch.crypto.crycb)
                return -ENOMEM;

        kvm_s390_set_crycb_format(kvm);

        /* Enable AES/DEA protected key functions by default */
        kvm->arch.crypto.aes_kw = 1;
        kvm->arch.crypto.dea_kw = 1;
        get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
                         sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
        get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
                         sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));

        return 0;
}

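/*
 * VM creation: allocate the SCA (sca_offset staggers consecutive
 * allocations within the zeroed page, presumably to spread SCAs across
 * cache lines), register the s390 debug feature, build the facility
 * mask/list page and CPU model, initialize the crypto control block
 * and - unless this is a ucontrol VM - allocate the guest gmap.
 */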
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        int i, rc;
        char debug_name[16];
        static unsigned long sca_offset;

        rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
        if (type & ~KVM_VM_S390_UCONTROL)
                goto out_err;
        if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
                goto out_err;
#else
        if (type)
                goto out_err;
#endif

        rc = s390_enable_sie();
        if (rc)
                goto out_err;

        rc = -ENOMEM;

        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        if (!kvm->arch.sca)
                goto out_err;
        spin_lock(&kvm_lock);
        sca_offset = (sca_offset + 16) & 0x7f0;
        kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
        spin_unlock(&kvm_lock);

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_nodbf;

        /*
         * The architectural maximum amount of facilities is 16 kbit. To store
         * this amount, 2 kbyte of memory is required. Thus we need a full
         * page to hold the guest facility list (arch.model.fac->list) and the
         * facility mask (arch.model.fac->mask). Its address size has to be
         * 31 bits and word aligned.
         */
        kvm->arch.model.fac =
                (struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!kvm->arch.model.fac)
                goto out_nofac;

        /* Populate the facility mask initially. */
        memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
               S390_ARCH_FAC_LIST_SIZE_BYTE);
        for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
                if (i < kvm_s390_fac_list_mask_size())
                        kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
                else
                        kvm->arch.model.fac->mask[i] = 0UL;
        }

        /* Populate the facility list initially. */
        memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
               S390_ARCH_FAC_LIST_SIZE_BYTE);

        kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
        kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff;

        if (kvm_s390_crypto_init(kvm) < 0)
                goto out_crypto;

        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);
        init_waitqueue_head(&kvm->arch.ipte_wq);
        mutex_init(&kvm->arch.ipte_mutex);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");

        if (type & KVM_VM_S390_UCONTROL) {
                kvm->arch.gmap = NULL;
        } else {
                kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
                if (!kvm->arch.gmap)
                        goto out_nogmap;
                kvm->arch.gmap->private = kvm;
                kvm->arch.gmap->pfault_enabled = 0;
        }

        kvm->arch.css_support = 0;
        kvm->arch.use_irqchip = 0;
        kvm->arch.epoch = 0;

        spin_lock_init(&kvm->arch.start_stop_lock);

        return 0;
out_nogmap:
        kfree(kvm->arch.crypto.crycb);
out_crypto:
        free_page((unsigned long)kvm->arch.model.fac);
out_nofac:
        debug_unregister(kvm->arch.dbf);
out_nodbf:
        free_page((unsigned long)(kvm->arch.sca));
out_err:
        return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
        trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
        kvm_s390_clear_local_irqs(vcpu);
        kvm_clear_async_pf_completion_queue(vcpu);
        if (!kvm_is_ucontrol(vcpu->kvm)) {
                clear_bit(63 - vcpu->vcpu_id,
                          (unsigned long *) &vcpu->kvm->arch.sca->mcn);
                if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
                    (__u64) vcpu->arch.sie_block)
                        vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
        }
        smp_mb();

        if (kvm_is_ucontrol(vcpu->kvm))
                gmap_free(vcpu->arch.gmap);

        if (kvm_s390_cmma_enabled(vcpu->kvm))
                kvm_s390_vcpu_unsetup_cmma(vcpu);
        free_page((unsigned long)(vcpu->arch.sie_block));

        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_destroy(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);
        mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_free_vcpus(kvm);
        free_page((unsigned long)kvm->arch.model.fac);
        free_page((unsigned long)(kvm->arch.sca));
        debug_unregister(kvm->arch.dbf);
        kfree(kvm->arch.crypto.crycb);
        if (!kvm_is_ucontrol(kvm))
                gmap_free(kvm->arch.gmap);
        kvm_s390_destroy_adapters(kvm);
        kvm_s390_clear_float_irqs(kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
        vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
        if (!vcpu->arch.gmap)
                return -ENOMEM;
        vcpu->arch.gmap->private = vcpu->kvm;

        return 0;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
        kvm_clear_async_pf_completion_queue(vcpu);
        vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
                                    KVM_SYNC_GPRS |
                                    KVM_SYNC_ACRS |
                                    KVM_SYNC_CRS |
                                    KVM_SYNC_ARCH0 |
                                    KVM_SYNC_PFAULT;

        if (kvm_is_ucontrol(vcpu->kvm))
                return __kvm_ucontrol_vcpu_init(vcpu);

        return 0;
}

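/*
 * Context switch of FPU and access registers: on load the host state is
 * saved and the guest state installed, on put the reverse. The
 * CPUSTAT_RUNNING flag tells the SIE block whether this vcpu is
 * currently loaded on a physical CPU.
 */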
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
        save_fp_regs(vcpu->arch.host_fpregs.fprs);
        save_access_regs(vcpu->arch.host_acrs);
        restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
        restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
        restore_access_regs(vcpu->run->s.regs.acrs);
        gmap_enable(vcpu->arch.gmap);
        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
        gmap_disable(vcpu->arch.gmap);
        save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
        save_fp_regs(vcpu->arch.guest_fpregs.fprs);
        save_access_regs(vcpu->run->s.regs.acrs);
        restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
        restore_fp_regs(vcpu->arch.host_fpregs.fprs);
        restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals initial cpu reset in pop, but we don't switch to ESA */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        kvm_s390_set_prefix(vcpu, 0);
        vcpu->arch.sie_block->cputm = 0UL;
        vcpu->arch.sie_block->ckc = 0UL;
        vcpu->arch.sie_block->todpr = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0] = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
        vcpu->arch.sie_block->pp = 0;
        vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
        kvm_clear_async_pf_completion_queue(vcpu);
        if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
                kvm_s390_vcpu_stop(vcpu);
        kvm_s390_clear_local_irqs(vcpu);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
        mutex_lock(&vcpu->kvm->lock);
        vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
        mutex_unlock(&vcpu->kvm->lock);
        if (!kvm_is_ucontrol(vcpu->kvm))
                vcpu->arch.gmap = vcpu->kvm->arch.gmap;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
        if (!test_kvm_facility(vcpu->kvm, 76))
                return;

        vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);

        if (vcpu->kvm->arch.crypto.aes_kw)
                vcpu->arch.sie_block->ecb3 |= ECB3_AES;
        if (vcpu->kvm->arch.crypto.dea_kw)
                vcpu->arch.sie_block->ecb3 |= ECB3_DEA;

        vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
        free_page(vcpu->arch.sie_block->cbrlo);
        vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
        vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
        if (!vcpu->arch.sie_block->cbrlo)
                return -ENOMEM;

        vcpu->arch.sie_block->ecb2 |= 0x80;
        vcpu->arch.sie_block->ecb2 &= ~0x08;
        return 0;
}

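/*
 * Initial setup of the SIE control block. The numeric ecb/ecb2/eca
 * constants mirror bit definitions of the SIE control block and enable
 * optional interpretation facilities: e.g. transactional execution is
 * only offered when facilities 50 and 73 are present, and SIGP/SIIF
 * interpretation only when the SCLP reports support for it.
 */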
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        int rc = 0;

        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
                                                    CPUSTAT_SM |
                                                    CPUSTAT_STOPPED |
                                                    CPUSTAT_GED);
        vcpu->arch.sie_block->ecb = 6;
        if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
                vcpu->arch.sie_block->ecb |= 0x10;

        vcpu->arch.sie_block->ecb2 = 8;
        vcpu->arch.sie_block->eca = 0xC1002000U;
        if (sclp_has_siif())
                vcpu->arch.sie_block->eca |= 1;
        if (sclp_has_sigpif())
                vcpu->arch.sie_block->eca |= 0x10000000U;
        vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

        if (kvm_s390_cmma_enabled(vcpu->kvm)) {
                rc = kvm_s390_vcpu_setup_cmma(vcpu);
                if (rc)
                        return rc;
        }
        hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

        mutex_lock(&vcpu->kvm->lock);
        vcpu->arch.cpu_id = vcpu->kvm->arch.model.cpu_id;
        vcpu->arch.sie_block->ibc = vcpu->kvm->arch.model.ibc;
        mutex_unlock(&vcpu->kvm->lock);

        kvm_s390_vcpu_crypto_setup(vcpu);

        return rc;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu;
        struct sie_page *sie_page;
        int rc = -EINVAL;

        if (id >= KVM_MAX_VCPUS)
                goto out;

        rc = -ENOMEM;

        vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
        if (!vcpu)
                goto out;

        sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
        if (!sie_page)
                goto out_free_cpu;

        vcpu->arch.sie_block = &sie_page->sie_block;
        vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

        vcpu->arch.sie_block->icpua = id;
        if (!kvm_is_ucontrol(kvm)) {
                if (!kvm->arch.sca) {
                        WARN_ON_ONCE(1);
                        goto out_free_cpu;
                }
                if (!kvm->arch.sca->cpu[id].sda)
                        kvm->arch.sca->cpu[id].sda =
                                (__u64) vcpu->arch.sie_block;
                vcpu->arch.sie_block->scaoh =
                        (__u32)(((__u64)kvm->arch.sca) >> 32);
                vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
                set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
        }
        vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->list;

        spin_lock_init(&vcpu->arch.local_int.lock);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        vcpu->arch.local_int.wq = &vcpu->wq;
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_sie_block;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);
        trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

        return vcpu;
out_free_sie_block:
        free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
        kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
        return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        return kvm_s390_vcpu_has_irq(vcpu, 0);
}

void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
        atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
        atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
        while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
                cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
        s390_vcpu_block(vcpu);
        exit_sie(vcpu);
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
        int i;
        struct kvm *kvm = gmap->private;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                /* match against both prefix pages */
                if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
                        VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
                        kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
                        exit_sie_sync(vcpu);
                }
        }
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
                                           struct kvm_one_reg *reg)
{
        int r = -EINVAL;

        switch (reg->id) {
        case KVM_REG_S390_TODPR:
                r = put_user(vcpu->arch.sie_block->todpr,
                             (u32 __user *)reg->addr);
                break;
        case KVM_REG_S390_EPOCHDIFF:
                r = put_user(vcpu->arch.sie_block->epoch,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CPU_TIMER:
                r = put_user(vcpu->arch.sie_block->cputm,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CLOCK_COMP:
                r = put_user(vcpu->arch.sie_block->ckc,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PFTOKEN:
                r = put_user(vcpu->arch.pfault_token,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PFCOMPARE:
                r = put_user(vcpu->arch.pfault_compare,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PFSELECT:
                r = put_user(vcpu->arch.pfault_select,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PP:
                r = put_user(vcpu->arch.sie_block->pp,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_GBEA:
                r = put_user(vcpu->arch.sie_block->gbea,
                             (u64 __user *)reg->addr);
                break;
        default:
                break;
        }

        return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
                                           struct kvm_one_reg *reg)
{
        int r = -EINVAL;

        switch (reg->id) {
        case KVM_REG_S390_TODPR:
                r = get_user(vcpu->arch.sie_block->todpr,
                             (u32 __user *)reg->addr);
                break;
        case KVM_REG_S390_EPOCHDIFF:
                r = get_user(vcpu->arch.sie_block->epoch,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CPU_TIMER:
                r = get_user(vcpu->arch.sie_block->cputm,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CLOCK_COMP:
                r = get_user(vcpu->arch.sie_block->ckc,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PFTOKEN:
                r = get_user(vcpu->arch.pfault_token,
                             (u64 __user *)reg->addr);
                if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
                        kvm_clear_async_pf_completion_queue(vcpu);
                break;
        case KVM_REG_S390_PFCOMPARE:
                r = get_user(vcpu->arch.pfault_compare,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PFSELECT:
                r = get_user(vcpu->arch.pfault_select,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PP:
                r = get_user(vcpu->arch.sie_block->pp,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_GBEA:
                r = get_user(vcpu->arch.sie_block->gbea,
                             (u64 __user *)reg->addr);
                break;
        default:
                break;
        }

        return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
        kvm_s390_vcpu_initial_reset(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
        memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
        restore_access_regs(vcpu->run->s.regs.acrs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
        memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        if (test_fp_ctl(fpu->fpc))
                return -EINVAL;
        memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
        vcpu->arch.guest_fpregs.fpc = fpu->fpc;
        restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
        restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
        fpu->fpc = vcpu->arch.guest_fpregs.fpc;
        return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
        int rc = 0;

        if (!is_vcpu_stopped(vcpu))
                rc = -EBUSY;
        else {
                vcpu->run->psw_mask = psw.mask;
                vcpu->run->psw_addr = psw.addr;
        }
        return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL; /* not implemented yet */
}

#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
                              KVM_GUESTDBG_USE_HW_BP | \
                              KVM_GUESTDBG_ENABLE)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        int rc = 0;

        vcpu->guest_debug = 0;
        kvm_s390_clear_bp_data(vcpu);

        if (dbg->control & ~VALID_GUESTDBG_FLAGS)
                return -EINVAL;

        if (dbg->control & KVM_GUESTDBG_ENABLE) {
                vcpu->guest_debug = dbg->control;
                /* enforce guest PER */
                atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

                if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
                        rc = kvm_s390_import_bp_data(vcpu, dbg);
        } else {
                atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
                vcpu->arch.guestdbg.last_bp = 0;
        }

        if (rc) {
                vcpu->guest_debug = 0;
                kvm_s390_clear_bp_data(vcpu);
                atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
        }

        return rc;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        /* CHECK_STOP and LOAD are not supported yet */
        return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
                                       KVM_MP_STATE_OPERATING;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        int rc = 0;

        /* user space knows about this interface - let it control the state */
        vcpu->kvm->arch.user_cpu_state_ctrl = 1;

        switch (mp_state->mp_state) {
        case KVM_MP_STATE_STOPPED:
                kvm_s390_vcpu_stop(vcpu);
                break;
        case KVM_MP_STATE_OPERATING:
                kvm_s390_vcpu_start(vcpu);
                break;
        case KVM_MP_STATE_LOAD:
        case KVM_MP_STATE_CHECK_STOP:
                /* fall through - CHECK_STOP and LOAD are not supported yet */
        default:
                rc = -ENXIO;
        }

        return rc;
}

bool kvm_s390_cmma_enabled(struct kvm *kvm)
{
        if (!MACHINE_IS_LPAR)
                return false;
        /* only enable for z10 and later */
        if (!MACHINE_HAS_EDAT1)
                return false;
        if (!kvm->arch.use_cmma)
                return false;
        return true;
}

static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
        return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
        s390_vcpu_unblock(vcpu);
        /*
         * We use MMU_RELOAD just to re-arm the ipte notifier for the
         * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
         * This ensures that the ipte instruction for this request has
         * already finished. We might race against a second unmapper that
         * wants to set the blocking bit. Let's just retry the request loop.
         */
        if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
                int rc;
                rc = gmap_ipte_notify(vcpu->arch.gmap,
                                      kvm_s390_get_prefix(vcpu),
                                      PAGE_SIZE * 2);
                if (rc)
                        return rc;
                goto retry;
        }

        if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
                vcpu->arch.sie_block->ihcpu = 0xffff;
                goto retry;
        }

        if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
                if (!ibs_enabled(vcpu)) {
                        trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
                        atomic_set_mask(CPUSTAT_IBS,
                                        &vcpu->arch.sie_block->cpuflags);
                }
                goto retry;
        }

        if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
                if (ibs_enabled(vcpu)) {
                        trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
                        atomic_clear_mask(CPUSTAT_IBS,
                                          &vcpu->arch.sie_block->cpuflags);
                }
                goto retry;
        }

        /* nothing to do, just clear the request */
        clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

        return 0;
}

/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
        return gmap_fault(vcpu->arch.gmap, gpa,
                          writable ? FAULT_FLAG_WRITE : 0);
}

static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
                                      unsigned long token)
{
        struct kvm_s390_interrupt inti;
        struct kvm_s390_irq irq;

        if (start_token) {
                irq.u.ext.ext_params2 = token;
                irq.type = KVM_S390_INT_PFAULT_INIT;
                WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
        } else {
                inti.type = KVM_S390_INT_PFAULT_DONE;
                inti.parm64 = token;
                WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
        }
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
                                     struct kvm_async_pf *work)
{
        trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
        __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
                                 struct kvm_async_pf *work)
{
        trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
        __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
                               struct kvm_async_pf *work)
{
        /* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
        /*
         * s390 will always inject the page directly,
         * but we still want check_async_completion to clean up
         */
        return true;
}

static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
        hva_t hva;
        struct kvm_arch_async_pf arch;
        int rc;

        if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
                return 0;
        if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
            vcpu->arch.pfault_compare)
                return 0;
        if (psw_extint_disabled(vcpu))
                return 0;
        if (kvm_s390_vcpu_has_irq(vcpu, 0))
                return 0;
        if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
                return 0;
        if (!vcpu->arch.gmap->pfault_enabled)
                return 0;

        hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
        hva += current->thread.gmap_addr & ~PAGE_MASK;
        if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
                return 0;

        rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
        return rc;
}

static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
        int rc, cpuflags;

        /*
         * On s390 notifications for arriving pages will be delivered directly
         * to the guest but the housekeeping for completed pfaults is
         * handled outside the worker.
         */
        kvm_check_async_pf_completion(vcpu);

        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

        if (need_resched())
                schedule();

        if (test_cpu_flag(CIF_MCCK_PENDING))
                s390_handle_mcck();

        if (!kvm_is_ucontrol(vcpu->kvm)) {
                rc = kvm_s390_deliver_pending_interrupts(vcpu);
                if (rc)
                        return rc;
        }

        rc = kvm_s390_handle_requests(vcpu);
        if (rc)
                return rc;

        if (guestdbg_enabled(vcpu)) {
                kvm_s390_backup_guest_per_regs(vcpu);
                kvm_s390_patch_guest_per_regs(vcpu);
        }

        vcpu->arch.sie_block->icptcode = 0;
        cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
        VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
        trace_kvm_s390_sie_enter(vcpu, cpuflags);

        return 0;
}

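/*
 * Note: __rewind_psw() with a negative offset moves the PSW forward;
 * insn_length() derives the instruction length (2, 4 or 6 bytes) from
 * the two leftmost bits of the opcode, which is what lets us step past
 * the faulting instruction before injecting the addressing exception.
 */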
static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
{
        psw_t *psw = &vcpu->arch.sie_block->gpsw;
        u8 opcode;
        int rc;

        VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
        trace_kvm_s390_sie_fault(vcpu);

        /*
         * We want to inject an addressing exception, which is defined as a
         * suppressing or terminating exception. However, since we came here
         * by a DAT access exception, the PSW still points to the faulting
         * instruction since DAT exceptions are nullifying. So we've got
         * to look up the current opcode to get the length of the instruction
         * to be able to forward the PSW.
         */
        rc = read_guest(vcpu, psw->addr, &opcode, 1);
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);
        psw->addr = __rewind_psw(*psw, -insn_length(opcode));

        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}

static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
        int rc = -1;

        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
        trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

        if (guestdbg_enabled(vcpu))
                kvm_s390_restore_guest_per_regs(vcpu);

        if (exit_reason >= 0) {
                rc = 0;
        } else if (kvm_is_ucontrol(vcpu->kvm)) {
                vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
                vcpu->run->s390_ucontrol.trans_exc_code =
                                                current->thread.gmap_addr;
                vcpu->run->s390_ucontrol.pgm_code = 0x10;
                rc = -EREMOTE;

        } else if (current->thread.gmap_pfault) {
                trace_kvm_s390_major_guest_pfault(vcpu);
                current->thread.gmap_pfault = 0;
                if (kvm_arch_setup_async_pf(vcpu)) {
                        rc = 0;
                } else {
                        gpa_t gpa = current->thread.gmap_addr;
                        rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
                }
        }

        if (rc == -1)
                rc = vcpu_post_run_fault_in_sie(vcpu);

        memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

        if (rc == 0) {
                if (kvm_is_ucontrol(vcpu->kvm))
                        /* Don't exit for host interrupts. */
                        rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
                else
                        rc = kvm_handle_sie_intercept(vcpu);
        }

        return rc;
}

1800static int __vcpu_run(struct kvm_vcpu *vcpu)
1801{
1802 int rc, exit_reason;
1803
800c1065
TH
1804 /*
1805 * We try to hold kvm->srcu during most of vcpu_run (except when run-
1806 * ning the guest), so that memslots (and other stuff) are protected
1807 */
1808 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
1809
a76ccff6
TH
1810 do {
1811 rc = vcpu_pre_run(vcpu);
1812 if (rc)
1813 break;
3fb4c40f 1814
800c1065 1815 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
a76ccff6
TH
1816 /*
1817 * As PF_VCPU will be used in fault handler, between
1818 * guest_enter and guest_exit should be no uaccess.
1819 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}

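/* pull registers that userspace marked dirty in kvm_run into the vcpu state */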
static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
	}
	kvm_run->kvm_dirty_regs = 0;
}

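/* mirror of sync_regs(): expose the current vcpu state to userspace */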
static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		return 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

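	/*
	 * Without user-controlled cpu states the kernel auto-starts the
	 * vcpu here; with them, running a stopped vcpu is a userspace bug.
	 */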
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		return -EINVAL;
	}

	sync_regs(vcpu, kvm_run);

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed;
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	store_regs(vcpu, kvm_run);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}

/*
 * store status at address
 * we handle two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	unsigned int px;
	u64 clkcomp;
	int rc;

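	/* location 163 holds the architectural-mode id; 1 means z/Architecture */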
	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = SAVE_AREA_BASE;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
	}
	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
			     vcpu->arch.guest_fpregs.fprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
			      &vcpu->arch.sie_block->gpsw, 16);
	px = kvm_s390_get_prefix(vcpu);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
			      &px, 4);
	rc |= write_guest_abs(vcpu,
			      gpa + offsetof(struct save_area, fp_ctrl_reg),
			      &vcpu->arch.guest_fpregs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
			      &vcpu->arch.sie_block->todpr, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
			      &vcpu->arch.sie_block->cputm, 8);
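	/* the save area expects the clock comparator shifted right by 8 bits */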
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Let's update our copies before we save
	 * them into the save area.
	 */
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}

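/*
 * IBS enable/disable goes through vcpu requests: drop a possibly pending
 * opposite request, queue the new one, and kick the vcpu out of SIE so
 * the request is processed.
 */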
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		__disable_ibs_on_vcpu(vcpu);
	}
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}

void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
	kvm_s390_clear_stop_irq(vcpu);

	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	__disable_ibs_on_vcpu(vcpu);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/* A few sanity checks. Memory slots have to start and end at a
	   segment boundary (1 MB). The memory in userland may be fragmented
	   into various different vmas, and it is fine to mmap() and munmap()
	   in this slot at any time after this call. */

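	/* 0xfffff == 1 MB - 1: reject anything not 1 MB aligned */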
	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}

static int __init kvm_s390_init(void)
{
	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");