KVM: prepare for KVM_(S|G)ET_MP_STATE on other architectures
arch/s390/kvm/kvm-s390.c (linux-2.6-block.git)

/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};

unsigned long *vfacilities;
static struct gmap_notifier gmap_notifier;

/* test availability of vfacility */
int test_vfacility(unsigned long nr)
{
	return __test_facility(nr, (void *) vfacilities);
}

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IRQFD:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_VM_ATTRIBUTES:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	default:
		r = 0;
	}
	return r;
}
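
/*
 * Usage sketch (illustrative userspace side, not kernel code): the
 * capabilities above are probed with the generic KVM_CHECK_EXTENSION
 * ioctl on the /dev/kvm file descriptor, e.g.:
 *
 *	if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_SYNC_REGS) > 0)
 *		... the run-structure register sync below is available ...
 */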

static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	down_read(&gmap->mm->mmap_sem);
	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (gmap_test_and_clear_dirty(address, gmap))
			mark_page_dirty(kvm, cur_gfn);
	}
	up_read(&gmap->mm->mmap_sem);
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
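
/*
 * Usage sketch (illustrative userspace side, not kernel code): a dirty-page
 * tracking caller, e.g. for live migration, fetches and clears the bitmap
 * with the generic KVM_GET_DIRTY_LOG ioctl on the VM file descriptor;
 * slot_id and bitmap_buf are the caller's own:
 *
 *	struct kvm_dirty_log log = { .slot = slot_id,
 *				     .dirty_bitmap = bitmap_buf };
 *	ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 *
 * On s390 this first pulls dirty bits out of the gmap, as implemented above.
 */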

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
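
/*
 * Usage sketch (illustrative userspace side, not kernel code): the in-kernel
 * irqchip is opted into per VM via KVM_ENABLE_CAP, which reaches the handler
 * above through kvm_arch_vm_ioctl():
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_IRQCHIP };
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 */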

static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		page_table_reset_pgste(kvm->arch.gmap->mm, 0, TASK_SIZE, false);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
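
/*
 * Usage sketch (illustrative userspace side, not kernel code): CMMA must be
 * enabled through the VM attribute interface before any VCPU exists,
 * matching the online_vcpus == 0 check above:
 *
 *	struct kvm_device_attr attr = { .group = KVM_S390_VM_MEM_CTRL,
 *					.attr  = KVM_S390_VM_MEM_ENABLE_CMMA };
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */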

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_mem_control(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	return -ENXIO;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			kvm_set_irq_routing(kvm, &routing, 0, 0);
			r = 0;
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	sca_offset = (sca_offset + 16) & 0x7f0;
	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);
	init_waitqueue_head(&kvm->arch.ipte_wq);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm);
		if (!kvm->arch.gmap)
			goto out_nogmap;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);

	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}
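
/*
 * A note on the sca_offset rotation in kvm_arch_init_vm() above: each VM
 * gets its own zeroed page, but its SCA is placed at a rotating 16-byte
 * offset within that page (wrapping at 0x7f0), presumably so that the SCAs
 * of different guests do not all start on the same cache lines.
 */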

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (kvm_s390_cmma_enabled(vcpu->kvm))
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = gmap_alloc(current->mm);
		if (!vcpu->arch.gmap)
			return -ENOMEM;
		vcpu->arch.gmap->private = vcpu->kvm;
		return 0;
	}

	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS;
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	vcpu->arch.sie_block->ecb = 6;
	if (test_vfacility(50) && test_vfacility(73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca = 0xD1002000U;
	if (sclp_has_siif())
		vcpu->arch.sie_block->eca |= 1;
	vcpu->arch.sie_block->fac = (int) (long) vfacilities;
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
				      ICTL_TPROT;

	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return rc;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_cpu_has_interrupt(vcpu);
}

void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}
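
/*
 * The pattern used throughout this file: callers queue work for a VCPU
 * (typically via kvm_make_request()) and then call exit_sie_sync() to force
 * it out of SIE; kvm_s390_handle_requests() later calls s390_vcpu_unblock()
 * and processes the request before the next SIE entry.
 */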

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
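
/*
 * Usage sketch (illustrative userspace side, not kernel code): these
 * registers are accessed through the generic ONE_REG interface on the
 * VCPU file descriptor:
 *
 *	__u64 val;
 *	struct kvm_one_reg reg = { .id   = KVM_REG_S390_CPU_TIMER,
 *				   .addr = (__u64)&val };
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */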

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!is_vcpu_stopped(vcpu))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

bool kvm_s390_cmma_enabled(struct kvm *kvm)
{
	if (!MACHINE_IS_LPAR)
		return false;
	/* only enable for z10 and later */
	if (!MACHINE_HAS_EDAT1)
		return false;
	if (!kvm->arch.use_cmma)
		return false;
	return true;
}

static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	s390_vcpu_unblock(vcpu);
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Let's just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      kvm_s390_get_prefix(vcpu),
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_set_mask(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_clear_mask(CPUSTAT_IBS,
					  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	return 0;
}

/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	struct mm_struct *mm = current->mm;
	hva_t hva;
	long rc;

	hva = gmap_fault(gpa, vcpu->arch.gmap);
	if (IS_ERR_VALUE(hva))
		return (long)hva;
	down_read(&mm->mmap_sem);
	rc = get_user_pages(current, mm, hva, 1, writable, 0, NULL, NULL);
	up_read(&mm->mmap_sem);

	return rc < 0 ? rc : 0;
}

static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	inti.parm64 = token;

	if (start_token) {
		inti.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}
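
/*
 * Note the asymmetry above: the PFAULT_INIT token is injected into the
 * faulting VCPU itself, while PFAULT_DONE goes through kvm_s390_inject_vm()
 * as a floating interrupt, so the completion is not tied to one VCPU.
 */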

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}

static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_cpu_has_interrupt(vcpu))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}

static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest, but the housekeeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm))
		kvm_s390_deliver_pending_interrupts(vcpu);

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}

static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	int rc = -1;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	if (exit_reason >= 0) {
		rc = 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		rc = -EREMOTE;

	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu)) {
			rc = 0;
		} else {
			gpa_t gpa = current->thread.gmap_addr;
			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
		}
	}

	if (rc == -1) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		trace_kvm_s390_sie_fault(vcpu);
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			/* Don't exit for host interrupts. */
			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in the fault handler, there
		 * should be no uaccess between guest_enter and guest_exit.
		 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		return 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	kvm_s390_vcpu_start(vcpu);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
	case KVM_EXIT_S390_UCONTROL:
	case KVM_EXIT_S390_TSCH:
	case KVM_EXIT_DEBUG:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/*
		 * intercept was handled, but userspace support is needed;
		 * kvm_run has been prepared by the handler
		 */
		rc = 0;
	}

	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}
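
/*
 * Usage sketch (illustrative userspace side, not kernel code): with
 * KVM_CAP_SYNC_REGS, userspace can update e.g. the prefix through the
 * mmap'ed run structure instead of a separate ioctl, flagging it dirty
 * before KVM_RUN; the dirty-regs handling above consumes it:
 *
 *	run->s.regs.prefix = new_prefix;
 *	run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 */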

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	unsigned int px;
	u64 clkcomp;
	int rc;

	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = SAVE_AREA_BASE;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
	}
	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
			     vcpu->arch.guest_fpregs.fprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
			      &vcpu->arch.sie_block->gpsw, 16);
	px = kvm_s390_get_prefix(vcpu);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
			      &px, 4);
	rc |= write_guest_abs(vcpu,
			      gpa + offsetof(struct save_area, fp_ctrl_reg),
			      &vcpu->arch.guest_fpregs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
			      &vcpu->arch.sie_block->todpr, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
			      &vcpu->arch.sie_block->cputm, 8);
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}
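
/*
 * The write_guest_abs() return codes above are deliberately OR'ed together:
 * if any single save-area field fails to store, the whole status store
 * collapses into one -EFAULT.
 */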

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Let's update our copies before we save
	 * it into the save area
	 */
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}

static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		__disable_ibs_on_vcpu(vcpu);
	}
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}

void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock_bh(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	vcpu->arch.sie_block->ihcpu = 0xffff;
	spin_unlock_bh(&vcpu->kvm->arch.start_stop_lock);
	return;
}

void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock_bh(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* Need to lock access to action_bits to avoid a SIGP race condition */
	spin_lock_bh(&vcpu->arch.local_int.lock);
	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
	vcpu->arch.local_int.action_bits &=
		~(ACTION_STOP_ON_STOP | ACTION_STORE_ON_STOP);
	spin_unlock_bh(&vcpu->arch.local_int.lock);

	__disable_ibs_on_vcpu(vcpu);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock_bh(&vcpu->kvm->arch.start_stop_lock);
	return;
}
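
/*
 * Summary of the IBS toggling above: the facility is only kept enabled
 * while exactly one VCPU is started; starting a second VCPU disables it
 * everywhere, and stopping down to a single running VCPU re-enables it,
 * via the ENABLE/DISABLE_IBS requests processed in
 * kvm_s390_handle_requests().
 */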

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(arg, vcpu->arch.gmap);
		if (!IS_ERR_VALUE(r))
			r = 0;
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

void kvm_arch_memslots_updated(struct kvm *kvm)
{
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/*
	 * A few sanity checks. Memory slots have to start and end at a
	 * segment boundary (1MB). The memory in userland may be fragmented
	 * into various different vmas. It is okay to mmap() and munmap()
	 * stuff in this slot after doing this call at any time.
	 */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	int rc;

	/*
	 * If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}

static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!vfacilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
	vfacilities[0] &= 0xff82fff3f4fc2000UL;
	vfacilities[1] &= 0x005c000000000000UL;
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) vfacilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");