KVM: s390: move vcpu-specific initialization to a later point
arch/s390/kvm/kvm-s390.c (linux-2.6-block.git)
/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};

unsigned long *vfacilities;
static struct gmap_notifier gmap_notifier;

/* test availability of vfacility */
int test_vfacility(unsigned long nr)
{
	return __test_facility(nr, (void *) vfacilities);
}

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}

int kvm_arch_init(void *opaque)
{
	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

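/*
 * Report which optional KVM capabilities this host/arch supports;
 * userspace queries these via the KVM_CHECK_EXTENSION ioctl.
 */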
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IRQFD:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	default:
		r = 0;
	}
	return r;
}

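/*
 * Propagate the per-page dirty bits tracked in the gmap (guest address
 * space) into the generic KVM dirty bitmap of the given memslot.
 */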
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	down_read(&gmap->mm->mmap_sem);
	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (gmap_test_and_clear_dirty(address, gmap))
			mark_page_dirty(kvm, cur_gfn);
	}
	up_read(&gmap->mm->mmap_sem);
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_mem_control(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	return -ENXIO;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			kvm_set_irq_routing(kvm, &routing, 0, 0);
			r = 0;
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

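/*
 * Allocate and advertise the crypto control block (CRYCB) for this VM,
 * provided the required facility (bit 76) is available on the host;
 * without it the VM simply runs without a CRYCB.
 */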
static int kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_vfacility(76))
		return 0;

	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
					 GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.crypto.crycb)
		return -ENOMEM;

	kvm->arch.crypto.crycbd = (__u32) (unsigned long) kvm->arch.crypto.crycb |
				  CRYCB_FORMAT1;

	return 0;
}

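/*
 * VM-wide initialization: validate the requested VM type (ucontrol VMs
 * need CAP_SYS_ADMIN), enable SIE, allocate the SCA and the debug
 * feature, set up the crypto block, interrupt bookkeeping and - for
 * non-ucontrol VMs - the guest address space (gmap).
 */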
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	sca_offset = (sca_offset + 16) & 0x7f0;
	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	if (kvm_s390_crypto_init(kvm) < 0)
		goto out_crypto;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
		if (!kvm->arch.gmap)
			goto out_nogmap;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);

	return 0;
out_nogmap:
	kfree(kvm->arch.crypto.crycb);
out_crypto:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (kvm_s390_cmma_enabled(vcpu->kvm))
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	kfree(kvm->arch.crypto.crycb);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}

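/*
 * Lazy register switching: the host floating point and access registers
 * are saved and the guest copies installed when the vcpu is loaded onto
 * a host cpu; kvm_arch_vcpu_put() reverses this on schedule-out.
 */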
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	if (!kvm_is_ucontrol(vcpu->kvm))
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	if (!test_vfacility(76))
		return;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	/* enable CMMA interpretation (0x80), turn off PFMF interpretation (0x08) */
	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	vcpu->arch.sie_block->ecb = 6;
	/* transactional execution needs facilities 50 and 73 */
	if (test_vfacility(50) && test_vfacility(73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca = 0xD1002000U;
	if (sclp_has_siif())
		vcpu->arch.sie_block->eca |= 1;
	vcpu->arch.sie_block->fac = (int) (long) vfacilities;
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
				      ICTL_TPROT;

	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_cpu_has_interrupt(vcpu);
}

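/*
 * Blocking a vcpu sets PROG_BLOCK_SIE in its SIE prog20 field, which
 * prevents the vcpu from (re-)entering SIE until it is unblocked;
 * exit_sie() below additionally kicks a currently running vcpu out.
 */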
void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}

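/*
 * Called from the gmap ipte notifier when a notified mapping is
 * invalidated. Any vcpu whose prefix pages are hit is forced out of
 * SIE and asked (via KVM_REQ_MMU_RELOAD) to re-arm the notification
 * and remap its prefix before the next SIE entry.
 */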
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!is_vcpu_stopped(vcpu))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	/* CHECK_STOP and LOAD are not supported yet */
	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				       KVM_MP_STATE_OPERATING;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
	case KVM_MP_STATE_CHECK_STOP:
		/* fall through - CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	return rc;
}

bool kvm_s390_cmma_enabled(struct kvm *kvm)
{
	if (!MACHINE_IS_LPAR)
		return false;
	/* only enable for z10 and later */
	if (!MACHINE_HAS_EDAT1)
		return false;
	if (!kvm->arch.use_cmma)
		return false;
	return true;
}

static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}

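/*
 * Handle all vcpu requests that must be processed before (re)entering
 * SIE: re-arming the prefix ipte notifier, flushing the guest TLB and
 * toggling the IBS state that the start/stop code requested.
 */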
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	s390_vcpu_unblock(vcpu);
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Let's just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      kvm_s390_get_prefix(vcpu),
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_set_mask(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_clear_mask(CPUSTAT_IBS,
					  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}

/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}

static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}

1170}
1171
1172static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
1173{
1174 hva_t hva;
1175 struct kvm_arch_async_pf arch;
1176 int rc;
1177
1178 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
1179 return 0;
1180 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
1181 vcpu->arch.pfault_compare)
1182 return 0;
1183 if (psw_extint_disabled(vcpu))
1184 return 0;
1185 if (kvm_cpu_has_interrupt(vcpu))
1186 return 0;
1187 if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
1188 return 0;
1189 if (!vcpu->arch.gmap->pfault_enabled)
1190 return 0;
1191
81480cc1
HC
1192 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
1193 hva += current->thread.gmap_addr & ~PAGE_MASK;
1194 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
3c038e6b
DD
1195 return 0;
1196
1197 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
1198 return rc;
1199}
1200
3fb4c40f 1201static int vcpu_pre_run(struct kvm_vcpu *vcpu)
b0c632db 1202{
3fb4c40f 1203 int rc, cpuflags;
e168bf8d 1204
3c038e6b
DD
1205 /*
1206 * On s390 notifications for arriving pages will be delivered directly
1207 * to the guest but the house keeping for completed pfaults is
1208 * handled outside the worker.
1209 */
1210 kvm_check_async_pf_completion(vcpu);
1211
5a32c1af 1212 memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
b0c632db
HC
1213
1214 if (need_resched())
1215 schedule();
1216
d3a73acb 1217 if (test_cpu_flag(CIF_MCCK_PENDING))
71cde587
CB
1218 s390_handle_mcck();
1219
79395031
JF
1220 if (!kvm_is_ucontrol(vcpu->kvm)) {
1221 rc = kvm_s390_deliver_pending_interrupts(vcpu);
1222 if (rc)
1223 return rc;
1224 }
0ff31867 1225
2c70fe44
CB
1226 rc = kvm_s390_handle_requests(vcpu);
1227 if (rc)
1228 return rc;
1229
27291e21
DH
1230 if (guestdbg_enabled(vcpu)) {
1231 kvm_s390_backup_guest_per_regs(vcpu);
1232 kvm_s390_patch_guest_per_regs(vcpu);
1233 }
1234
b0c632db 1235 vcpu->arch.sie_block->icptcode = 0;
3fb4c40f
TH
1236 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
1237 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
1238 trace_kvm_s390_sie_enter(vcpu, cpuflags);
2b29a9fd 1239
3fb4c40f
TH
1240 return 0;
1241}
1242
1243static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
1244{
24eb3a82 1245 int rc = -1;
2b29a9fd
DD
1246
1247 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
1248 vcpu->arch.sie_block->icptcode);
1249 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
1250
27291e21
DH
1251 if (guestdbg_enabled(vcpu))
1252 kvm_s390_restore_guest_per_regs(vcpu);
1253
3fb4c40f 1254 if (exit_reason >= 0) {
7c470539 1255 rc = 0;
210b1607
TH
1256 } else if (kvm_is_ucontrol(vcpu->kvm)) {
1257 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
1258 vcpu->run->s390_ucontrol.trans_exc_code =
1259 current->thread.gmap_addr;
1260 vcpu->run->s390_ucontrol.pgm_code = 0x10;
1261 rc = -EREMOTE;
24eb3a82
DD
1262
1263 } else if (current->thread.gmap_pfault) {
3c038e6b 1264 trace_kvm_s390_major_guest_pfault(vcpu);
24eb3a82 1265 current->thread.gmap_pfault = 0;
fa576c58 1266 if (kvm_arch_setup_async_pf(vcpu)) {
24eb3a82 1267 rc = 0;
fa576c58
TH
1268 } else {
1269 gpa_t gpa = current->thread.gmap_addr;
1270 rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
1271 }
24eb3a82
DD
1272 }
1273
1274 if (rc == -1) {
699bde3b
CB
1275 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
1276 trace_kvm_s390_sie_fault(vcpu);
1277 rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
1f0d0f09 1278 }
b0c632db 1279
5a32c1af 1280 memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
3fb4c40f 1281
a76ccff6
TH
1282 if (rc == 0) {
1283 if (kvm_is_ucontrol(vcpu->kvm))
2955c83f
CB
1284 /* Don't exit for host interrupts. */
1285 rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
a76ccff6
TH
1286 else
1287 rc = kvm_handle_sie_intercept(vcpu);
1288 }
1289
3fb4c40f
TH
1290 return rc;
1291}
1292
1293static int __vcpu_run(struct kvm_vcpu *vcpu)
1294{
1295 int rc, exit_reason;
1296
800c1065
TH
1297 /*
1298 * We try to hold kvm->srcu during most of vcpu_run (except when run-
1299 * ning the guest), so that memslots (and other stuff) are protected
1300 */
1301 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
1302
a76ccff6
TH
1303 do {
1304 rc = vcpu_pre_run(vcpu);
1305 if (rc)
1306 break;
3fb4c40f 1307
800c1065 1308 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
a76ccff6
TH
1309 /*
1310 * As PF_VCPU will be used in fault handler, between
1311 * guest_enter and guest_exit should be no uaccess.
1312 */
1313 preempt_disable();
1314 kvm_guest_enter();
1315 preempt_enable();
1316 exit_reason = sie64a(vcpu->arch.sie_block,
1317 vcpu->run->s.regs.gprs);
1318 kvm_guest_exit();
800c1065 1319 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
a76ccff6
TH
1320
1321 rc = vcpu_post_run(vcpu, exit_reason);
27291e21 1322 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
3fb4c40f 1323
800c1065 1324 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
e168bf8d 1325 return rc;
b0c632db
HC
1326}
1327
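/*
 * sync_regs()/store_regs() shuttle the register state that userspace
 * shares via kvm_run (PSW, prefix, CRs, timers, pfault setup) into and
 * out of the SIE control block around each KVM_RUN invocation.
 */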
static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
	}
	kvm_run->kvm_dirty_regs = 0;
}

static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		return 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		return -EINVAL;
	}

	sync_regs(vcpu, kvm_run);

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	store_regs(vcpu, kvm_run);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	unsigned int px;
	u64 clkcomp;
	int rc;

	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = SAVE_AREA_BASE;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
	}
	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
			     vcpu->arch.guest_fpregs.fprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
			      &vcpu->arch.sie_block->gpsw, 16);
	px = kvm_s390_get_prefix(vcpu);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
			      &px, 4);
	rc |= write_guest_abs(vcpu,
			      gpa + offsetof(struct save_area, fp_ctrl_reg),
			      &vcpu->arch.guest_fpregs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
			      &vcpu->arch.sie_block->todpr, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
			      &vcpu->arch.sie_block->cputm, 8);
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Let's update our copies before we save
	 * them into the save area.
	 */
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}

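/*
 * IBS is a performance facility that is only beneficial while exactly
 * one VCPU is running (see kvm_s390_vcpu_start/stop below). The
 * helpers here queue the ENABLE/DISABLE requests and kick the target
 * vcpu out of SIE so the change is picked up before the next entry.
 */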
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		__disable_ibs_on_vcpu(vcpu);
	}
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}

void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* Need to lock access to action_bits to avoid a SIGP race condition */
	spin_lock(&vcpu->arch.local_int.lock);
	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
	vcpu->arch.local_int.action_bits &=
		~(ACTION_STOP_ON_STOP | ACTION_STORE_ON_STOP);
	spin_unlock(&vcpu->arch.local_int.lock);

	__disable_ibs_on_vcpu(vcpu);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/* A few sanity checks. Memory slots must start and end on a segment
	   boundary (1MB). The memory in userland may be fragmented into
	   various different vmas. It is okay to mmap() and munmap() stuff
	   in this slot after doing this call at any time */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}

static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!vfacilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
	vfacilities[0] &= 0xff82fffbf47c2000UL;
	vfacilities[1] &= 0x005c000000000000UL;
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) vfacilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");