/* arch/s390/kvm/kvm-s390.c (linux-2.6-block.git) */

/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 *            Christian Borntraeger <borntraeger@de.ibm.com>
 *            Heiko Carstens <heiko.carstens@de.ibm.com>
 *            Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};

unsigned long *vfacilities;
static struct gmap_notifier gmap_notifier;

/* test availability of vfacility */
static inline int test_vfacility(unsigned long nr)
{
	return __test_facility(nr, (void *) vfacilities);
}

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

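/*
 * Report which optional KVM capabilities this architecture supports;
 * userspace queries these via the KVM_CHECK_EXTENSION ioctl.
 */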
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	default:
		r = 0;
	}
	return r;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

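/*
 * Create the VM: enable SIE, allocate the system control area (SCA)
 * and the s390 debug feature, and set up the guest address space
 * (gmap). User-controlled (ucontrol) VMs manage their address space
 * per vcpu instead and get no kernel gmap here.
 */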
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm);
		if (!kvm->arch.gmap)
			goto out_nogmap;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;

	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}

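/*
 * Tear down a single vcpu: for non-ucontrol guests also clear its SCA
 * entry, so the address of the freed SIE block does not linger there.
 */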
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = gmap_alloc(current->mm);
		if (!vcpu->arch.gmap)
			return -ENOMEM;
		vcpu->arch.gmap->private = vcpu->kvm;
		return 0;
	}

	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS;
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}

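/*
 * Lazy register switching: the host FP/access registers are saved and
 * the guest copies loaded when the vcpu is scheduled in, and switched
 * back on vcpu_put. In between, the guest values live in the hardware
 * registers (see also kvm_s390_vcpu_store_status below).
 */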
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}

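/*
 * Initialize the SIE control block: cpu state flags, execution
 * controls (ecb/eca), the facility list the guest may use, and the
 * clock-comparator wakeup timer.
 */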
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	vcpu->arch.sie_block->ecb = 6;
	if (test_vfacility(50) && test_vfacility(73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	vcpu->arch.sie_block->fac = (int) (long) vfacilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_cpu_has_interrupt(vcpu);
}

void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}

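/*
 * gmap (ipte) notifier callback: a mapping covering a guest prefix
 * page was invalidated. Request a prefix reload on the affected vcpus
 * and kick them out of SIE so the request is handled before re-entry.
 */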
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (vcpu->arch.sie_block->prefix == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Let's just retry the request loop.
	 */
	while (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      vcpu->arch.sie_block->prefix,
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		s390_vcpu_unblock(vcpu);
	}
	return 0;
}

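/*
 * Resolve a guest fault synchronously: fault in the page backing the
 * faulting guest address by touching it in the host (user) address
 * space via get_user_pages.
 */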
static long kvm_arch_fault_in_sync(struct kvm_vcpu *vcpu)
{
	long rc;
	hva_t fault = gmap_fault(current->thread.gmap_addr, vcpu->arch.gmap);
	struct mm_struct *mm = current->mm;
	down_read(&mm->mmap_sem);
	rc = get_user_pages(current, mm, fault, 1, 1, 0, NULL, NULL);
	up_read(&mm->mmap_sem);
	return rc;
}

static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	inti.parm64 = token;

	if (start_token) {
		inti.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}

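/*
 * Decide whether an asynchronous pfault may be set up for the current
 * fault: the guest needs a valid pfault token, the PSW mask must match
 * the configured pfault select/compare values, external interrupts and
 * the relevant subclass mask in CR0 must be enabled, no interrupt may
 * be pending, and pfault handling must be enabled on the gmap.
 */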
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_cpu_has_interrupt(vcpu))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gmap_fault(current->thread.gmap_addr, vcpu->arch.gmap);
	if (copy_from_guest(vcpu, &arch.pfault_token, vcpu->arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}

static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the housekeeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm))
		kvm_s390_deliver_pending_interrupts(vcpu);

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}

static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	int rc = -1;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (exit_reason >= 0) {
		rc = 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
			current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		rc = -EREMOTE;

	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu) ||
		    (kvm_arch_fault_in_sync(vcpu) >= 0))
			rc = 0;
	}

	if (rc == -1) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		trace_kvm_s390_sie_fault(vcpu);
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			rc = -EOPNOTSUPP;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when
	 * running the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in the fault handler, there
		 * should be no uaccess between guest_enter and guest_exit.
		 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
	case KVM_EXIT_S390_UCONTROL:
	case KVM_EXIT_S390_TSCH:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed;
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}

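/*
 * Copy to guest storage, either through the prefix-relative view
 * (copy_to_guest) or to absolute storage (copy_to_guest_absolute),
 * depending on the prefix argument.
 */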
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr)
{
	unsigned char archmode = 1;
	int prefix;
	u64 clkcomp;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->run->s.regs.gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&clkcomp, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->run->s.regs.acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Let's update our copies before we save
	 * them into the save area.
	 */
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(arg, vcpu->arch.gmap);
		if (!IS_ERR_VALUE(r))
			r = 0;
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

void kvm_arch_memslots_updated(struct kvm *kvm)
{
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/* A few sanity checks. We can have memory slots which have to be
	   located/ended at a segment boundary (1MB). The memory in userland is
	   ok to be fragmented into various different vmas. It is okay to mmap()
	   and munmap() stuff in this slot after doing this call at any time */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}

static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!vfacilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
	vfacilities[0] &= 0xff82fff3f4fc2000UL;
	vfacilities[1] &= 0x005c000000000000UL;
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) vfacilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");