/* arch/s390/kvm/kvm-s390.c */
/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */
15
16#include <linux/compiler.h>
17#include <linux/err.h>
18#include <linux/fs.h>
ca872302 19#include <linux/hrtimer.h>
b0c632db
HC
20#include <linux/init.h>
21#include <linux/kvm.h>
22#include <linux/kvm_host.h>
23#include <linux/module.h>
24#include <linux/slab.h>
ba5c1e9b 25#include <linux/timer.h>
cbb870c8 26#include <asm/asm-offsets.h>
b0c632db
HC
27#include <asm/lowcore.h>
28#include <asm/pgtable.h>
f5daba1d 29#include <asm/nmi.h>
a0616cde 30#include <asm/switch_to.h>
1526bf9c 31#include <asm/sclp.h>
8f2abe6a 32#include "kvm-s390.h"
b0c632db
HC
33#include "gaccess.h"
34
/* Expand a vcpu stat name into its offset inside struct kvm_vcpu plus type. */
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/*
 * Per-vcpu statistics exported via debugfs; the common KVM code creates
 * one file per entry.  The list is NULL-terminated.
 */
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};
81
/*
 * Page holding the facility list presented to guests; allocated and
 * masked in kvm_s390_init(), installed per-vcpu in kvm_arch_vcpu_setup().
 */
static unsigned long long *facilities;
/* Section: not file related */

/*
 * CPU hotplug hook from common KVM code.  SIE is always available on
 * s390, so there is nothing to enable.
 */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

/* No global hardware state to set up or tear down on s390. */
int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

/* All s390 CPUs can run SIE; compatibility check is a no-op. */
void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}
116
117/* Section: device related */
118long kvm_arch_dev_ioctl(struct file *filp,
119 unsigned int ioctl, unsigned long arg)
120{
121 if (ioctl == KVM_S390_ENABLE_SIE)
122 return s390_enable_sie();
123 return -EINVAL;
124}
125
/*
 * Report which optional KVM capabilities this architecture supports.
 * Returns 1 for plainly supported extensions, a count for the vcpu
 * limits, and 0 for everything unknown.
 */
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_S390_COW:
		/* copy-on-write depends on a bit of sclp facility 85 */
		r = sclp_get_fac85() & 0x2;
		break;
	default:
		r = 0;
	}
	return r;
}
153
/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 * Dirty logging is not implemented on s390; report success with an
 * empty log.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}
163
/*
 * VM-scope ioctl handler.  Only KVM_S390_INTERRUPT (inject a floating
 * interrupt into the VM) is handled here; everything else is -ENOTTY.
 */
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
187
/*
 * Create the architecture side of a VM: enable SIE for the calling
 * process, allocate the system control area (SCA), a debug feature
 * instance and - unless this is a ucontrol VM - the VM-wide guest
 * address space (gmap).
 *
 * Returns 0 on success, a negative error code otherwise.  Resources
 * already acquired are released in reverse order on the error paths.
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	/* only the ucontrol flag is a valid type bit ... */
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	/* ... and ucontrol VMs are restricted to privileged users */
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		/* ucontrol VMs get a per-vcpu gmap in kvm_arch_vcpu_init() */
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm);
		if (!kvm->arch.gmap)
			goto out_nogmap;
	}
	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}
241
/*
 * Tear down one vcpu: detach its SIE block from the SCA (non-ucontrol
 * VMs), free the per-vcpu gmap (ucontrol VMs), then release the SIE
 * control block and the vcpu structure itself.
 */
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		/* only clear the sda slot if it still points at us */
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}
261
/*
 * Destroy every vcpu of a VM, then reset the VM's vcpu bookkeeping
 * under kvm->lock so concurrent lookups see a consistent state.
 */
static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}
277
/* Nothing to flush before VM destruction on s390. */
void kvm_arch_sync_events(struct kvm *kvm)
{
}
281
/* Release all VM-wide resources acquired in kvm_arch_init_vm(). */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	/* ucontrol VMs have no VM-wide gmap (it is NULL) */
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
}
290
/* Section: vcpu related */
/*
 * Architecture part of common vcpu init: ucontrol vcpus get a private
 * guest address space; all others share the VM-wide gmap and declare
 * which register sets are synced through the kvm_run area.
 */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = gmap_alloc(current->mm);
		if (!vcpu->arch.gmap)
			return -ENOMEM;
		return 0;
	}

	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS;
	return 0;
}
308
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do; per-vcpu resources are freed in kvm_arch_vcpu_destroy() */
}
313
/*
 * Scheduled in on a host CPU: save host FP and access registers,
 * install the guest's, activate the guest address space and flag the
 * vcpu as running.
 */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	/* drop any invalid bits before loading the guest fpc */
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}
324
/*
 * Scheduled out: mirror of kvm_arch_vcpu_load() — save guest FP and
 * access registers, restore the host's, deactivate the gmap.
 */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}
334
/*
 * Perform an initial CPU reset on the vcpu's SIE block: clear PSW,
 * prefix, timers and control registers, then mark the vcpu stopped.
 */
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	/* cr0/cr14 reset values per the comment above (PoP initial reset) */
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	/* also load the cleared fpc into the real FP control register */
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
}
352
/*
 * One-time setup of the SIE control block and the per-vcpu clock
 * comparator timer/tasklet after vcpu creation.
 */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);
	vcpu->arch.sie_block->ecb = 6;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	/* point the SIE block at the masked facility list page */
	vcpu->arch.sie_block->fac = (int) (long) facilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	/* NOTE(review): presumably 0xff marks the CPU as virtual — confirm */
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}
369
/*
 * Allocate and wire up a new vcpu: the kvm_vcpu structure, its SIE
 * control block, an SCA entry (non-ucontrol VMs) and the local
 * interrupt state linked into the VM's floating interrupt structure.
 *
 * Returns the vcpu or an ERR_PTR() on failure.
 */
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	if (!vcpu)
		goto out;

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
					get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		/* ucontrol VMs have no SCA; for others it must exist */
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		/* register the SIE block in the system control area */
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	/* publish the local interrupt state under the float_int lock */
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kfree(vcpu);
out:
	return ERR_PTR(rc);
}
429
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
436
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
443
/*
 * KVM_GET_ONE_REG: copy a single register, selected by reg->id, from
 * the SIE block to the userspace address in reg->addr.  Unknown ids
 * yield -EINVAL.
 */
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
472
/*
 * KVM_SET_ONE_REG: counterpart of the getter above — copy a single
 * register value from userspace into the SIE block.
 */
static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
b6d33834 501
/* Userspace-triggered initial CPU reset (KVM_S390_INITIAL_RESET). */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
507
/* Copy all general purpose registers from userspace into the sync area. */
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}
513
/* Copy all general purpose registers from the sync area to userspace. */
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}
519
/*
 * Write access registers and control registers; the access registers
 * are loaded into the hardware immediately.
 */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}
528
/* Read access registers and control registers into the userspace copy. */
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}
536
/*
 * Load guest floating point registers; the fp control word is masked
 * to its valid bits before being loaded into the hardware.
 */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	return 0;
}
544
/* Read the guest floating point registers and control word. */
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}
551
552static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
553{
554 int rc = 0;
555
9e6dabef 556 if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
b0c632db 557 rc = -EBUSY;
d7b0b5eb
CO
558 else {
559 vcpu->run->psw_mask = psw.mask;
560 vcpu->run->psw_addr = psw.addr;
561 }
b0c632db
HC
562 return rc;
563}
564
/* Address translation via KVM_TRANSLATE is not implemented on s390. */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}
570
/* Guest debugging support is not implemented on s390. */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}
576
/* The KVM_GET/SET_MP_STATE API is not implemented on s390. */
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}
588
/*
 * Enter SIE for one guest run: handle pending host work (rescheduling,
 * machine checks), deliver guest interrupts, execute the guest, and
 * translate a failing SIE instruction into either a ucontrol exit or
 * an addressing exception injected into the guest.
 */
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc;

	/* gg14/gg15 in the SIE block shadow gprs 14 and 15 (16 bytes) */
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	/* ucontrol VMs deliver interrupts from userspace */
	if (!kvm_is_ucontrol(vcpu->kvm))
		kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
	if (rc) {
		if (kvm_is_ucontrol(vcpu->kvm)) {
			rc = SIE_INTERCEPT_UCONTROL;
		} else {
			VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
			kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
			rc = 0;
		}
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	/* copy the shadowed gprs 14/15 back into the sync area */
	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
	return rc;
}
629
/*
 * The KVM_RUN ioctl: sync register state from the kvm_run area into
 * the SIE block, loop between guest execution and in-kernel intercept
 * handling until userspace assistance is needed or a signal arrives,
 * then sync state back for userspace.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

rerun_vcpu:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	/* only the exit reasons listed here are valid on (re-)entry */
	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
	case KVM_EXIT_S390_UCONTROL:
		break;
	default:
		BUG();
	}

	/* load PSW and any registers userspace marked dirty */
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}

	might_fault();

	do {
		rc = __vcpu_run(vcpu);
		if (rc)
			break;
		if (kvm_is_ucontrol(vcpu->kvm))
			/* ucontrol VMs handle all intercepts in userspace */
			rc = -EOPNOTSUPP;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

#ifdef CONFIG_KVM_S390_UCONTROL
	if (rc == SIE_INTERCEPT_UCONTROL) {
		/* report the faulting guest address to userspace */
		kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
		kvm_run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		kvm_run->s390_ucontrol.pgm_code = 0x10;
		rc = 0;
	}
#endif

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	/* make the current register state visible to userspace again */
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}
722
092670cd 723static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
b0c632db
HC
724 unsigned long n, int prefix)
725{
726 if (prefix)
727 return copy_to_guest(vcpu, guestdest, from, n);
728 else
729 return copy_to_guest_absolute(vcpu, guestdest, from, n);
730}
731
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		/* byte 163 flags the save area as z/Architecture format */
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->run->s.regs.gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->run->s.regs.acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}
799
/*
 * vcpu-scope ioctl handler: interrupt injection, store status, initial
 * PSW/reset, ONE_REG access, ucontrol address space maps and guest
 * page fault-in.
 */
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		/* arg is the guest address, not a user pointer */
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		/* only valid for ucontrol VMs */
		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		/* fault in the guest page at address arg */
		r = gmap_fault(arg, vcpu->arch.gmap);
		if (!IS_ERR_VALUE(r))
			r = 0;
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}
891
/*
 * mmap fault handler for the vcpu file descriptor: for ucontrol VMs
 * the SIE control block can be mapped at KVM_S390_SIE_PAGE_OFFSET.
 */
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}
904
/* s390 keeps no per-memslot architecture data; nothing to free ... */
void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

/* ... and nothing to allocate. */
int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	return 0;
}
914
/* Section: memory related */
/*
 * Validate a memory slot before the common code installs it.
 */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	/* A few sanity checks. We can have exactly one memory slot which has
	   to start at guest virtual zero and which has to be located at a
	   page boundary in userland and which has to end at a page boundary.
	   The memory in userland is ok to be fragmented into various different
	   vmas. It is okay to mmap() and munmap() stuff in this slot after
	   doing this call at any time */

	if (mem->slot)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	/* 0xfffff mask: the region must be 1 MB (segment) aligned */
	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (!user_alloc)
		return -EINVAL;

	return 0;
}
946
947void kvm_arch_commit_memory_region(struct kvm *kvm,
948 struct kvm_userspace_memory_region *mem,
949 struct kvm_memory_slot old,
950 int user_alloc)
951{
f7850c92 952 int rc;
f7784b8e 953
598841ca
CO
954
955 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
956 mem->guest_phys_addr, mem->memory_size);
957 if (rc)
f7850c92 958 printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
598841ca 959 return;
b0c632db
HC
960}
961
/* No shadow page tables to flush on s390. */
void kvm_arch_flush_shadow(struct kvm *kvm)
{
}
965
/*
 * Module init: register with the common KVM code, then build the
 * facility list presented to guests — a zeroed DMA page filled from
 * the host facility list and masked down to what KVM supports.
 */
static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!facilities) {
		kvm_exit();
		return -ENOMEM;
	}
	/* copy the first 16 bytes of the host stfle list, then mask */
	memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
	facilities[0] &= 0xff00fff3f47c0000ULL;
	facilities[1] &= 0x201c000000000000ULL;
	return 0;
}
988
/* Module exit: free the facility page and unregister from KVM. */
static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);