KVM: s390: ucontrol: export SIE control block to user
[linux-2.6-block.git] / arch / s390 / kvm / kvm-s390.c
CommitLineData
b0c632db
HC
1/*
2 * s390host.c -- hosting zSeries kernel virtual machines
3 *
628eb9b8 4 * Copyright IBM Corp. 2008,2009
b0c632db
HC
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
628eb9b8 13 * Christian Ehrhardt <ehrhardt@de.ibm.com>
b0c632db
HC
14 */
15
16#include <linux/compiler.h>
17#include <linux/err.h>
18#include <linux/fs.h>
ca872302 19#include <linux/hrtimer.h>
b0c632db
HC
20#include <linux/init.h>
21#include <linux/kvm.h>
22#include <linux/kvm_host.h>
23#include <linux/module.h>
24#include <linux/slab.h>
ba5c1e9b 25#include <linux/timer.h>
cbb870c8 26#include <asm/asm-offsets.h>
b0c632db
HC
27#include <asm/lowcore.h>
28#include <asm/pgtable.h>
f5daba1d 29#include <asm/nmi.h>
ef50f7ac 30#include <asm/system.h>
8f2abe6a 31#include "kvm-s390.h"
b0c632db
HC
32#include "gaccess.h"
33
34#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
35
36struct kvm_stats_debugfs_item debugfs_entries[] = {
37 { "userspace_handled", VCPU_STAT(exit_userspace) },
0eaeafa1 38 { "exit_null", VCPU_STAT(exit_null) },
8f2abe6a
CB
39 { "exit_validity", VCPU_STAT(exit_validity) },
40 { "exit_stop_request", VCPU_STAT(exit_stop_request) },
41 { "exit_external_request", VCPU_STAT(exit_external_request) },
42 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
ba5c1e9b
CO
43 { "exit_instruction", VCPU_STAT(exit_instruction) },
44 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
45 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
f5e10b09 46 { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
ba5c1e9b
CO
47 { "instruction_lctl", VCPU_STAT(instruction_lctl) },
48 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
7697e71f 49 { "deliver_external_call", VCPU_STAT(deliver_external_call) },
ba5c1e9b
CO
50 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
51 { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
52 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
53 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
54 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
55 { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
56 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
453423dc
CB
57 { "instruction_stidp", VCPU_STAT(instruction_stidp) },
58 { "instruction_spx", VCPU_STAT(instruction_spx) },
59 { "instruction_stpx", VCPU_STAT(instruction_stpx) },
60 { "instruction_stap", VCPU_STAT(instruction_stap) },
61 { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
62 { "instruction_stsch", VCPU_STAT(instruction_stsch) },
63 { "instruction_chsc", VCPU_STAT(instruction_chsc) },
64 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
65 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
bb25b9ba 66 { "instruction_tprot", VCPU_STAT(instruction_tprot) },
5288fbf0 67 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
bd59d3a4 68 { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
7697e71f 69 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
5288fbf0
CB
70 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
71 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
72 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
73 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
74 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
388186bc 75 { "diagnose_10", VCPU_STAT(diagnose_10) },
e28acfea 76 { "diagnose_44", VCPU_STAT(diagnose_44) },
b0c632db
HC
77 { NULL }
78};
79
ef50f7ac 80static unsigned long long *facilities;
b0c632db
HC
81
/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}
88
/* Nothing to tear down: SIE needs no per-cpu disable on s390. */
void kvm_arch_hardware_disable(void *garbage)
{
}
92
b0c632db
HC
/* No global hardware setup required on s390. */
int kvm_arch_hardware_setup(void)
{
	return 0;
}
97
/* Counterpart of kvm_arch_hardware_setup(); nothing to undo. */
void kvm_arch_hardware_unsetup(void)
{
}
101
/* All s390 processors that reach this code can run SIE guests. */
void kvm_arch_check_processor_compat(void *rtn)
{
}
105
/* Arch-specific module-init hook; no work needed here. */
int kvm_arch_init(void *opaque)
{
	return 0;
}
110
/* Arch-specific module-exit hook; no work needed here. */
void kvm_arch_exit(void)
{
}
114
115/* Section: device related */
116long kvm_arch_dev_ioctl(struct file *filp,
117 unsigned int ioctl, unsigned long arg)
118{
119 if (ioctl == KVM_S390_ENABLE_SIE)
120 return s390_enable_sie();
121 return -EINVAL;
122}
123
124int kvm_dev_ioctl_check_extension(long ext)
125{
d7b0b5eb
CO
126 int r;
127
2bd0ac4e 128 switch (ext) {
d7b0b5eb 129 case KVM_CAP_S390_PSW:
b6cf8788 130 case KVM_CAP_S390_GMAP:
52e16b18 131 case KVM_CAP_SYNC_MMU:
d7b0b5eb
CO
132 r = 1;
133 break;
2bd0ac4e 134 default:
d7b0b5eb 135 r = 0;
2bd0ac4e 136 }
d7b0b5eb 137 return r;
b0c632db
HC
138}
139
/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 * Not implemented on s390 here: reports success without providing
 * any dirty bits.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}
149
150long kvm_arch_vm_ioctl(struct file *filp,
151 unsigned int ioctl, unsigned long arg)
152{
153 struct kvm *kvm = filp->private_data;
154 void __user *argp = (void __user *)arg;
155 int r;
156
157 switch (ioctl) {
ba5c1e9b
CO
158 case KVM_S390_INTERRUPT: {
159 struct kvm_s390_interrupt s390int;
160
161 r = -EFAULT;
162 if (copy_from_user(&s390int, argp, sizeof(s390int)))
163 break;
164 r = kvm_s390_inject_vm(kvm, &s390int);
165 break;
166 }
b0c632db 167 default:
367e1319 168 r = -ENOTTY;
b0c632db
HC
169 }
170
171 return r;
172}
173
e08b9637 174int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
b0c632db 175{
b0c632db
HC
176 int rc;
177 char debug_name[16];
178
e08b9637
CO
179 rc = -EINVAL;
180#ifdef CONFIG_KVM_S390_UCONTROL
181 if (type & ~KVM_VM_S390_UCONTROL)
182 goto out_err;
183 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
184 goto out_err;
185#else
186 if (type)
187 goto out_err;
188#endif
189
b0c632db
HC
190 rc = s390_enable_sie();
191 if (rc)
d89f5eff 192 goto out_err;
b0c632db 193
b290411a
CO
194 rc = -ENOMEM;
195
b0c632db
HC
196 kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
197 if (!kvm->arch.sca)
d89f5eff 198 goto out_err;
b0c632db
HC
199
200 sprintf(debug_name, "kvm-%u", current->pid);
201
202 kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
203 if (!kvm->arch.dbf)
204 goto out_nodbf;
205
ba5c1e9b
CO
206 spin_lock_init(&kvm->arch.float_int.lock);
207 INIT_LIST_HEAD(&kvm->arch.float_int.list);
208
b0c632db
HC
209 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
210 VM_EVENT(kvm, 3, "%s", "vm created");
211
e08b9637
CO
212 if (type & KVM_VM_S390_UCONTROL) {
213 kvm->arch.gmap = NULL;
214 } else {
215 kvm->arch.gmap = gmap_alloc(current->mm);
216 if (!kvm->arch.gmap)
217 goto out_nogmap;
218 }
d89f5eff 219 return 0;
598841ca
CO
220out_nogmap:
221 debug_unregister(kvm->arch.dbf);
b0c632db
HC
222out_nodbf:
223 free_page((unsigned long)(kvm->arch.sca));
d89f5eff
JK
224out_err:
225 return rc;
b0c632db
HC
226}
227
d329c035
CB
228void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
229{
230 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
fc34531d 231 clear_bit(63 - vcpu->vcpu_id, (unsigned long *) &vcpu->kvm->arch.sca->mcn);
abf4a71e
CO
232 if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
233 (__u64) vcpu->arch.sie_block)
234 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
235 smp_mb();
27e0393f
CO
236
237 if (kvm_is_ucontrol(vcpu->kvm))
238 gmap_free(vcpu->arch.gmap);
239
d329c035 240 free_page((unsigned long)(vcpu->arch.sie_block));
6692cef3 241 kvm_vcpu_uninit(vcpu);
d329c035
CB
242 kfree(vcpu);
243}
244
245static void kvm_free_vcpus(struct kvm *kvm)
246{
247 unsigned int i;
988a2cae 248 struct kvm_vcpu *vcpu;
d329c035 249
988a2cae
GN
250 kvm_for_each_vcpu(i, vcpu, kvm)
251 kvm_arch_vcpu_destroy(vcpu);
252
253 mutex_lock(&kvm->lock);
254 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
255 kvm->vcpus[i] = NULL;
256
257 atomic_set(&kvm->online_vcpus, 0);
258 mutex_unlock(&kvm->lock);
d329c035
CB
259}
260
/* No arch-specific events to flush before VM destruction. */
void kvm_arch_sync_events(struct kvm *kvm)
{
}
264
b0c632db
HC
265void kvm_arch_destroy_vm(struct kvm *kvm)
266{
d329c035 267 kvm_free_vcpus(kvm);
b0c632db 268 free_page((unsigned long)(kvm->arch.sca));
d329c035 269 debug_unregister(kvm->arch.dbf);
27e0393f
CO
270 if (!kvm_is_ucontrol(kvm))
271 gmap_free(kvm->arch.gmap);
b0c632db
HC
272}
273
274/* Section: vcpu related */
275int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
276{
27e0393f
CO
277 if (kvm_is_ucontrol(vcpu->kvm)) {
278 vcpu->arch.gmap = gmap_alloc(current->mm);
279 if (!vcpu->arch.gmap)
280 return -ENOMEM;
281 return 0;
282 }
283
598841ca 284 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
b0c632db
HC
285 return 0;
286}
287
/* Nothing to undo here; cleanup happens in kvm_arch_vcpu_destroy(). */
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}
292
293void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
294{
295 save_fp_regs(&vcpu->arch.host_fpregs);
296 save_access_regs(vcpu->arch.host_acrs);
297 vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
298 restore_fp_regs(&vcpu->arch.guest_fpregs);
299 restore_access_regs(vcpu->arch.guest_acrs);
480e5926 300 gmap_enable(vcpu->arch.gmap);
9e6dabef 301 atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
b0c632db
HC
302}
303
304void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
305{
9e6dabef 306 atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
480e5926 307 gmap_disable(vcpu->arch.gmap);
b0c632db
HC
308 save_fp_regs(&vcpu->arch.guest_fpregs);
309 save_access_regs(vcpu->arch.guest_acrs);
310 restore_fp_regs(&vcpu->arch.host_fpregs);
311 restore_access_regs(vcpu->arch.host_acrs);
312}
313
314static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
315{
316 /* this equals initial cpu reset in pop, but we don't switch to ESA */
317 vcpu->arch.sie_block->gpsw.mask = 0UL;
318 vcpu->arch.sie_block->gpsw.addr = 0UL;
319 vcpu->arch.sie_block->prefix = 0UL;
320 vcpu->arch.sie_block->ihcpu = 0xffff;
321 vcpu->arch.sie_block->cputm = 0UL;
322 vcpu->arch.sie_block->ckc = 0UL;
323 vcpu->arch.sie_block->todpr = 0;
324 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
325 vcpu->arch.sie_block->gcr[0] = 0xE0UL;
326 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
327 vcpu->arch.guest_fpregs.fpc = 0;
328 asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
329 vcpu->arch.sie_block->gbea = 1;
330}
331
332int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
333{
9e6dabef
CH
334 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
335 CPUSTAT_SM |
336 CPUSTAT_STOPPED);
fc34531d 337 vcpu->arch.sie_block->ecb = 6;
b0c632db 338 vcpu->arch.sie_block->eca = 0xC1002001U;
ef50f7ac 339 vcpu->arch.sie_block->fac = (int) (long) facilities;
ca872302
CB
340 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
341 tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
342 (unsigned long) vcpu);
343 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
453423dc 344 get_cpu_id(&vcpu->arch.cpu_id);
92e6ecf3 345 vcpu->arch.cpu_id.version = 0xff;
b0c632db
HC
346 return 0;
347}
348
349struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
350 unsigned int id)
351{
4d47555a
CO
352 struct kvm_vcpu *vcpu;
353 int rc = -EINVAL;
354
355 if (id >= KVM_MAX_VCPUS)
356 goto out;
357
358 rc = -ENOMEM;
b0c632db 359
4d47555a 360 vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
b0c632db 361 if (!vcpu)
4d47555a 362 goto out;
b0c632db 363
180c12fb
CB
364 vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
365 get_zeroed_page(GFP_KERNEL);
b0c632db
HC
366
367 if (!vcpu->arch.sie_block)
368 goto out_free_cpu;
369
370 vcpu->arch.sie_block->icpua = id;
371 BUG_ON(!kvm->arch.sca);
abf4a71e
CO
372 if (!kvm->arch.sca->cpu[id].sda)
373 kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
b0c632db
HC
374 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
375 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
fc34531d 376 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
b0c632db 377
ba5c1e9b
CO
378 spin_lock_init(&vcpu->arch.local_int.lock);
379 INIT_LIST_HEAD(&vcpu->arch.local_int.list);
380 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
b037a4f3 381 spin_lock(&kvm->arch.float_int.lock);
ba5c1e9b
CO
382 kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
383 init_waitqueue_head(&vcpu->arch.local_int.wq);
5288fbf0 384 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
b037a4f3 385 spin_unlock(&kvm->arch.float_int.lock);
ba5c1e9b 386
b0c632db
HC
387 rc = kvm_vcpu_init(vcpu, kvm, id);
388 if (rc)
7b06bf2f 389 goto out_free_sie_block;
b0c632db
HC
390 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
391 vcpu->arch.sie_block);
392
b0c632db 393 return vcpu;
7b06bf2f
WY
394out_free_sie_block:
395 free_page((unsigned long)(vcpu->arch.sie_block));
b0c632db
HC
396out_free_cpu:
397 kfree(vcpu);
4d47555a 398out:
b0c632db
HC
399 return ERR_PTR(rc);
400}
401
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
408
/* KVM_S390_INITIAL_RESET handler: delegate to the reset helper. */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
414
415int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
416{
b0c632db 417 memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
b0c632db
HC
418 return 0;
419}
420
421int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
422{
b0c632db 423 memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
b0c632db
HC
424 return 0;
425}
426
427int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
428 struct kvm_sregs *sregs)
429{
b0c632db
HC
430 memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
431 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
7eef87dc 432 restore_access_regs(vcpu->arch.guest_acrs);
b0c632db
HC
433 return 0;
434}
435
436int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
437 struct kvm_sregs *sregs)
438{
b0c632db
HC
439 memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
440 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
b0c632db
HC
441 return 0;
442}
443
444int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
445{
b0c632db
HC
446 memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
447 vcpu->arch.guest_fpregs.fpc = fpu->fpc;
7eef87dc 448 restore_fp_regs(&vcpu->arch.guest_fpregs);
b0c632db
HC
449 return 0;
450}
451
452int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
453{
b0c632db
HC
454 memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
455 fpu->fpc = vcpu->arch.guest_fpregs.fpc;
b0c632db
HC
456 return 0;
457}
458
459static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
460{
461 int rc = 0;
462
9e6dabef 463 if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
b0c632db 464 rc = -EBUSY;
d7b0b5eb
CO
465 else {
466 vcpu->run->psw_mask = psw.mask;
467 vcpu->run->psw_addr = psw.addr;
468 }
b0c632db
HC
469 return rc;
470}
471
472int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
473 struct kvm_translation *tr)
474{
475 return -EINVAL; /* not implemented yet */
476}
477
d0bfb940
JK
478int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
479 struct kvm_guest_debug *dbg)
b0c632db
HC
480{
481 return -EINVAL; /* not implemented yet */
482}
483
62d9f0db
MT
484int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
485 struct kvm_mp_state *mp_state)
486{
487 return -EINVAL; /* not implemented yet */
488}
489
490int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
491 struct kvm_mp_state *mp_state)
492{
493 return -EINVAL; /* not implemented yet */
494}
495
e168bf8d 496static int __vcpu_run(struct kvm_vcpu *vcpu)
b0c632db 497{
e168bf8d
CO
498 int rc;
499
b0c632db
HC
500 memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);
501
502 if (need_resched())
503 schedule();
504
71cde587
CB
505 if (test_thread_flag(TIF_MCCK_PENDING))
506 s390_handle_mcck();
507
0ff31867
CO
508 kvm_s390_deliver_pending_interrupts(vcpu);
509
b0c632db
HC
510 vcpu->arch.sie_block->icptcode = 0;
511 local_irq_disable();
512 kvm_guest_enter();
513 local_irq_enable();
514 VCPU_EVENT(vcpu, 6, "entering sie flags %x",
515 atomic_read(&vcpu->arch.sie_block->cpuflags));
e168bf8d
CO
516 rc = sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs);
517 if (rc) {
518 if (kvm_is_ucontrol(vcpu->kvm)) {
519 rc = SIE_INTERCEPT_UCONTROL;
520 } else {
521 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
522 kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
523 rc = 0;
524 }
1f0d0f09 525 }
b0c632db
HC
526 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
527 vcpu->arch.sie_block->icptcode);
528 local_irq_disable();
529 kvm_guest_exit();
530 local_irq_enable();
531
532 memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
e168bf8d 533 return rc;
b0c632db
HC
534}
535
536int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
537{
8f2abe6a 538 int rc;
b0c632db
HC
539 sigset_t sigsaved;
540
9ace903d 541rerun_vcpu:
b0c632db
HC
542 if (vcpu->sigset_active)
543 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
544
9e6dabef 545 atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
b0c632db 546
ba5c1e9b
CO
547 BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);
548
8f2abe6a
CB
549 switch (kvm_run->exit_reason) {
550 case KVM_EXIT_S390_SIEIC:
8f2abe6a 551 case KVM_EXIT_UNKNOWN:
9ace903d 552 case KVM_EXIT_INTR:
8f2abe6a 553 case KVM_EXIT_S390_RESET:
e168bf8d 554 case KVM_EXIT_S390_UCONTROL:
8f2abe6a
CB
555 break;
556 default:
557 BUG();
558 }
559
d7b0b5eb
CO
560 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
561 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
562
dab4079d 563 might_fault();
8f2abe6a
CB
564
565 do {
e168bf8d
CO
566 rc = __vcpu_run(vcpu);
567 if (rc)
568 break;
8f2abe6a
CB
569 rc = kvm_handle_sie_intercept(vcpu);
570 } while (!signal_pending(current) && !rc);
571
9ace903d
CE
572 if (rc == SIE_INTERCEPT_RERUNVCPU)
573 goto rerun_vcpu;
574
b1d16c49
CE
575 if (signal_pending(current) && !rc) {
576 kvm_run->exit_reason = KVM_EXIT_INTR;
8f2abe6a 577 rc = -EINTR;
b1d16c49 578 }
8f2abe6a 579
e168bf8d
CO
580#ifdef CONFIG_KVM_S390_UCONTROL
581 if (rc == SIE_INTERCEPT_UCONTROL) {
582 kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
583 kvm_run->s390_ucontrol.trans_exc_code =
584 current->thread.gmap_addr;
585 kvm_run->s390_ucontrol.pgm_code = 0x10;
586 rc = 0;
587 }
588#endif
589
b8e660b8 590 if (rc == -EOPNOTSUPP) {
8f2abe6a
CB
591 /* intercept cannot be handled in-kernel, prepare kvm-run */
592 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
593 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
8f2abe6a
CB
594 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
595 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
596 rc = 0;
597 }
598
599 if (rc == -EREMOTE) {
600 /* intercept was handled, but userspace support is needed
601 * kvm_run has been prepared by the handler */
602 rc = 0;
603 }
b0c632db 604
d7b0b5eb
CO
605 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
606 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
607
b0c632db
HC
608 if (vcpu->sigset_active)
609 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
610
b0c632db 611 vcpu->stat.exit_userspace++;
7e8e6ab4 612 return rc;
b0c632db
HC
613}
614
092670cd 615static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
b0c632db
HC
616 unsigned long n, int prefix)
617{
618 if (prefix)
619 return copy_to_guest(vcpu, guestdest, from, n);
620 else
621 return copy_to_guest_absolute(vcpu, guestdest, from, n);
622}
623
624/*
625 * store status at address
626 * we use have two special cases:
627 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
628 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
629 */
971eb77f 630int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
b0c632db 631{
092670cd 632 unsigned char archmode = 1;
b0c632db
HC
633 int prefix;
634
635 if (addr == KVM_S390_STORE_STATUS_NOADDR) {
636 if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
637 return -EFAULT;
638 addr = SAVE_AREA_BASE;
639 prefix = 0;
640 } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
641 if (copy_to_guest(vcpu, 163ul, &archmode, 1))
642 return -EFAULT;
643 addr = SAVE_AREA_BASE;
644 prefix = 1;
645 } else
646 prefix = 0;
647
f64ca217 648 if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
b0c632db
HC
649 vcpu->arch.guest_fpregs.fprs, 128, prefix))
650 return -EFAULT;
651
f64ca217 652 if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
b0c632db
HC
653 vcpu->arch.guest_gprs, 128, prefix))
654 return -EFAULT;
655
f64ca217 656 if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
b0c632db
HC
657 &vcpu->arch.sie_block->gpsw, 16, prefix))
658 return -EFAULT;
659
f64ca217 660 if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
b0c632db
HC
661 &vcpu->arch.sie_block->prefix, 4, prefix))
662 return -EFAULT;
663
664 if (__guestcopy(vcpu,
f64ca217 665 addr + offsetof(struct save_area, fp_ctrl_reg),
b0c632db
HC
666 &vcpu->arch.guest_fpregs.fpc, 4, prefix))
667 return -EFAULT;
668
f64ca217 669 if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
b0c632db
HC
670 &vcpu->arch.sie_block->todpr, 4, prefix))
671 return -EFAULT;
672
f64ca217 673 if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
b0c632db
HC
674 &vcpu->arch.sie_block->cputm, 8, prefix))
675 return -EFAULT;
676
f64ca217 677 if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
b0c632db
HC
678 &vcpu->arch.sie_block->ckc, 8, prefix))
679 return -EFAULT;
680
f64ca217 681 if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
b0c632db
HC
682 &vcpu->arch.guest_acrs, 64, prefix))
683 return -EFAULT;
684
685 if (__guestcopy(vcpu,
f64ca217 686 addr + offsetof(struct save_area, ctrl_regs),
b0c632db
HC
687 &vcpu->arch.sie_block->gcr, 128, prefix))
688 return -EFAULT;
689 return 0;
690}
691
b0c632db
HC
692long kvm_arch_vcpu_ioctl(struct file *filp,
693 unsigned int ioctl, unsigned long arg)
694{
695 struct kvm_vcpu *vcpu = filp->private_data;
696 void __user *argp = (void __user *)arg;
bc923cc9 697 long r;
b0c632db 698
93736624
AK
699 switch (ioctl) {
700 case KVM_S390_INTERRUPT: {
ba5c1e9b
CO
701 struct kvm_s390_interrupt s390int;
702
93736624 703 r = -EFAULT;
ba5c1e9b 704 if (copy_from_user(&s390int, argp, sizeof(s390int)))
93736624
AK
705 break;
706 r = kvm_s390_inject_vcpu(vcpu, &s390int);
707 break;
ba5c1e9b 708 }
b0c632db 709 case KVM_S390_STORE_STATUS:
bc923cc9
AK
710 r = kvm_s390_vcpu_store_status(vcpu, arg);
711 break;
b0c632db
HC
712 case KVM_S390_SET_INITIAL_PSW: {
713 psw_t psw;
714
bc923cc9 715 r = -EFAULT;
b0c632db 716 if (copy_from_user(&psw, argp, sizeof(psw)))
bc923cc9
AK
717 break;
718 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
719 break;
b0c632db
HC
720 }
721 case KVM_S390_INITIAL_RESET:
bc923cc9
AK
722 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
723 break;
27e0393f
CO
724#ifdef CONFIG_KVM_S390_UCONTROL
725 case KVM_S390_UCAS_MAP: {
726 struct kvm_s390_ucas_mapping ucasmap;
727
728 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
729 r = -EFAULT;
730 break;
731 }
732
733 if (!kvm_is_ucontrol(vcpu->kvm)) {
734 r = -EINVAL;
735 break;
736 }
737
738 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
739 ucasmap.vcpu_addr, ucasmap.length);
740 break;
741 }
742 case KVM_S390_UCAS_UNMAP: {
743 struct kvm_s390_ucas_mapping ucasmap;
744
745 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
746 r = -EFAULT;
747 break;
748 }
749
750 if (!kvm_is_ucontrol(vcpu->kvm)) {
751 r = -EINVAL;
752 break;
753 }
754
755 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
756 ucasmap.length);
757 break;
758 }
759#endif
b0c632db 760 default:
bc923cc9 761 r = -EINVAL;
b0c632db 762 }
bc923cc9 763 return r;
b0c632db
HC
764}
765
5b1c1493
CO
766int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
767{
768#ifdef CONFIG_KVM_S390_UCONTROL
769 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
770 && (kvm_is_ucontrol(vcpu->kvm))) {
771 vmf->page = virt_to_page(vcpu->arch.sie_block);
772 get_page(vmf->page);
773 return 0;
774 }
775#endif
776 return VM_FAULT_SIGBUS;
777}
778
b0c632db 779/* Section: memory related */
f7784b8e
MT
780int kvm_arch_prepare_memory_region(struct kvm *kvm,
781 struct kvm_memory_slot *memslot,
782 struct kvm_memory_slot old,
783 struct kvm_userspace_memory_region *mem,
784 int user_alloc)
b0c632db
HC
785{
786 /* A few sanity checks. We can have exactly one memory slot which has
787 to start at guest virtual zero and which has to be located at a
788 page boundary in userland and which has to end at a page boundary.
789 The memory in userland is ok to be fragmented into various different
790 vmas. It is okay to mmap() and munmap() stuff in this slot after
791 doing this call at any time */
792
628eb9b8 793 if (mem->slot)
b0c632db
HC
794 return -EINVAL;
795
796 if (mem->guest_phys_addr)
797 return -EINVAL;
798
598841ca 799 if (mem->userspace_addr & 0xffffful)
b0c632db
HC
800 return -EINVAL;
801
598841ca 802 if (mem->memory_size & 0xffffful)
b0c632db
HC
803 return -EINVAL;
804
2668dab7
CO
805 if (!user_alloc)
806 return -EINVAL;
807
f7784b8e
MT
808 return 0;
809}
810
811void kvm_arch_commit_memory_region(struct kvm *kvm,
812 struct kvm_userspace_memory_region *mem,
813 struct kvm_memory_slot old,
814 int user_alloc)
815{
f7850c92 816 int rc;
f7784b8e 817
598841ca
CO
818
819 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
820 mem->guest_phys_addr, mem->memory_size);
821 if (rc)
f7850c92 822 printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
598841ca 823 return;
b0c632db
HC
824}
825
/* No shadow page tables on s390; nothing to flush. */
void kvm_arch_flush_shadow(struct kvm *kvm)
{
}
829
b0c632db
HC
830static int __init kvm_s390_init(void)
831{
ef50f7ac 832 int ret;
0ee75bea 833 ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
ef50f7ac
CB
834 if (ret)
835 return ret;
836
837 /*
838 * guests can ask for up to 255+1 double words, we need a full page
25985edc 839 * to hold the maximum amount of facilities. On the other hand, we
ef50f7ac
CB
840 * only set facilities that are known to work in KVM.
841 */
c2f0e8c8 842 facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
ef50f7ac
CB
843 if (!facilities) {
844 kvm_exit();
845 return -ENOMEM;
846 }
14375bc4 847 memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
6d00d00b 848 facilities[0] &= 0xff00fff3f47c0000ULL;
9950f8be 849 facilities[1] &= 0x201c000000000000ULL;
ef50f7ac 850 return 0;
b0c632db
HC
851}
852
853static void __exit kvm_s390_exit(void)
854{
ef50f7ac 855 free_page((unsigned long) facilities);
b0c632db
HC
856 kvm_exit();
857}
858
/* module entry/exit points */
module_init(kvm_s390_init);
module_exit(kvm_s390_exit);