[S390] use gmap address spaces for kvm guest images
[linux-2.6-block.git] / arch / s390 / kvm / kvm-s390.c
CommitLineData
b0c632db
HC
/*
 * s390host.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */
15
16#include <linux/compiler.h>
17#include <linux/err.h>
18#include <linux/fs.h>
ca872302 19#include <linux/hrtimer.h>
b0c632db
HC
20#include <linux/init.h>
21#include <linux/kvm.h>
22#include <linux/kvm_host.h>
23#include <linux/module.h>
24#include <linux/slab.h>
ba5c1e9b 25#include <linux/timer.h>
cbb870c8 26#include <asm/asm-offsets.h>
b0c632db
HC
27#include <asm/lowcore.h>
28#include <asm/pgtable.h>
f5daba1d 29#include <asm/nmi.h>
ef50f7ac 30#include <asm/system.h>
8f2abe6a 31#include "kvm-s390.h"
b0c632db
HC
32#include "gaccess.h"
33
/*
 * VCPU_STAT(x) expands to the pair (offset of counter x inside struct
 * kvm_vcpu, KVM_STAT_VCPU) expected by the generic KVM debugfs code.
 */
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/*
 * Per-vcpu statistics exported through debugfs; one entry per exit,
 * interrupt-delivery and intercepted-instruction counter.  The table is
 * terminated by an entry with a NULL name.
 */
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ NULL }
};

/* page-sized copy of the host facility list, masked to what KVM supports;
 * allocated in kvm_s390_init() and handed to each vcpu's sie block */
static unsigned long long *facilities;
b0c632db
HC
77
/* Section: not file related */
/* Enable virtualization on the given cpu; nothing to do on s390 since
 * SIE is always available.  Returns 0 (success). */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

/* Counterpart to kvm_arch_hardware_enable(); intentionally empty. */
void kvm_arch_hardware_disable(void *garbage)
{
}
88
b0c632db
HC
/* One-time hardware setup hook required by generic KVM; no-op on s390. */
int kvm_arch_hardware_setup(void)
{
	return 0;
}

/* Counterpart to kvm_arch_hardware_setup(); intentionally empty. */
void kvm_arch_hardware_unsetup(void)
{
}

/* Processor compatibility check hook; all s390 processors qualify. */
void kvm_arch_check_processor_compat(void *rtn)
{
}

/* Module-init hook called by kvm_init(); nothing arch-specific to do. */
int kvm_arch_init(void *opaque)
{
	return 0;
}

/* Module-exit hook called by kvm_exit(); intentionally empty. */
void kvm_arch_exit(void)
{
}
110
111/* Section: device related */
112long kvm_arch_dev_ioctl(struct file *filp,
113 unsigned int ioctl, unsigned long arg)
114{
115 if (ioctl == KVM_S390_ENABLE_SIE)
116 return s390_enable_sie();
117 return -EINVAL;
118}
119
120int kvm_dev_ioctl_check_extension(long ext)
121{
d7b0b5eb
CO
122 int r;
123
2bd0ac4e 124 switch (ext) {
d7b0b5eb
CO
125 case KVM_CAP_S390_PSW:
126 r = 1;
127 break;
2bd0ac4e 128 default:
d7b0b5eb 129 r = 0;
2bd0ac4e 130 }
d7b0b5eb 131 return r;
b0c632db
HC
132}
133
/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 * Dirty logging is not implemented on s390 yet; the ioctl succeeds
 * without filling in any dirty bits.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}
143
/*
 * VM-level ioctls.  Currently only KVM_S390_INTERRUPT is handled, which
 * injects a floating interrupt into the VM; unknown ioctls return
 * -ENOTTY so the generic code can fall back or report the error.
 */
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
167
d89f5eff 168int kvm_arch_init_vm(struct kvm *kvm)
b0c632db 169{
b0c632db
HC
170 int rc;
171 char debug_name[16];
172
173 rc = s390_enable_sie();
174 if (rc)
d89f5eff 175 goto out_err;
b0c632db
HC
176
177 kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
178 if (!kvm->arch.sca)
d89f5eff 179 goto out_err;
b0c632db
HC
180
181 sprintf(debug_name, "kvm-%u", current->pid);
182
183 kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
184 if (!kvm->arch.dbf)
185 goto out_nodbf;
186
ba5c1e9b
CO
187 spin_lock_init(&kvm->arch.float_int.lock);
188 INIT_LIST_HEAD(&kvm->arch.float_int.list);
189
b0c632db
HC
190 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
191 VM_EVENT(kvm, 3, "%s", "vm created");
192
598841ca
CO
193 kvm->arch.gmap = gmap_alloc(current->mm);
194 if (!kvm->arch.gmap)
195 goto out_nogmap;
196
d89f5eff 197 return 0;
598841ca
CO
198out_nogmap:
199 debug_unregister(kvm->arch.dbf);
b0c632db
HC
200out_nodbf:
201 free_page((unsigned long)(kvm->arch.sca));
d89f5eff
JK
202out_err:
203 return rc;
b0c632db
HC
204}
205
d329c035
CB
/*
 * Tear down one vcpu: remove it from the SCA (cpu mask bit and sie-block
 * descriptor address), then free its sie block and the vcpu itself.
 */
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	/* mcn holds one bit per cpu, bit 0 = cpu 0 in big-endian order */
	clear_bit(63 - vcpu->vcpu_id, (unsigned long *) &vcpu->kvm->arch.sca->mcn);
	/* only clear the sda slot if it still points at OUR sie block */
	if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		(__u64) vcpu->arch.sie_block)
		vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	/* make the SCA updates visible before the sie block is freed */
	smp_mb();
	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}
218
/*
 * Destroy every online vcpu of a dying VM and reset the vcpu bookkeeping
 * under kvm->lock so concurrent lookups see a consistent state.
 */
static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}
234
ad8ba2cd
SY
/* Hook called by generic KVM before VM destruction; no pending
 * arch-specific events to flush on s390. */
void kvm_arch_sync_events(struct kvm *kvm)
{
}
238
b0c632db
HC
239void kvm_arch_destroy_vm(struct kvm *kvm)
240{
d329c035 241 kvm_free_vcpus(kvm);
b0c632db 242 free_page((unsigned long)(kvm->arch.sca));
d329c035 243 debug_unregister(kvm->arch.dbf);
598841ca 244 gmap_free(kvm->arch.gmap);
b0c632db
HC
245}
246
/* Section: vcpu related */
/* Early per-vcpu init: every vcpu shares the VM-wide guest address
 * space (gmap) created in kvm_arch_init_vm(). */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	return 0;
}
253
/* Counterpart to kvm_arch_vcpu_init(); the shared gmap is owned by the
 * VM, so there is nothing to release per vcpu. */
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing todo */
}
258
/*
 * Called when the vcpu is scheduled onto a host cpu: stash the host's
 * floating point and access registers, then install the guest's.
 */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	/* strip invalid bits from the guest fp control word before loading */
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->arch.guest_acrs);
}
267
/*
 * Called when the vcpu is scheduled off a host cpu: preserve the guest's
 * fp/access registers and restore the host's (inverse of vcpu_load).
 */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->arch.guest_acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}
275
/*
 * Perform the architected initial cpu reset on this vcpu: clear PSW,
 * prefix, timers and registers to their documented reset values.
 */
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	vcpu->arch.sie_block->prefix = 0UL;
	/* 0xffff invalidates the cached intercept handling state */
	vcpu->arch.sie_block->ihcpu = 0xffff;
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	/* architected reset values for control registers 0 and 14 */
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	/* also reset the host fp control register right away */
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
}
293
/*
 * Configure the sie control block of a freshly created vcpu and set up
 * its clock-comparator timer infrastructure.
 */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	/* run in z/Architecture mode with storage keys handled by SIE */
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | CPUSTAT_SM);
	/* force a sie-block/memory reload before the first run */
	set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
	vcpu->arch.sie_block->ecb = 6;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	/* sie block carries a 31-bit pointer to the masked facility list */
	vcpu->arch.sie_block->fac = (int) (long) facilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	/* wakeup work runs in tasklet context, armed from the hrtimer */
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	/* version 0xff marks the cpu id as belonging to a KVM guest */
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}
309
/*
 * Allocate and wire up a new vcpu: its sie control block, its entry in
 * the VM-wide SCA, and its local interrupt state.  Returns the vcpu or
 * an ERR_PTR on failure.
 */
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	int rc = -ENOMEM;

	if (!vcpu)
		goto out_nomem;

	/* the sie block must be a full zeroed page of its own */
	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
					get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	BUG_ON(!kvm->arch.sca);
	/* publish this vcpu's sie block in the SCA unless already taken */
	if (!kvm->arch.sca->cpu[id].sda)
		kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
	vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
	vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
	/* mark cpu as present in the SCA cpu mask (big-endian bit order) */
	set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		/* NOTE(review): this path frees the sie block but leaves the
		 * SCA sda entry and mcn bit set above pointing at freed
		 * memory -- verify whether a cleanup is needed here */
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kfree(vcpu);
out_nomem:
	return ERR_PTR(rc);
}
356
b0c632db
HC
/* Required by generic KVM but unused on s390; reaching it is a bug. */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
363
/* KVM_S390_INITIAL_RESET ioctl backend: perform the initial cpu reset
 * and report success. */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
369
/* Copy the 16 general purpose registers from userspace into the vcpu. */
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

/* Copy the vcpu's 16 general purpose registers out to userspace. */
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
	return 0;
}
381
/* Set the special registers: access registers and the 16 control
 * registers held in the sie block. */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	return 0;
}

/* Read back the access and control registers (inverse of set_sregs). */
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}
397
/* Set the guest floating point registers and fp control word. */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	return 0;
}

/* Read back the guest floating point state (inverse of set_fpu). */
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}
411
412static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
413{
414 int rc = 0;
415
b0c632db
HC
416 if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
417 rc = -EBUSY;
d7b0b5eb
CO
418 else {
419 vcpu->run->psw_mask = psw.mask;
420 vcpu->run->psw_addr = psw.addr;
421 }
b0c632db
HC
422 return rc;
423}
424
/* Address translation ioctl; not implemented on s390. */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

/* Guest debugging support; not implemented on s390. */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

/* Multiprocessing state query; not implemented on s390. */
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

/* Multiprocessing state update; not implemented on s390. */
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}
448
b0c632db
HC
/*
 * Execute the guest through SIE once, until the next intercept: handle
 * pending host work (resched, machine checks), deliver pending guest
 * interrupts, enter the guest address space and run sie64a().
 */
static void __vcpu_run(struct kvm_vcpu *vcpu)
{
	/* gg14/gg15 in the sie block shadow guest gprs 14 and 15 */
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

	if (need_resched())
		schedule();

	/* process a pending host machine check before entering the guest */
	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	/* kvm_guest_enter() must run with interrupts off */
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	/* switch this cpu to the guest address space */
	gmap_enable(vcpu->arch.gmap);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	/* a non-zero return means SIE itself faulted on the host side */
	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	gmap_disable(vcpu->arch.gmap);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	/* copy the (possibly modified) shadowed gprs 14/15 back */
	memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}
481
/*
 * KVM_RUN backend: loop running the guest via __vcpu_run() and handling
 * intercepts in-kernel until an intercept needs userspace (-EOPNOTSUPP /
 * -EREMOTE), a signal arrives (-EINTR) or an error occurs.  kvm_run
 * carries the PSW in and out of the kernel.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

rerun_vcpu:
	/* a pending MMU reload means the memory slot changed under us */
	if (vcpu->requests)
		if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
			kvm_s390_vcpu_set_mem(vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	/* only these exit reasons may legitimately re-enter KVM_RUN */
	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;

	might_fault();

	do {
		__vcpu_run(vcpu);
		rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	/* the intercept handler asks for a full re-entry (reload + rerun) */
	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	/* hand the current guest PSW back to userspace */
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}
551
552static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
553 unsigned long n, int prefix)
554{
555 if (prefix)
556 return copy_to_guest(vcpu, guestdest, from, n);
557 else
558 return copy_to_guest_absolute(vcpu, guestdest, from, n);
559}
560
/*
 * store status at address
 * we use have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 *
 * Writes the architected save area (fp regs, gprs, psw, prefix, fp
 * control, tod programmable reg, cpu timer, clock comparator, access
 * and control registers) to guest memory.  Returns 0 or -EFAULT.
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/* byte 163 = 1 flags a z/Architecture-mode status */
	const unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->arch.guest_gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->arch.guest_acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}
628
b0c632db
HC
/*
 * vcpu-level ioctl dispatcher: interrupt injection, store-status,
 * initial PSW and initial reset.  Unknown ioctls return -EINVAL.
 */
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		/* arg is the guest address (or a special-case token) */
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	default:
		r = -EINVAL;
	}
	return r;
}
666
/* Section: memory related */
/*
 * Validate a KVM_SET_USER_MEMORY_REGION request before it is committed.
 * s390 supports exactly one slot (slot 0), starting at guest physical
 * zero, and both start and size must be aligned to a 1MB segment
 * boundary (mask 0xfffff) so the region can be mapped via gmap segments
 * -- note: this is stricter than page alignment.
 */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	/* A few sanity checks. We can have exactly one memory slot which has
	   to start at guest virtual zero and which has to be located at a
	   segment (1MB) boundary in userland and which has to end at a
	   segment boundary. The memory in userland is ok to be fragmented
	   into various different vmas. It is okay to mmap() and munmap()
	   stuff in this slot after doing this call at any time */

	if (mem->slot)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	/* 0xfffff = 1MB segment mask, required by gmap_map_segment() */
	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	/* only userspace-allocated memory is supported */
	if (!user_alloc)
		return -EINVAL;

	return 0;
}
698
/*
 * Commit a validated memory region: map it into the VM's guest address
 * space and kick every vcpu so it reloads its sie control block.
 */
void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				struct kvm_memory_slot old,
				int user_alloc)
{
	int i, rc;
	struct kvm_vcpu *vcpu;


	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
		mem->guest_phys_addr, mem->memory_size);
	if (rc)
		/* NOTE(review): void return -- a gmap mapping failure is
		 * silently dropped here and cannot be reported to the
		 * caller; verify this is acceptable */
		return;

	/* request update of sie control block for all available vcpus */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
			continue;
		/* stop the vcpu; it reloads and resumes via the request bit */
		kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
	}
	return;
}
721
34d4cb8f
MT
/* Shadow page table flush hook; s390 keeps no shadow tables here. */
void kvm_arch_flush_shadow(struct kvm *kvm)
{
}
725
b0c632db
HC
/*
 * Module init: register with generic KVM, then build the masked
 * facility list handed to guests via the sie block.
 */
static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	/* GFP_DMA: the sie block holds only a 31-bit facility pointer */
	facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!facilities) {
		kvm_exit();
		return -ENOMEM;
	}
	/* take the first two doublewords of the host facility list ... */
	memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
	/* ... and mask them down to the facilities KVM can virtualize */
	facilities[0] &= 0xff00fff3f47c0000ULL;
	facilities[1] &= 0x201c000000000000ULL;
	return 0;
}
748
/* Module exit: release the facility list page and unregister from KVM. */
static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);