[S390] kvm guest address space mapping
[linux-2.6-block.git] / arch / s390 / kvm / kvm-s390.c
CommitLineData
b0c632db
HC
/*
 * s390host.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */
15
16#include <linux/compiler.h>
17#include <linux/err.h>
18#include <linux/fs.h>
ca872302 19#include <linux/hrtimer.h>
b0c632db
HC
20#include <linux/init.h>
21#include <linux/kvm.h>
22#include <linux/kvm_host.h>
23#include <linux/module.h>
24#include <linux/slab.h>
ba5c1e9b 25#include <linux/timer.h>
cbb870c8 26#include <asm/asm-offsets.h>
b0c632db
HC
27#include <asm/lowcore.h>
28#include <asm/pgtable.h>
f5daba1d 29#include <asm/nmi.h>
ef50f7ac 30#include <asm/system.h>
8f2abe6a 31#include "kvm-s390.h"
b0c632db
HC
32#include "gaccess.h"
33
34#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
35
36struct kvm_stats_debugfs_item debugfs_entries[] = {
37 { "userspace_handled", VCPU_STAT(exit_userspace) },
0eaeafa1 38 { "exit_null", VCPU_STAT(exit_null) },
8f2abe6a
CB
39 { "exit_validity", VCPU_STAT(exit_validity) },
40 { "exit_stop_request", VCPU_STAT(exit_stop_request) },
41 { "exit_external_request", VCPU_STAT(exit_external_request) },
42 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
ba5c1e9b
CO
43 { "exit_instruction", VCPU_STAT(exit_instruction) },
44 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
45 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
f5e10b09 46 { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
ba5c1e9b
CO
47 { "instruction_lctl", VCPU_STAT(instruction_lctl) },
48 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
49 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
50 { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
51 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
52 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
53 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
54 { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
55 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
453423dc
CB
56 { "instruction_stidp", VCPU_STAT(instruction_stidp) },
57 { "instruction_spx", VCPU_STAT(instruction_spx) },
58 { "instruction_stpx", VCPU_STAT(instruction_stpx) },
59 { "instruction_stap", VCPU_STAT(instruction_stap) },
60 { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
61 { "instruction_stsch", VCPU_STAT(instruction_stsch) },
62 { "instruction_chsc", VCPU_STAT(instruction_chsc) },
63 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
64 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
bb25b9ba 65 { "instruction_tprot", VCPU_STAT(instruction_tprot) },
5288fbf0
CB
66 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
67 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
68 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
69 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
70 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
71 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
e28acfea 72 { "diagnose_44", VCPU_STAT(diagnose_44) },
b0c632db
HC
73 { NULL }
74};
75
/* Page-sized STFLE facility list advertised to guests; allocated in
 * kvm_s390_init() and handed to each vcpu's sie block. */
static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}
84
void kvm_arch_hardware_disable(void *garbage)
{
	/* nothing to undo: enable is a no-op on s390 */
}

int kvm_arch_hardware_setup(void)
{
	/* no per-host setup required */
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	/* all s390 CPUs that reach here are compatible */
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}
110
111/* Section: device related */
112long kvm_arch_dev_ioctl(struct file *filp,
113 unsigned int ioctl, unsigned long arg)
114{
115 if (ioctl == KVM_S390_ENABLE_SIE)
116 return s390_enable_sie();
117 return -EINVAL;
118}
119
120int kvm_dev_ioctl_check_extension(long ext)
121{
d7b0b5eb
CO
122 int r;
123
2bd0ac4e 124 switch (ext) {
d7b0b5eb
CO
125 case KVM_CAP_S390_PSW:
126 r = 1;
127 break;
2bd0ac4e 128 default:
d7b0b5eb 129 r = 0;
2bd0ac4e 130 }
d7b0b5eb 131 return r;
b0c632db
HC
132}
133
/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	/* dirty logging is not implemented on s390 */
	return 0;
}
143
144long kvm_arch_vm_ioctl(struct file *filp,
145 unsigned int ioctl, unsigned long arg)
146{
147 struct kvm *kvm = filp->private_data;
148 void __user *argp = (void __user *)arg;
149 int r;
150
151 switch (ioctl) {
ba5c1e9b
CO
152 case KVM_S390_INTERRUPT: {
153 struct kvm_s390_interrupt s390int;
154
155 r = -EFAULT;
156 if (copy_from_user(&s390int, argp, sizeof(s390int)))
157 break;
158 r = kvm_s390_inject_vm(kvm, &s390int);
159 break;
160 }
b0c632db 161 default:
367e1319 162 r = -ENOTTY;
b0c632db
HC
163 }
164
165 return r;
166}
167
d89f5eff 168int kvm_arch_init_vm(struct kvm *kvm)
b0c632db 169{
b0c632db
HC
170 int rc;
171 char debug_name[16];
172
173 rc = s390_enable_sie();
174 if (rc)
d89f5eff 175 goto out_err;
b0c632db
HC
176
177 kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
178 if (!kvm->arch.sca)
d89f5eff 179 goto out_err;
b0c632db
HC
180
181 sprintf(debug_name, "kvm-%u", current->pid);
182
183 kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
184 if (!kvm->arch.dbf)
185 goto out_nodbf;
186
ba5c1e9b
CO
187 spin_lock_init(&kvm->arch.float_int.lock);
188 INIT_LIST_HEAD(&kvm->arch.float_int.list);
189
b0c632db
HC
190 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
191 VM_EVENT(kvm, 3, "%s", "vm created");
192
d89f5eff 193 return 0;
b0c632db
HC
194out_nodbf:
195 free_page((unsigned long)(kvm->arch.sca));
d89f5eff
JK
196out_err:
197 return rc;
b0c632db
HC
198}
199
d329c035
CB
200void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
201{
202 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
fc34531d 203 clear_bit(63 - vcpu->vcpu_id, (unsigned long *) &vcpu->kvm->arch.sca->mcn);
abf4a71e
CO
204 if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
205 (__u64) vcpu->arch.sie_block)
206 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
207 smp_mb();
d329c035 208 free_page((unsigned long)(vcpu->arch.sie_block));
6692cef3 209 kvm_vcpu_uninit(vcpu);
d329c035
CB
210 kfree(vcpu);
211}
212
213static void kvm_free_vcpus(struct kvm *kvm)
214{
215 unsigned int i;
988a2cae 216 struct kvm_vcpu *vcpu;
d329c035 217
988a2cae
GN
218 kvm_for_each_vcpu(i, vcpu, kvm)
219 kvm_arch_vcpu_destroy(vcpu);
220
221 mutex_lock(&kvm->lock);
222 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
223 kvm->vcpus[i] = NULL;
224
225 atomic_set(&kvm->online_vcpus, 0);
226 mutex_unlock(&kvm->lock);
d329c035
CB
227}
228
void kvm_arch_sync_events(struct kvm *kvm)
{
	/* no asynchronous arch events to flush on s390 */
}
232
b0c632db
HC
233void kvm_arch_destroy_vm(struct kvm *kvm)
234{
d329c035 235 kvm_free_vcpus(kvm);
b0c632db 236 free_page((unsigned long)(kvm->arch.sca));
d329c035 237 debug_unregister(kvm->arch.dbf);
b0c632db
HC
238}
239
/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	/* all real initialization happens in kvm_arch_vcpu_create/setup */
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing todo */
}
250
251void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
252{
253 save_fp_regs(&vcpu->arch.host_fpregs);
254 save_access_regs(vcpu->arch.host_acrs);
255 vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
256 restore_fp_regs(&vcpu->arch.guest_fpregs);
257 restore_access_regs(vcpu->arch.guest_acrs);
b0c632db
HC
258}
259
260void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
261{
262 save_fp_regs(&vcpu->arch.guest_fpregs);
263 save_access_regs(vcpu->arch.guest_acrs);
264 restore_fp_regs(&vcpu->arch.host_fpregs);
265 restore_access_regs(vcpu->arch.host_acrs);
266}
267
268static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
269{
270 /* this equals initial cpu reset in pop, but we don't switch to ESA */
271 vcpu->arch.sie_block->gpsw.mask = 0UL;
272 vcpu->arch.sie_block->gpsw.addr = 0UL;
273 vcpu->arch.sie_block->prefix = 0UL;
274 vcpu->arch.sie_block->ihcpu = 0xffff;
275 vcpu->arch.sie_block->cputm = 0UL;
276 vcpu->arch.sie_block->ckc = 0UL;
277 vcpu->arch.sie_block->todpr = 0;
278 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
279 vcpu->arch.sie_block->gcr[0] = 0xE0UL;
280 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
281 vcpu->arch.guest_fpregs.fpc = 0;
282 asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
283 vcpu->arch.sie_block->gbea = 1;
284}
285
286int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
287{
288 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
628eb9b8 289 set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
fc34531d 290 vcpu->arch.sie_block->ecb = 6;
b0c632db 291 vcpu->arch.sie_block->eca = 0xC1002001U;
ef50f7ac 292 vcpu->arch.sie_block->fac = (int) (long) facilities;
ca872302
CB
293 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
294 tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
295 (unsigned long) vcpu);
296 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
453423dc 297 get_cpu_id(&vcpu->arch.cpu_id);
92e6ecf3 298 vcpu->arch.cpu_id.version = 0xff;
b0c632db
HC
299 return 0;
300}
301
302struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
303 unsigned int id)
304{
305 struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
306 int rc = -ENOMEM;
307
308 if (!vcpu)
309 goto out_nomem;
310
180c12fb
CB
311 vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
312 get_zeroed_page(GFP_KERNEL);
b0c632db
HC
313
314 if (!vcpu->arch.sie_block)
315 goto out_free_cpu;
316
317 vcpu->arch.sie_block->icpua = id;
318 BUG_ON(!kvm->arch.sca);
abf4a71e
CO
319 if (!kvm->arch.sca->cpu[id].sda)
320 kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
b0c632db
HC
321 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
322 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
fc34531d 323 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
b0c632db 324
ba5c1e9b
CO
325 spin_lock_init(&vcpu->arch.local_int.lock);
326 INIT_LIST_HEAD(&vcpu->arch.local_int.list);
327 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
b037a4f3 328 spin_lock(&kvm->arch.float_int.lock);
ba5c1e9b
CO
329 kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
330 init_waitqueue_head(&vcpu->arch.local_int.wq);
5288fbf0 331 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
b037a4f3 332 spin_unlock(&kvm->arch.float_int.lock);
ba5c1e9b 333
b0c632db
HC
334 rc = kvm_vcpu_init(vcpu, kvm, id);
335 if (rc)
7b06bf2f 336 goto out_free_sie_block;
b0c632db
HC
337 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
338 vcpu->arch.sie_block);
339
b0c632db 340 return vcpu;
7b06bf2f
WY
341out_free_sie_block:
342 free_page((unsigned long)(vcpu->arch.sie_block));
b0c632db
HC
343out_free_cpu:
344 kfree(vcpu);
345out_nomem:
346 return ERR_PTR(rc);
347}
348
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
355
/* KVM_S390_INITIAL_RESET ioctl backend; always succeeds. */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
361
362int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
363{
b0c632db 364 memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
b0c632db
HC
365 return 0;
366}
367
368int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
369{
b0c632db 370 memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
b0c632db
HC
371 return 0;
372}
373
374int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
375 struct kvm_sregs *sregs)
376{
b0c632db
HC
377 memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
378 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
b0c632db
HC
379 return 0;
380}
381
382int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
383 struct kvm_sregs *sregs)
384{
b0c632db
HC
385 memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
386 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
b0c632db
HC
387 return 0;
388}
389
390int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
391{
b0c632db
HC
392 memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
393 vcpu->arch.guest_fpregs.fpc = fpu->fpc;
b0c632db
HC
394 return 0;
395}
396
397int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
398{
b0c632db
HC
399 memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
400 fpu->fpc = vcpu->arch.guest_fpregs.fpc;
b0c632db
HC
401 return 0;
402}
403
404static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
405{
406 int rc = 0;
407
b0c632db
HC
408 if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
409 rc = -EBUSY;
d7b0b5eb
CO
410 else {
411 vcpu->run->psw_mask = psw.mask;
412 vcpu->run->psw_addr = psw.addr;
413 }
b0c632db
HC
414 return rc;
415}
416
417int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
418 struct kvm_translation *tr)
419{
420 return -EINVAL; /* not implemented yet */
421}
422
d0bfb940
JK
423int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
424 struct kvm_guest_debug *dbg)
b0c632db
HC
425{
426 return -EINVAL; /* not implemented yet */
427}
428
62d9f0db
MT
429int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
430 struct kvm_mp_state *mp_state)
431{
432 return -EINVAL; /* not implemented yet */
433}
434
435int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
436 struct kvm_mp_state *mp_state)
437{
438 return -EINVAL; /* not implemented yet */
439}
440
b0c632db
HC
441static void __vcpu_run(struct kvm_vcpu *vcpu)
442{
443 memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);
444
445 if (need_resched())
446 schedule();
447
71cde587
CB
448 if (test_thread_flag(TIF_MCCK_PENDING))
449 s390_handle_mcck();
450
0ff31867
CO
451 kvm_s390_deliver_pending_interrupts(vcpu);
452
b0c632db
HC
453 vcpu->arch.sie_block->icptcode = 0;
454 local_irq_disable();
455 kvm_guest_enter();
456 local_irq_enable();
457 VCPU_EVENT(vcpu, 6, "entering sie flags %x",
458 atomic_read(&vcpu->arch.sie_block->cpuflags));
1f0d0f09
CO
459 if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
460 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
461 kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
462 }
b0c632db
HC
463 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
464 vcpu->arch.sie_block->icptcode);
465 local_irq_disable();
466 kvm_guest_exit();
467 local_irq_enable();
468
469 memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
470}
471
472int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
473{
8f2abe6a 474 int rc;
b0c632db
HC
475 sigset_t sigsaved;
476
9ace903d 477rerun_vcpu:
628eb9b8
CE
478 if (vcpu->requests)
479 if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
480 kvm_s390_vcpu_set_mem(vcpu);
481
51e4d5ab 482 /* verify, that memory has been registered */
628eb9b8 483 if (!vcpu->arch.sie_block->gmslm) {
51e4d5ab 484 vcpu_put(vcpu);
628eb9b8 485 VCPU_EVENT(vcpu, 3, "%s", "no memory registered to run vcpu");
51e4d5ab
CO
486 return -EINVAL;
487 }
488
b0c632db
HC
489 if (vcpu->sigset_active)
490 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
491
492 atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
493
ba5c1e9b
CO
494 BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);
495
8f2abe6a
CB
496 switch (kvm_run->exit_reason) {
497 case KVM_EXIT_S390_SIEIC:
8f2abe6a 498 case KVM_EXIT_UNKNOWN:
9ace903d 499 case KVM_EXIT_INTR:
8f2abe6a
CB
500 case KVM_EXIT_S390_RESET:
501 break;
502 default:
503 BUG();
504 }
505
d7b0b5eb
CO
506 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
507 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
508
dab4079d 509 might_fault();
8f2abe6a
CB
510
511 do {
512 __vcpu_run(vcpu);
8f2abe6a
CB
513 rc = kvm_handle_sie_intercept(vcpu);
514 } while (!signal_pending(current) && !rc);
515
9ace903d
CE
516 if (rc == SIE_INTERCEPT_RERUNVCPU)
517 goto rerun_vcpu;
518
b1d16c49
CE
519 if (signal_pending(current) && !rc) {
520 kvm_run->exit_reason = KVM_EXIT_INTR;
8f2abe6a 521 rc = -EINTR;
b1d16c49 522 }
8f2abe6a 523
b8e660b8 524 if (rc == -EOPNOTSUPP) {
8f2abe6a
CB
525 /* intercept cannot be handled in-kernel, prepare kvm-run */
526 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
527 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
8f2abe6a
CB
528 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
529 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
530 rc = 0;
531 }
532
533 if (rc == -EREMOTE) {
534 /* intercept was handled, but userspace support is needed
535 * kvm_run has been prepared by the handler */
536 rc = 0;
537 }
b0c632db 538
d7b0b5eb
CO
539 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
540 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
541
b0c632db
HC
542 if (vcpu->sigset_active)
543 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
544
b0c632db 545 vcpu->stat.exit_userspace++;
7e8e6ab4 546 return rc;
b0c632db
HC
547}
548
549static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
550 unsigned long n, int prefix)
551{
552 if (prefix)
553 return copy_to_guest(vcpu, guestdest, from, n);
554 else
555 return copy_to_guest_absolute(vcpu, guestdest, from, n);
556}
557
558/*
559 * store status at address
560 * we use have two special cases:
561 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
562 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
563 */
971eb77f 564int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
b0c632db
HC
565{
566 const unsigned char archmode = 1;
567 int prefix;
568
569 if (addr == KVM_S390_STORE_STATUS_NOADDR) {
570 if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
571 return -EFAULT;
572 addr = SAVE_AREA_BASE;
573 prefix = 0;
574 } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
575 if (copy_to_guest(vcpu, 163ul, &archmode, 1))
576 return -EFAULT;
577 addr = SAVE_AREA_BASE;
578 prefix = 1;
579 } else
580 prefix = 0;
581
f64ca217 582 if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
b0c632db
HC
583 vcpu->arch.guest_fpregs.fprs, 128, prefix))
584 return -EFAULT;
585
f64ca217 586 if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
b0c632db
HC
587 vcpu->arch.guest_gprs, 128, prefix))
588 return -EFAULT;
589
f64ca217 590 if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
b0c632db
HC
591 &vcpu->arch.sie_block->gpsw, 16, prefix))
592 return -EFAULT;
593
f64ca217 594 if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
b0c632db
HC
595 &vcpu->arch.sie_block->prefix, 4, prefix))
596 return -EFAULT;
597
598 if (__guestcopy(vcpu,
f64ca217 599 addr + offsetof(struct save_area, fp_ctrl_reg),
b0c632db
HC
600 &vcpu->arch.guest_fpregs.fpc, 4, prefix))
601 return -EFAULT;
602
f64ca217 603 if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
b0c632db
HC
604 &vcpu->arch.sie_block->todpr, 4, prefix))
605 return -EFAULT;
606
f64ca217 607 if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
b0c632db
HC
608 &vcpu->arch.sie_block->cputm, 8, prefix))
609 return -EFAULT;
610
f64ca217 611 if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
b0c632db
HC
612 &vcpu->arch.sie_block->ckc, 8, prefix))
613 return -EFAULT;
614
f64ca217 615 if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
b0c632db
HC
616 &vcpu->arch.guest_acrs, 64, prefix))
617 return -EFAULT;
618
619 if (__guestcopy(vcpu,
f64ca217 620 addr + offsetof(struct save_area, ctrl_regs),
b0c632db
HC
621 &vcpu->arch.sie_block->gcr, 128, prefix))
622 return -EFAULT;
623 return 0;
624}
625
b0c632db
HC
626long kvm_arch_vcpu_ioctl(struct file *filp,
627 unsigned int ioctl, unsigned long arg)
628{
629 struct kvm_vcpu *vcpu = filp->private_data;
630 void __user *argp = (void __user *)arg;
bc923cc9 631 long r;
b0c632db 632
93736624
AK
633 switch (ioctl) {
634 case KVM_S390_INTERRUPT: {
ba5c1e9b
CO
635 struct kvm_s390_interrupt s390int;
636
93736624 637 r = -EFAULT;
ba5c1e9b 638 if (copy_from_user(&s390int, argp, sizeof(s390int)))
93736624
AK
639 break;
640 r = kvm_s390_inject_vcpu(vcpu, &s390int);
641 break;
ba5c1e9b 642 }
b0c632db 643 case KVM_S390_STORE_STATUS:
bc923cc9
AK
644 r = kvm_s390_vcpu_store_status(vcpu, arg);
645 break;
b0c632db
HC
646 case KVM_S390_SET_INITIAL_PSW: {
647 psw_t psw;
648
bc923cc9 649 r = -EFAULT;
b0c632db 650 if (copy_from_user(&psw, argp, sizeof(psw)))
bc923cc9
AK
651 break;
652 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
653 break;
b0c632db
HC
654 }
655 case KVM_S390_INITIAL_RESET:
bc923cc9
AK
656 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
657 break;
b0c632db 658 default:
bc923cc9 659 r = -EINVAL;
b0c632db 660 }
bc923cc9 661 return r;
b0c632db
HC
662}
663
664/* Section: memory related */
f7784b8e
MT
665int kvm_arch_prepare_memory_region(struct kvm *kvm,
666 struct kvm_memory_slot *memslot,
667 struct kvm_memory_slot old,
668 struct kvm_userspace_memory_region *mem,
669 int user_alloc)
b0c632db
HC
670{
671 /* A few sanity checks. We can have exactly one memory slot which has
672 to start at guest virtual zero and which has to be located at a
673 page boundary in userland and which has to end at a page boundary.
674 The memory in userland is ok to be fragmented into various different
675 vmas. It is okay to mmap() and munmap() stuff in this slot after
676 doing this call at any time */
677
628eb9b8 678 if (mem->slot)
b0c632db
HC
679 return -EINVAL;
680
681 if (mem->guest_phys_addr)
682 return -EINVAL;
683
684 if (mem->userspace_addr & (PAGE_SIZE - 1))
685 return -EINVAL;
686
687 if (mem->memory_size & (PAGE_SIZE - 1))
688 return -EINVAL;
689
2668dab7
CO
690 if (!user_alloc)
691 return -EINVAL;
692
f7784b8e
MT
693 return 0;
694}
695
696void kvm_arch_commit_memory_region(struct kvm *kvm,
697 struct kvm_userspace_memory_region *mem,
698 struct kvm_memory_slot old,
699 int user_alloc)
700{
701 int i;
702 struct kvm_vcpu *vcpu;
703
628eb9b8 704 /* request update of sie control block for all available vcpus */
988a2cae
GN
705 kvm_for_each_vcpu(i, vcpu, kvm) {
706 if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
707 continue;
708 kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
2668dab7 709 }
b0c632db
HC
710}
711
void kvm_arch_flush_shadow(struct kvm *kvm)
{
	/* no shadow page tables to flush on s390 */
}
715
b0c632db
HC
716static int __init kvm_s390_init(void)
717{
ef50f7ac 718 int ret;
0ee75bea 719 ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
ef50f7ac
CB
720 if (ret)
721 return ret;
722
723 /*
724 * guests can ask for up to 255+1 double words, we need a full page
25985edc 725 * to hold the maximum amount of facilities. On the other hand, we
ef50f7ac
CB
726 * only set facilities that are known to work in KVM.
727 */
c2f0e8c8 728 facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
ef50f7ac
CB
729 if (!facilities) {
730 kvm_exit();
731 return -ENOMEM;
732 }
14375bc4 733 memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
6d00d00b 734 facilities[0] &= 0xff00fff3f47c0000ULL;
9950f8be 735 facilities[1] &= 0x201c000000000000ULL;
ef50f7ac 736 return 0;
b0c632db
HC
737}
738
739static void __exit kvm_s390_exit(void)
740{
ef50f7ac 741 free_page((unsigned long) facilities);
b0c632db
HC
742 kvm_exit();
743}
744
745module_init(kvm_s390_init);
746module_exit(kvm_s390_exit);