2 * hosting zSeries kernel virtual machines
4 * Copyright IBM Corp. 2008, 2009
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
13 * Christian Ehrhardt <ehrhardt@de.ibm.com>
16 #include <linux/compiler.h>
17 #include <linux/err.h>
19 #include <linux/hrtimer.h>
20 #include <linux/init.h>
21 #include <linux/kvm.h>
22 #include <linux/kvm_host.h>
23 #include <linux/module.h>
24 #include <linux/slab.h>
25 #include <linux/timer.h>
26 #include <asm/asm-offsets.h>
27 #include <asm/lowcore.h>
28 #include <asm/pgtable.h>
30 #include <asm/switch_to.h>
/* VCPU_STAT(x) expands to the offset of counter x inside struct kvm_vcpu
 * plus the KVM_STAT_VCPU tag, the pair expected by kvm_stats_debugfs_item. */
35 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
/* Per-vcpu statistics counters exported through the KVM debugfs interface;
 * names are the debugfs file names, values locate the counter in the vcpu. */
37 struct kvm_stats_debugfs_item debugfs_entries[] = {
38 { "userspace_handled", VCPU_STAT(exit_userspace) },
39 { "exit_null", VCPU_STAT(exit_null) },
40 { "exit_validity", VCPU_STAT(exit_validity) },
41 { "exit_stop_request", VCPU_STAT(exit_stop_request) },
42 { "exit_external_request", VCPU_STAT(exit_external_request) },
43 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
44 { "exit_instruction", VCPU_STAT(exit_instruction) },
45 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
46 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
47 { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
48 { "instruction_lctl", VCPU_STAT(instruction_lctl) },
49 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
50 { "deliver_external_call", VCPU_STAT(deliver_external_call) },
51 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
52 { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
53 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
54 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
55 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
56 { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
57 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
58 { "instruction_stidp", VCPU_STAT(instruction_stidp) },
59 { "instruction_spx", VCPU_STAT(instruction_spx) },
60 { "instruction_stpx", VCPU_STAT(instruction_stpx) },
61 { "instruction_stap", VCPU_STAT(instruction_stap) },
62 { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
63 { "instruction_stsch", VCPU_STAT(instruction_stsch) },
64 { "instruction_chsc", VCPU_STAT(instruction_chsc) },
65 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
66 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
67 { "instruction_tprot", VCPU_STAT(instruction_tprot) },
68 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
69 { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
70 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
71 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
72 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
73 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
74 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
75 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
76 { "diagnose_10", VCPU_STAT(diagnose_10) },
77 { "diagnose_44", VCPU_STAT(diagnose_44) },
78 { "diagnose_9c", VCPU_STAT(diagnose_9c) },
/* Page-sized copy of the host facility list, masked to the facilities KVM
 * supports (see kvm_s390_init()); installed into each vcpu's SIE block. */
82 static unsigned long long *facilities;
84 /* Section: not file related */
/* Generic KVM arch hooks. On s390 SIE needs no per-cpu enable/disable, so
 * these are effectively no-ops (full bodies not visible in this chunk). */
85 int kvm_arch_hardware_enable(void *garbage)
87 /* every s390 is virtualization enabled ;-) */
91 void kvm_arch_hardware_disable(void *garbage)
95 int kvm_arch_hardware_setup(void)
100 void kvm_arch_hardware_unsetup(void)
104 void kvm_arch_check_processor_compat(void *rtn)
108 int kvm_arch_init(void *opaque)
113 void kvm_arch_exit(void)
117 /* Section: device related */
/* /dev/kvm device ioctl: the only arch-specific command visible here is
 * KVM_S390_ENABLE_SIE, which converts the current mm for SIE use. */
118 long kvm_arch_dev_ioctl(struct file *filp,
119 unsigned int ioctl, unsigned long arg)
121 if (ioctl == KVM_S390_ENABLE_SIE)
122 return s390_enable_sie();
/* Report whether an optional KVM capability is supported on s390.
 * The grouped cases share one result (surrounding switch body is elided in
 * this chunk); KVM_CAP_S390_COW additionally depends on a facility bit. */
126 int kvm_dev_ioctl_check_extension(long ext)
131 case KVM_CAP_S390_PSW:
132 case KVM_CAP_S390_GMAP:
133 case KVM_CAP_SYNC_MMU:
134 #ifdef CONFIG_KVM_S390_UCONTROL
135 case KVM_CAP_S390_UCONTROL:
137 case KVM_CAP_SYNC_REGS:
138 case KVM_CAP_ONE_REG:
141 case KVM_CAP_NR_VCPUS:
142 case KVM_CAP_MAX_VCPUS:
145 case KVM_CAP_S390_COW:
/* copy-on-write advertised only when bit 0x2 of facility byte 85 is set */
146 r = sclp_get_fac85() & 0x2;
154 /* Section: vm related */
156 * Get (and clear) the dirty memory log for a memory slot.
158 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
159 struct kvm_dirty_log *log)
/* VM-scope ioctl dispatcher. Only KVM_S390_INTERRUPT is visible in this
 * chunk: copy the interrupt description from userspace and inject it as a
 * floating (VM-wide) interrupt via kvm_s390_inject_vm(). */
164 long kvm_arch_vm_ioctl(struct file *filp,
165 unsigned int ioctl, unsigned long arg)
167 struct kvm *kvm = filp->private_data;
168 void __user *argp = (void __user *)arg;
172 case KVM_S390_INTERRUPT: {
173 struct kvm_s390_interrupt s390int;
176 if (copy_from_user(&s390int, argp, sizeof(s390int)))
178 r = kvm_s390_inject_vm(kvm, &s390int);
/* Create a VM: validate the type flags, enable SIE for the current mm,
 * allocate the system control area (SCA) and the s390 debug feature, set up
 * floating-interrupt state, and allocate the guest address space (gmap) —
 * except for ucontrol VMs, which manage their gmap per vcpu. */
188 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
194 #ifdef CONFIG_KVM_S390_UCONTROL
/* only the KVM_VM_S390_UCONTROL type bit is accepted, and it is privileged */
195 if (type & ~KVM_VM_S390_UCONTROL)
197 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
204 rc = s390_enable_sie();
/* one zeroed page holding the SCA shared by all vcpus of this guest */
210 kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
214 sprintf(debug_name, "kvm-%u", current->pid);
216 kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
220 spin_lock_init(&kvm->arch.float_int.lock);
221 INIT_LIST_HEAD(&kvm->arch.float_int.list);
223 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
224 VM_EVENT(kvm, 3, "%s", "vm created");
226 if (type & KVM_VM_S390_UCONTROL) {
227 kvm->arch.gmap = NULL;
229 kvm->arch.gmap = gmap_alloc(current->mm);
/* error unwind path (goto labels elided in this chunk) */
235 debug_unregister(kvm->arch.dbf);
237 free_page((unsigned long)(kvm->arch.sca));
/* Tear down one vcpu: unhook it from the SCA (non-ucontrol VMs), free the
 * per-vcpu gmap (ucontrol VMs only) and release the SIE control block. */
242 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
244 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
245 if (!kvm_is_ucontrol(vcpu->kvm)) {
/* mcn bit numbering is big-endian within the 64-bit word, hence 63 - id */
246 clear_bit(63 - vcpu->vcpu_id,
247 (unsigned long *) &vcpu->kvm->arch.sca->mcn);
/* only clear the SCA slot if it still points at this vcpu's SIE block */
248 if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
249 (__u64) vcpu->arch.sie_block)
250 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
254 if (kvm_is_ucontrol(vcpu->kvm))
255 gmap_free(vcpu->arch.gmap);
257 free_page((unsigned long)(vcpu->arch.sie_block));
258 kvm_vcpu_uninit(vcpu);
/* Destroy all vcpus of a VM, then clear the vcpu array and the online
 * counter under kvm->lock so concurrent lookups see a consistent state. */
262 static void kvm_free_vcpus(struct kvm *kvm)
265 struct kvm_vcpu *vcpu;
267 kvm_for_each_vcpu(i, vcpu, kvm)
268 kvm_arch_vcpu_destroy(vcpu);
270 mutex_lock(&kvm->lock);
271 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
272 kvm->vcpus[i] = NULL;
274 atomic_set(&kvm->online_vcpus, 0);
275 mutex_unlock(&kvm->lock);
278 void kvm_arch_sync_events(struct kvm *kvm)
/* Release VM-wide resources: SCA page, debug feature and — for non-ucontrol
 * VMs — the VM-wide guest address space. */
282 void kvm_arch_destroy_vm(struct kvm *kvm)
285 free_page((unsigned long)(kvm->arch.sca));
286 debug_unregister(kvm->arch.dbf);
287 if (!kvm_is_ucontrol(kvm))
288 gmap_free(kvm->arch.gmap);
291 /* Section: vcpu related */
/* Per-vcpu init: ucontrol vcpus allocate their own gmap, all others share
 * the VM-wide one; also declares which register sets are synced via the
 * kvm_run area (continuation of the flag list is elided in this chunk). */
292 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
294 if (kvm_is_ucontrol(vcpu->kvm)) {
295 vcpu->arch.gmap = gmap_alloc(current->mm);
296 if (!vcpu->arch.gmap)
301 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
302 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
309 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
/* Scheduled in: save host FP and access registers, load the guest's, enable
 * the guest address space and flag the vcpu as running in its SIE block. */
314 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
316 save_fp_regs(&vcpu->arch.host_fpregs);
317 save_access_regs(vcpu->arch.host_acrs);
/* strip unsupported bits from the guest FPC before loading it */
318 vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
319 restore_fp_regs(&vcpu->arch.guest_fpregs);
320 restore_access_regs(vcpu->run->s.regs.acrs);
321 gmap_enable(vcpu->arch.gmap);
322 atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
/* Scheduled out: exact mirror image of kvm_arch_vcpu_load(). */
325 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
327 atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
328 gmap_disable(vcpu->arch.gmap);
329 save_fp_regs(&vcpu->arch.guest_fpregs);
330 save_access_regs(vcpu->run->s.regs.acrs);
331 restore_fp_regs(&vcpu->arch.host_fpregs);
332 restore_access_regs(vcpu->arch.host_acrs);
/* Reset the architectural vcpu state to initial-CPU-reset values: PSW,
 * prefix, timers, TOD programmable register, control registers and FPC. */
335 static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
337 /* this equals initial cpu reset in pop, but we don't switch to ESA */
338 vcpu->arch.sie_block->gpsw.mask = 0UL;
339 vcpu->arch.sie_block->gpsw.addr = 0UL;
340 kvm_s390_set_prefix(vcpu, 0);
341 vcpu->arch.sie_block->cputm = 0UL;
342 vcpu->arch.sie_block->ckc = 0UL;
343 vcpu->arch.sie_block->todpr = 0;
344 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
/* non-zero reset values for CR0 and CR14 (per the initial-reset comment
 * above; exact bit meanings per the architecture — see PoP) */
345 vcpu->arch.sie_block->gcr[0] = 0xE0UL;
346 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
347 vcpu->arch.guest_fpregs.fpc = 0;
/* also load the cleared FPC into the real FP control register */
348 asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
349 vcpu->arch.sie_block->gbea = 1;
/* One-time SIE control block setup: cpu flags, execution control bits, the
 * masked facility list, the clock-comparator hrtimer and its wakeup tasklet,
 * and the CPU id reported to the guest. */
352 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
354 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
357 vcpu->arch.sie_block->ecb = 6;
358 vcpu->arch.sie_block->eca = 0xC1002001U;
359 vcpu->arch.sie_block->fac = (int) (long) facilities;
360 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
361 tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
362 (unsigned long) vcpu);
363 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
364 get_cpu_id(&vcpu->arch.cpu_id);
/* override the reported CPU version; 0xff presumably marks a virtual CPU —
 * confirm against the architecture documentation */
365 vcpu->arch.cpu_id.version = 0xff;
/* Allocate a vcpu and its SIE control block, register it in the SCA (unless
 * this is a ucontrol VM), wire up local/floating interrupt state, and run
 * the common kvm_vcpu_init(). Error paths are partly elided in this chunk. */
369 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
372 struct kvm_vcpu *vcpu;
375 if (id >= KVM_MAX_VCPUS)
380 vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
/* the SIE control block must live on its own zeroed page */
384 vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
385 get_zeroed_page(GFP_KERNEL);
387 if (!vcpu->arch.sie_block)
390 vcpu->arch.sie_block->icpua = id;
391 if (!kvm_is_ucontrol(kvm)) {
392 if (!kvm->arch.sca) {
/* claim the SCA slot and point the SIE block back at the SCA */
396 if (!kvm->arch.sca->cpu[id].sda)
397 kvm->arch.sca->cpu[id].sda =
398 (__u64) vcpu->arch.sie_block;
399 vcpu->arch.sie_block->scaoh =
400 (__u32)(((__u64)kvm->arch.sca) >> 32);
401 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
/* mark this SCA entry valid; mcn bits are big-endian, hence 63 - id */
402 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
405 spin_lock_init(&vcpu->arch.local_int.lock);
406 INIT_LIST_HEAD(&vcpu->arch.local_int.list);
407 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
/* publish this vcpu's local interrupt state under the float_int lock */
408 spin_lock(&kvm->arch.float_int.lock);
409 kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
410 init_waitqueue_head(&vcpu->arch.local_int.wq);
411 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
412 spin_unlock(&kvm->arch.float_int.lock);
414 rc = kvm_vcpu_init(vcpu, kvm, id);
416 goto out_free_sie_block;
417 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
418 vcpu->arch.sie_block);
422 free_page((unsigned long)(vcpu->arch.sie_block));
/* Referenced by common KVM code but never invoked on s390. */
429 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
431 /* kvm common code refers to this, but never calls it */
436 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
438 /* kvm common code refers to this, but never calls it */
/* KVM_GET_ONE_REG: copy a single s390 register from the SIE control block
 * to the user buffer at reg->addr. Returns put_user()'s result. */
443 static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
444 struct kvm_one_reg *reg)
449 case KVM_REG_S390_TODPR:
450 r = put_user(vcpu->arch.sie_block->todpr,
451 (u32 __user *)reg->addr);
453 case KVM_REG_S390_EPOCHDIFF:
454 r = put_user(vcpu->arch.sie_block->epoch,
455 (u64 __user *)reg->addr);
457 case KVM_REG_S390_CPU_TIMER:
458 r = put_user(vcpu->arch.sie_block->cputm,
459 (u64 __user *)reg->addr);
461 case KVM_REG_S390_CLOCK_COMP:
462 r = put_user(vcpu->arch.sie_block->ckc,
463 (u64 __user *)reg->addr);
/* KVM_SET_ONE_REG: mirror of the getter — read the value from userspace
 * into the corresponding SIE control block field. */
472 static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
473 struct kvm_one_reg *reg)
478 case KVM_REG_S390_TODPR:
479 r = get_user(vcpu->arch.sie_block->todpr,
480 (u32 __user *)reg->addr);
482 case KVM_REG_S390_EPOCHDIFF:
483 r = get_user(vcpu->arch.sie_block->epoch,
484 (u64 __user *)reg->addr);
486 case KVM_REG_S390_CPU_TIMER:
487 r = get_user(vcpu->arch.sie_block->cputm,
488 (u64 __user *)reg->addr);
490 case KVM_REG_S390_CLOCK_COMP:
491 r = get_user(vcpu->arch.sie_block->ckc,
492 (u64 __user *)reg->addr);
/* KVM_S390_INITIAL_RESET ioctl backend — thin wrapper over the reset. */
501 static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
503 kvm_s390_vcpu_initial_reset(vcpu);
/* Copy the general purpose registers between the user-supplied kvm_regs
 * structure and the kvm_run synced-register area.
 * NOTE: the source text contained mojibake "®s" where "&regs" belongs
 * (U+00AE from a corrupted "&reg;" entity); restored the address-of form. */
507 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
509 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
513 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
515 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
/* Set access and control registers; reload the access registers immediately
 * so the running context matches the new values. */
519 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
520 struct kvm_sregs *sregs)
522 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
523 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
524 restore_access_regs(vcpu->run->s.regs.acrs);
/* Read back access and control registers into the user structure. */
528 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
529 struct kvm_sregs *sregs)
531 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
532 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
/* Set the guest FP registers; the FPC is masked to valid bits before the
 * guest FP state is reloaded. */
536 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
538 memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
539 vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
540 restore_fp_regs(&vcpu->arch.guest_fpregs);
/* Read back the guest FP registers and FPC. */
544 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
546 memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
547 fpu->fpc = vcpu->arch.guest_fpregs.fpc;
/* Install an initial PSW; only permitted while the vcpu is stopped. */
551 static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
555 if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
558 vcpu->run->psw_mask = psw.mask;
559 vcpu->run->psw_addr = psw.addr;
/* The following four ioctl backends are deliberately unimplemented. */
564 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
565 struct kvm_translation *tr)
567 return -EINVAL; /* not implemented yet */
570 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
571 struct kvm_guest_debug *dbg)
573 return -EINVAL; /* not implemented yet */
576 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
577 struct kvm_mp_state *mp_state)
579 return -EINVAL; /* not implemented yet */
582 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
583 struct kvm_mp_state *mp_state)
585 return -EINVAL; /* not implemented yet */
/* One entry into SIE: sync gprs 14/15 into the SIE block, deliver pending
 * interrupts (non-ucontrol), run sie64a(), and translate a SIE fault into
 * either a ucontrol exit or an addressing program interrupt. */
588 static int __vcpu_run(struct kvm_vcpu *vcpu)
/* gg14/gg15 shadow guest gprs 14 and 15 in the SIE block (16 bytes) */
592 memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
597 if (test_thread_flag(TIF_MCCK_PENDING))
600 if (!kvm_is_ucontrol(vcpu->kvm))
601 kvm_s390_deliver_pending_interrupts(vcpu);
602 vcpu->arch.sie_block->icptcode = 0;
607 VCPU_EVENT(vcpu, 6, "entering sie flags %x",
608 atomic_read(&vcpu->arch.sie_block->cpuflags));
609 rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
/* sie64a failed: ucontrol guests get the fault reflected to userspace,
 * others get an addressing exception injected */
611 if (kvm_is_ucontrol(vcpu->kvm)) {
612 rc = SIE_INTERCEPT_UCONTROL;
614 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
615 kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
619 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
620 vcpu->arch.sie_block->icptcode);
625 memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
/* KVM_RUN: apply dirty synced registers from kvm_run, loop __vcpu_run() and
 * the intercept handler until a signal or an exit condition, then fill
 * kvm_run with the exit reason and the current guest state. */
629 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
635 if (vcpu->sigset_active)
636 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
638 atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
640 BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);
642 switch (kvm_run->exit_reason) {
643 case KVM_EXIT_S390_SIEIC:
644 case KVM_EXIT_UNKNOWN:
646 case KVM_EXIT_S390_RESET:
647 case KVM_EXIT_S390_UCONTROL:
/* take over PSW and any register sets userspace marked dirty */
653 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
654 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
655 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
656 kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
657 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
659 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
660 kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
661 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
662 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
/* main run loop: enter SIE, handle the intercept, repeat */
668 rc = __vcpu_run(vcpu);
671 if (kvm_is_ucontrol(vcpu->kvm))
674 rc = kvm_handle_sie_intercept(vcpu);
675 } while (!signal_pending(current) && !rc);
677 if (rc == SIE_INTERCEPT_RERUNVCPU)
680 if (signal_pending(current) && !rc) {
681 kvm_run->exit_reason = KVM_EXIT_INTR;
685 #ifdef CONFIG_KVM_S390_UCONTROL
/* reflect the faulting guest address to the ucontrol userspace */
686 if (rc == SIE_INTERCEPT_UCONTROL) {
687 kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
688 kvm_run->s390_ucontrol.trans_exc_code =
689 current->thread.gmap_addr;
690 kvm_run->s390_ucontrol.pgm_code = 0x10;
695 if (rc == -EOPNOTSUPP) {
696 /* intercept cannot be handled in-kernel, prepare kvm-run */
697 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
698 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
699 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
700 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
704 if (rc == -EREMOTE) {
705 /* intercept was handled, but userspace support is needed
706 * kvm_run has been prepared by the handler */
/* always publish the current PSW, prefix and control registers */
710 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
711 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
712 kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
713 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
715 if (vcpu->sigset_active)
716 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
718 vcpu->stat.exit_userspace++;
/* Copy n bytes to a guest address; "prefix" selects prefixed (virtual)
 * versus absolute addressing (selection logic elided in this chunk). */
722 static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
723 unsigned long n, int prefix)
726 return copy_to_guest(vcpu, guestdest, from, n);
728 return copy_to_guest_absolute(vcpu, guestdest, from, n);
732 * store status at address
733 * we have two special cases:
734 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
735 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
/* Write the architected store-status save area (FP regs, GP regs, PSW,
 * prefix, FPC, TOD programmable reg, CPU timer, clock comparator, access
 * and control registers) to the given guest address. */
737 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
/* byte 163 is set to 1 to record that the CPU was in z/Arch mode */
739 unsigned char archmode = 1;
742 if (addr == KVM_S390_STORE_STATUS_NOADDR) {
743 if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
745 addr = SAVE_AREA_BASE;
747 } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
748 if (copy_to_guest(vcpu, 163ul, &archmode, 1))
750 addr = SAVE_AREA_BASE;
755 if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
756 vcpu->arch.guest_fpregs.fprs, 128, prefix))
759 if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
760 vcpu->run->s.regs.gprs, 128, prefix))
763 if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
764 &vcpu->arch.sie_block->gpsw, 16, prefix))
767 if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
768 &vcpu->arch.sie_block->prefix, 4, prefix))
771 if (__guestcopy(vcpu,
772 addr + offsetof(struct save_area, fp_ctrl_reg),
773 &vcpu->arch.guest_fpregs.fpc, 4, prefix))
776 if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
777 &vcpu->arch.sie_block->todpr, 4, prefix))
780 if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
781 &vcpu->arch.sie_block->cputm, 8, prefix))
784 if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
785 &vcpu->arch.sie_block->ckc, 8, prefix))
788 if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
789 &vcpu->run->s.regs.acrs, 64, prefix))
792 if (__guestcopy(vcpu,
793 addr + offsetof(struct save_area, ctrl_regs),
794 &vcpu->arch.sie_block->gcr, 128, prefix))
/* Per-vcpu ioctl dispatcher: interrupt injection, store-status, initial PSW,
 * initial reset, ONE_REG access, and (ucontrol only) UCAS map/unmap and
 * explicit fault resolution.
 * NOTE: the source text contained mojibake "®" where "&reg" belongs
 * (U+00AE from a corrupted "&reg;" entity); restored the three uses. */
799 long kvm_arch_vcpu_ioctl(struct file *filp,
800 unsigned int ioctl, unsigned long arg)
802 struct kvm_vcpu *vcpu = filp->private_data;
803 void __user *argp = (void __user *)arg;
807 case KVM_S390_INTERRUPT: {
808 struct kvm_s390_interrupt s390int;
811 if (copy_from_user(&s390int, argp, sizeof(s390int)))
813 r = kvm_s390_inject_vcpu(vcpu, &s390int);
816 case KVM_S390_STORE_STATUS:
817 r = kvm_s390_vcpu_store_status(vcpu, arg);
819 case KVM_S390_SET_INITIAL_PSW: {
823 if (copy_from_user(&psw, argp, sizeof(psw)))
825 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
828 case KVM_S390_INITIAL_RESET:
829 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
831 case KVM_SET_ONE_REG:
832 case KVM_GET_ONE_REG: {
833 struct kvm_one_reg reg;
835 if (copy_from_user(&reg, argp, sizeof(reg)))
837 if (ioctl == KVM_SET_ONE_REG)
838 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
840 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
843 #ifdef CONFIG_KVM_S390_UCONTROL
844 case KVM_S390_UCAS_MAP: {
845 struct kvm_s390_ucas_mapping ucasmap;
847 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
/* UCAS operations are only meaningful on ucontrol VMs */
852 if (!kvm_is_ucontrol(vcpu->kvm)) {
857 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
858 ucasmap.vcpu_addr, ucasmap.length);
861 case KVM_S390_UCAS_UNMAP: {
862 struct kvm_s390_ucas_mapping ucasmap;
864 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
869 if (!kvm_is_ucontrol(vcpu->kvm)) {
874 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
879 case KVM_S390_VCPU_FAULT: {
880 r = gmap_fault(arg, vcpu->arch.gmap);
881 if (!IS_ERR_VALUE(r))
/* mmap fault handler for the vcpu fd: ucontrol userspace may map the SIE
 * control block page at KVM_S390_SIE_PAGE_OFFSET; everything else SIGBUSes. */
891 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
893 #ifdef CONFIG_KVM_S390_UCONTROL
894 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
895 && (kvm_is_ucontrol(vcpu->kvm))) {
896 vmf->page = virt_to_page(vcpu->arch.sie_block);
901 return VM_FAULT_SIGBUS;
/* Arch memslot alloc/free hooks — nothing arch-specific to do on s390
 * (bodies elided in this chunk). */
904 void kvm_arch_free_memslot(struct kvm_memory_slot *free,
905 struct kvm_memory_slot *dont)
909 int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
914 /* Section: memory related */
/* Validate a memory-region update before it is committed: exactly one slot,
 * guest-physical zero, 1 MB (0x100000) aligned start and size in userland. */
915 int kvm_arch_prepare_memory_region(struct kvm *kvm,
916 struct kvm_memory_slot *memslot,
917 struct kvm_memory_slot old,
918 struct kvm_userspace_memory_region *mem,
921 /* A few sanity checks. We can have exactly one memory slot which has
922 to start at guest virtual zero and which has to be located at a
923 page boundary in userland and which has to end at a page boundary.
924 The memory in userland is ok to be fragmented into various different
925 vmas. It is okay to mmap() and munmap() stuff in this slot after
926 doing this call at any time */
931 if (mem->guest_phys_addr)
934 if (mem->userspace_addr & 0xffffful)
937 if (mem->memory_size & 0xffffful)
/* Commit the validated region by mapping the userland range into the guest
 * address space; failure is only logged here. */
946 void kvm_arch_commit_memory_region(struct kvm *kvm,
947 struct kvm_userspace_memory_region *mem,
948 struct kvm_memory_slot old,
954 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
955 mem->guest_phys_addr, mem->memory_size);
957 printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
961 void kvm_arch_flush_shadow(struct kvm *kvm)
/* Module init: register with common KVM, then build the facility list page
 * handed to guests — host STFLE bits masked down to what KVM supports. */
965 static int __init kvm_s390_init(void)
968 ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
973 * guests can ask for up to 255+1 double words, we need a full page
974 * to hold the maximum amount of facilities. On the other hand, we
975 * only set facilities that are known to work in KVM.
/* GFP_DMA: the SIE block references this page with a 31/32-bit address */
977 facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
982 memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
/* whitelist masks for facility words 0 and 1 */
983 facilities[0] &= 0xff00fff3f47c0000ULL;
984 facilities[1] &= 0x201c000000000000ULL;
/* Module exit: release the facility list page (common KVM teardown is
 * elided in this chunk). */
988 static void __exit kvm_s390_exit(void)
990 free_page((unsigned long) facilities);
994 module_init(kvm_s390_init);
995 module_exit(kvm_s390_exit);