2 * hosting zSeries kernel virtual machines
4 * Copyright IBM Corp. 2008, 2009
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
13 * Christian Ehrhardt <ehrhardt@de.ibm.com>
14 * Jason J. Herne <jjherne@us.ibm.com>
17 #include <linux/compiler.h>
18 #include <linux/err.h>
20 #include <linux/hrtimer.h>
21 #include <linux/init.h>
22 #include <linux/kvm.h>
23 #include <linux/kvm_host.h>
24 #include <linux/module.h>
25 #include <linux/random.h>
26 #include <linux/slab.h>
27 #include <linux/timer.h>
28 #include <linux/vmalloc.h>
29 #include <asm/asm-offsets.h>
30 #include <asm/lowcore.h>
32 #include <asm/pgtable.h>
34 #include <asm/switch_to.h>
40 #define KMSG_COMPONENT "kvm-s390"
42 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
44 #define CREATE_TRACE_POINTS
46 #include "trace-s390.h"
48 #define MEM_OP_MAX_SIZE 65536 /* Maximum transfer size for KVM_S390_MEM_OP */
50 #define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
51 (KVM_MAX_VCPUS + LOCAL_IRQS))
53 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
55 struct kvm_stats_debugfs_item debugfs_entries[] = {
56 { "userspace_handled", VCPU_STAT(exit_userspace) },
57 { "exit_null", VCPU_STAT(exit_null) },
58 { "exit_validity", VCPU_STAT(exit_validity) },
59 { "exit_stop_request", VCPU_STAT(exit_stop_request) },
60 { "exit_external_request", VCPU_STAT(exit_external_request) },
61 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
62 { "exit_instruction", VCPU_STAT(exit_instruction) },
63 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
64 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
65 { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
66 { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
67 { "halt_wakeup", VCPU_STAT(halt_wakeup) },
68 { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
69 { "instruction_lctl", VCPU_STAT(instruction_lctl) },
70 { "instruction_stctl", VCPU_STAT(instruction_stctl) },
71 { "instruction_stctg", VCPU_STAT(instruction_stctg) },
72 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
73 { "deliver_external_call", VCPU_STAT(deliver_external_call) },
74 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
75 { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
76 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
77 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
78 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
79 { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
80 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
81 { "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
82 { "instruction_stidp", VCPU_STAT(instruction_stidp) },
83 { "instruction_spx", VCPU_STAT(instruction_spx) },
84 { "instruction_stpx", VCPU_STAT(instruction_stpx) },
85 { "instruction_stap", VCPU_STAT(instruction_stap) },
86 { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
87 { "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
88 { "instruction_stsch", VCPU_STAT(instruction_stsch) },
89 { "instruction_chsc", VCPU_STAT(instruction_chsc) },
90 { "instruction_essa", VCPU_STAT(instruction_essa) },
91 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
92 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
93 { "instruction_tprot", VCPU_STAT(instruction_tprot) },
94 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
95 { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
96 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
97 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
98 { "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
99 { "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
100 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
101 { "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
102 { "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
103 { "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
104 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
105 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
106 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
107 { "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
108 { "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
109 { "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
110 { "diagnose_10", VCPU_STAT(diagnose_10) },
111 { "diagnose_44", VCPU_STAT(diagnose_44) },
112 { "diagnose_9c", VCPU_STAT(diagnose_9c) },
113 { "diagnose_258", VCPU_STAT(diagnose_258) },
114 { "diagnose_308", VCPU_STAT(diagnose_308) },
115 { "diagnose_500", VCPU_STAT(diagnose_500) },
119 /* upper facilities limit for kvm */
120 unsigned long kvm_s390_fac_list_mask[] = {
121 0xffe6fffbfcfdfc40UL,
122 0x005e800000000000UL,
125 unsigned long kvm_s390_fac_list_mask_size(void)
127 BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
128 return ARRAY_SIZE(kvm_s390_fac_list_mask);
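/*
 * This mask is ANDed with the host's STFLE facility list when a VM is
 * created (see kvm_arch_init_vm()), so a facility is only offered to a
 * guest if the hardware reports it and its bit is set above. Facility
 * bits use MSB-0 numbering, e.g. facility 76 is bit (76 - 64) counted
 * from the left in word 1 of the mask.
 */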
131 static struct gmap_notifier gmap_notifier;
132 debug_info_t *kvm_s390_dbf;
134 /* Section: not file related */
135 int kvm_arch_hardware_enable(void)
137 /* every s390 is virtualization enabled ;-) */
141 static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
144 * This callback is executed during stop_machine(). All CPUs are therefore
145 * temporarily stopped. In order not to change guest behavior, we have to
146 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
147 * so a CPU won't be stopped while calculating with the epoch.
149 static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
153 struct kvm_vcpu *vcpu;
155 unsigned long long *delta = v;
157 list_for_each_entry(kvm, &vm_list, vm_list) {
158 kvm->arch.epoch -= *delta;
159 kvm_for_each_vcpu(i, vcpu, kvm) {
160 vcpu->arch.sie_block->epoch -= *delta;
166 static struct notifier_block kvm_clock_notifier = {
167 .notifier_call = kvm_clock_sync,
170 int kvm_arch_hardware_setup(void)
172 gmap_notifier.notifier_call = kvm_gmap_notifier;
173 gmap_register_ipte_notifier(&gmap_notifier);
174 atomic_notifier_chain_register(&s390_epoch_delta_notifier,
175 &kvm_clock_notifier);
179 void kvm_arch_hardware_unsetup(void)
181 gmap_unregister_ipte_notifier(&gmap_notifier);
182 atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
183 &kvm_clock_notifier);
186 int kvm_arch_init(void *opaque)
188 kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
192 if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
193 debug_unregister(kvm_s390_dbf);
197 /* Register floating interrupt controller interface. */
198 return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
201 void kvm_arch_exit(void)
203 debug_unregister(kvm_s390_dbf);
206 /* Section: device related */
207 long kvm_arch_dev_ioctl(struct file *filp,
208 unsigned int ioctl, unsigned long arg)
210 if (ioctl == KVM_S390_ENABLE_SIE)
211 return s390_enable_sie();
215 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
220 case KVM_CAP_S390_PSW:
221 case KVM_CAP_S390_GMAP:
222 case KVM_CAP_SYNC_MMU:
223 #ifdef CONFIG_KVM_S390_UCONTROL
224 case KVM_CAP_S390_UCONTROL:
226 case KVM_CAP_ASYNC_PF:
227 case KVM_CAP_SYNC_REGS:
228 case KVM_CAP_ONE_REG:
229 case KVM_CAP_ENABLE_CAP:
230 case KVM_CAP_S390_CSS_SUPPORT:
231 case KVM_CAP_IOEVENTFD:
232 case KVM_CAP_DEVICE_CTRL:
233 case KVM_CAP_ENABLE_CAP_VM:
234 case KVM_CAP_S390_IRQCHIP:
235 case KVM_CAP_VM_ATTRIBUTES:
236 case KVM_CAP_MP_STATE:
237 case KVM_CAP_S390_INJECT_IRQ:
238 case KVM_CAP_S390_USER_SIGP:
239 case KVM_CAP_S390_USER_STSI:
240 case KVM_CAP_S390_SKEYS:
241 case KVM_CAP_S390_IRQ_STATE:
244 case KVM_CAP_S390_MEM_OP:
247 case KVM_CAP_NR_VCPUS:
248 case KVM_CAP_MAX_VCPUS:
249 r = sclp.has_esca ? KVM_S390_ESCA_CPU_SLOTS
250 : KVM_S390_BSCA_CPU_SLOTS;
252 case KVM_CAP_NR_MEMSLOTS:
253 r = KVM_USER_MEM_SLOTS;
255 case KVM_CAP_S390_COW:
256 r = MACHINE_HAS_ESOP;
258 case KVM_CAP_S390_VECTOR_REGISTERS:
261 case KVM_CAP_S390_RI:
262 r = test_facility(64);
270 static void kvm_s390_sync_dirty_log(struct kvm *kvm,
271 struct kvm_memory_slot *memslot)
273 gfn_t cur_gfn, last_gfn;
274 unsigned long address;
275 struct gmap *gmap = kvm->arch.gmap;
277 down_read(&gmap->mm->mmap_sem);
278 /* Loop over all guest pages */
279 last_gfn = memslot->base_gfn + memslot->npages;
280 for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
281 address = gfn_to_hva_memslot(memslot, cur_gfn);
283 if (gmap_test_and_clear_dirty(address, gmap))
284 mark_page_dirty(kvm, cur_gfn);
286 up_read(&gmap->mm->mmap_sem);
289 /* Section: vm related */
290 static void sca_del_vcpu(struct kvm_vcpu *vcpu);
293 * Get (and clear) the dirty memory log for a memory slot.
295 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
296 struct kvm_dirty_log *log)
300 struct kvm_memslots *slots;
301 struct kvm_memory_slot *memslot;
304 mutex_lock(&kvm->slots_lock);
307 if (log->slot >= KVM_USER_MEM_SLOTS)
310 slots = kvm_memslots(kvm);
311 memslot = id_to_memslot(slots, log->slot);
313 if (!memslot->dirty_bitmap)
316 kvm_s390_sync_dirty_log(kvm, memslot);
317 r = kvm_get_dirty_log(kvm, log, &is_dirty);
321 /* Clear the dirty log */
323 n = kvm_dirty_bitmap_bytes(memslot);
324 memset(memslot->dirty_bitmap, 0, n);
328 mutex_unlock(&kvm->slots_lock);
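/*
 * Dirty logging is thus a two-step process: kvm_s390_sync_dirty_log()
 * pulls the per-page dirty state out of the gmap into the memslot's
 * dirty bitmap, and kvm_get_dirty_log() hands that bitmap to userspace
 * before it is cleared for the next KVM_GET_DIRTY_LOG round.
 */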
332 static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
340 case KVM_CAP_S390_IRQCHIP:
341 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
342 kvm->arch.use_irqchip = 1;
345 case KVM_CAP_S390_USER_SIGP:
346 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
347 kvm->arch.user_sigp = 1;
350 case KVM_CAP_S390_VECTOR_REGISTERS:
351 mutex_lock(&kvm->lock);
352 if (atomic_read(&kvm->online_vcpus)) {
354 } else if (MACHINE_HAS_VX) {
355 set_kvm_facility(kvm->arch.model.fac->mask, 129);
356 set_kvm_facility(kvm->arch.model.fac->list, 129);
360 mutex_unlock(&kvm->lock);
361 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
362 r ? "(not available)" : "(success)");
364 case KVM_CAP_S390_RI:
366 mutex_lock(&kvm->lock);
367 if (atomic_read(&kvm->online_vcpus)) {
369 } else if (test_facility(64)) {
370 set_kvm_facility(kvm->arch.model.fac->mask, 64);
371 set_kvm_facility(kvm->arch.model.fac->list, 64);
374 mutex_unlock(&kvm->lock);
375 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
376 r ? "(not available)" : "(success)");
378 case KVM_CAP_S390_USER_STSI:
379 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
380 kvm->arch.user_stsi = 1;
390 static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
394 switch (attr->attr) {
395 case KVM_S390_VM_MEM_LIMIT_SIZE:
397 VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
398 kvm->arch.mem_limit);
399 if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
409 static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
413 switch (attr->attr) {
414 case KVM_S390_VM_MEM_ENABLE_CMMA:
415 /* enable CMMA only for z10 and later (EDAT_1) */
417 if (!MACHINE_IS_LPAR || !MACHINE_HAS_EDAT1)
421 VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
422 mutex_lock(&kvm->lock);
423 if (atomic_read(&kvm->online_vcpus) == 0) {
424 kvm->arch.use_cmma = 1;
427 mutex_unlock(&kvm->lock);
429 case KVM_S390_VM_MEM_CLR_CMMA:
431 if (!kvm->arch.use_cmma)
434 VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
435 mutex_lock(&kvm->lock);
436 idx = srcu_read_lock(&kvm->srcu);
437 s390_reset_cmma(kvm->arch.gmap->mm);
438 srcu_read_unlock(&kvm->srcu, idx);
439 mutex_unlock(&kvm->lock);
442 case KVM_S390_VM_MEM_LIMIT_SIZE: {
443 unsigned long new_limit;
445 if (kvm_is_ucontrol(kvm))
448 if (get_user(new_limit, (u64 __user *)attr->addr))
451 if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
452 new_limit > kvm->arch.mem_limit)
458 /* gmap_alloc takes last usable address */
459 if (new_limit != KVM_S390_NO_MEM_LIMIT)
463 mutex_lock(&kvm->lock);
464 if (atomic_read(&kvm->online_vcpus) == 0) {
465 /* gmap_alloc will round the limit up */
466 struct gmap *new = gmap_alloc(current->mm, new_limit);
471 gmap_free(kvm->arch.gmap);
473 kvm->arch.gmap = new;
477 mutex_unlock(&kvm->lock);
478 VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
479 VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
480 (void *) kvm->arch.gmap->asce);
490 static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
492 static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
494 struct kvm_vcpu *vcpu;
497 if (!test_kvm_facility(kvm, 76))
500 mutex_lock(&kvm->lock);
501 switch (attr->attr) {
502 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
504 kvm->arch.crypto.crycb->aes_wrapping_key_mask,
505 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
506 kvm->arch.crypto.aes_kw = 1;
507 VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
509 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
511 kvm->arch.crypto.crycb->dea_wrapping_key_mask,
512 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
513 kvm->arch.crypto.dea_kw = 1;
514 VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
516 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
517 kvm->arch.crypto.aes_kw = 0;
518 memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
519 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
520 VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
522 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
523 kvm->arch.crypto.dea_kw = 0;
524 memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
525 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
526 VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
529 mutex_unlock(&kvm->lock);
533 kvm_for_each_vcpu(i, vcpu, kvm) {
534 kvm_s390_vcpu_crypto_setup(vcpu);
537 mutex_unlock(&kvm->lock);
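/*
 * The wrapping-key configuration is cached per VCPU in the SIE control
 * block (ECB3_AES/ECB3_DEA), so every VCPU has to pass through
 * kvm_s390_vcpu_crypto_setup() again before re-entering SIE after a
 * crypto attribute was changed.
 */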
541 static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
545 if (copy_from_user(&gtod_high, (void __user *)attr->addr,
551 VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);
556 static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
560 if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
563 kvm_s390_set_tod_clock(kvm, gtod);
564 VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
568 static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
575 switch (attr->attr) {
576 case KVM_S390_VM_TOD_HIGH:
577 ret = kvm_s390_set_tod_high(kvm, attr);
579 case KVM_S390_VM_TOD_LOW:
580 ret = kvm_s390_set_tod_low(kvm, attr);
589 static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
593 if (copy_to_user((void __user *)attr->addr, &gtod_high,
596 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);
601 static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
605 gtod = kvm_s390_get_tod_clock_fast(kvm);
606 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
608 VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);
613 static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
620 switch (attr->attr) {
621 case KVM_S390_VM_TOD_HIGH:
622 ret = kvm_s390_get_tod_high(kvm, attr);
624 case KVM_S390_VM_TOD_LOW:
625 ret = kvm_s390_get_tod_low(kvm, attr);
634 static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
636 struct kvm_s390_vm_cpu_processor *proc;
639 mutex_lock(&kvm->lock);
640 if (atomic_read(&kvm->online_vcpus)) {
644 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
649 if (!copy_from_user(proc, (void __user *)attr->addr,
651 memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
652 sizeof(struct cpuid));
653 kvm->arch.model.ibc = proc->ibc;
654 memcpy(kvm->arch.model.fac->list, proc->fac_list,
655 S390_ARCH_FAC_LIST_SIZE_BYTE);
660 mutex_unlock(&kvm->lock);
664 static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
668 switch (attr->attr) {
669 case KVM_S390_VM_CPU_PROCESSOR:
670 ret = kvm_s390_set_processor(kvm, attr);
676 static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
678 struct kvm_s390_vm_cpu_processor *proc;
681 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
686 memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
687 proc->ibc = kvm->arch.model.ibc;
688 memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
689 if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
696 static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
698 struct kvm_s390_vm_cpu_machine *mach;
701 mach = kzalloc(sizeof(*mach), GFP_KERNEL);
706 get_cpu_id((struct cpuid *) &mach->cpuid);
707 mach->ibc = sclp.ibc;
708 memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
709 S390_ARCH_FAC_LIST_SIZE_BYTE);
710 memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
711 S390_ARCH_FAC_LIST_SIZE_BYTE);
712 if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
719 static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
723 switch (attr->attr) {
724 case KVM_S390_VM_CPU_PROCESSOR:
725 ret = kvm_s390_get_processor(kvm, attr);
727 case KVM_S390_VM_CPU_MACHINE:
728 ret = kvm_s390_get_machine(kvm, attr);
734 static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
738 switch (attr->group) {
739 case KVM_S390_VM_MEM_CTRL:
740 ret = kvm_s390_set_mem_control(kvm, attr);
742 case KVM_S390_VM_TOD:
743 ret = kvm_s390_set_tod(kvm, attr);
745 case KVM_S390_VM_CPU_MODEL:
746 ret = kvm_s390_set_cpu_model(kvm, attr);
748 case KVM_S390_VM_CRYPTO:
749 ret = kvm_s390_vm_set_crypto(kvm, attr);
759 static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
763 switch (attr->group) {
764 case KVM_S390_VM_MEM_CTRL:
765 ret = kvm_s390_get_mem_control(kvm, attr);
767 case KVM_S390_VM_TOD:
768 ret = kvm_s390_get_tod(kvm, attr);
770 case KVM_S390_VM_CPU_MODEL:
771 ret = kvm_s390_get_cpu_model(kvm, attr);
781 static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
785 switch (attr->group) {
786 case KVM_S390_VM_MEM_CTRL:
787 switch (attr->attr) {
788 case KVM_S390_VM_MEM_ENABLE_CMMA:
789 case KVM_S390_VM_MEM_CLR_CMMA:
790 case KVM_S390_VM_MEM_LIMIT_SIZE:
798 case KVM_S390_VM_TOD:
799 switch (attr->attr) {
800 case KVM_S390_VM_TOD_LOW:
801 case KVM_S390_VM_TOD_HIGH:
809 case KVM_S390_VM_CPU_MODEL:
810 switch (attr->attr) {
811 case KVM_S390_VM_CPU_PROCESSOR:
812 case KVM_S390_VM_CPU_MACHINE:
820 case KVM_S390_VM_CRYPTO:
821 switch (attr->attr) {
822 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
823 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
824 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
825 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
841 static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
845 unsigned long curkey;
848 if (args->flags != 0)
851 /* Is this guest using storage keys? */
852 if (!mm_use_skey(current->mm))
853 return KVM_S390_GET_SKEYS_NONE;
855 /* Enforce sane limit on memory allocation */
856 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
859 keys = kmalloc_array(args->count, sizeof(uint8_t),
860 GFP_KERNEL | __GFP_NOWARN);
862 keys = vmalloc(sizeof(uint8_t) * args->count);
866 for (i = 0; i < args->count; i++) {
867 hva = gfn_to_hva(kvm, args->start_gfn + i);
868 if (kvm_is_error_hva(hva)) {
873 curkey = get_guest_storage_key(current->mm, hva);
874 if (IS_ERR_VALUE(curkey)) {
881 r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
882 sizeof(uint8_t) * args->count);
890 static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
896 if (args->flags != 0)
899 /* Enforce sane limit on memory allocation */
900 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
903 keys = kmalloc_array(args->count, sizeof(uint8_t),
904 GFP_KERNEL | __GFP_NOWARN);
906 keys = vmalloc(sizeof(uint8_t) * args->count);
910 r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
911 sizeof(uint8_t) * args->count);
917 /* Enable storage key handling for the guest */
918 r = s390_enable_skey();
922 for (i = 0; i < args->count; i++) {
923 hva = gfn_to_hva(kvm, args->start_gfn + i);
924 if (kvm_is_error_hva(hva)) {
929 /* Lowest order bit is reserved */
930 if (keys[i] & 0x01) {
935 r = set_guest_storage_key(current->mm, hva,
936 (unsigned long)keys[i], 0);
945 long kvm_arch_vm_ioctl(struct file *filp,
946 unsigned int ioctl, unsigned long arg)
948 struct kvm *kvm = filp->private_data;
949 void __user *argp = (void __user *)arg;
950 struct kvm_device_attr attr;
954 case KVM_S390_INTERRUPT: {
955 struct kvm_s390_interrupt s390int;
958 if (copy_from_user(&s390int, argp, sizeof(s390int)))
960 r = kvm_s390_inject_vm(kvm, &s390int);
963 case KVM_ENABLE_CAP: {
964 struct kvm_enable_cap cap;
966 if (copy_from_user(&cap, argp, sizeof(cap)))
968 r = kvm_vm_ioctl_enable_cap(kvm, &cap);
971 case KVM_CREATE_IRQCHIP: {
972 struct kvm_irq_routing_entry routing;
975 if (kvm->arch.use_irqchip) {
976 /* Set up dummy routing. */
977 memset(&routing, 0, sizeof(routing));
978 r = kvm_set_irq_routing(kvm, &routing, 0, 0);
982 case KVM_SET_DEVICE_ATTR: {
984 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
986 r = kvm_s390_vm_set_attr(kvm, &attr);
989 case KVM_GET_DEVICE_ATTR: {
991 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
993 r = kvm_s390_vm_get_attr(kvm, &attr);
996 case KVM_HAS_DEVICE_ATTR: {
998 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
1000 r = kvm_s390_vm_has_attr(kvm, &attr);
1003 case KVM_S390_GET_SKEYS: {
1004 struct kvm_s390_skeys args;
1007 if (copy_from_user(&args, argp,
1008 sizeof(struct kvm_s390_skeys)))
1010 r = kvm_s390_get_skeys(kvm, &args);
1013 case KVM_S390_SET_SKEYS: {
1014 struct kvm_s390_skeys args;
1017 if (copy_from_user(&args, argp,
1018 sizeof(struct kvm_s390_skeys)))
1020 r = kvm_s390_set_skeys(kvm, &args);
1030 static int kvm_s390_query_ap_config(u8 *config)
1032 u32 fcn_code = 0x04000000UL;
1035 memset(config, 0, 128);
1039 ".long 0xb2af0000\n" /* PQAP(QCI) */
1045 : "r" (fcn_code), "r" (config)
1046 : "cc", "0", "2", "memory"
1052 static int kvm_s390_apxa_installed(void)
1057 if (test_facility(12)) {
1058 cc = kvm_s390_query_ap_config(config);
1061 pr_err("PQAP(QCI) failed with cc=%d", cc);
1063 return config[0] & 0x40;
1069 static void kvm_s390_set_crycb_format(struct kvm *kvm)
1071 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
1073 if (kvm_s390_apxa_installed())
1074 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
1076 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
1079 static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
1082 cpu_id->version = 0xff;
1085 static int kvm_s390_crypto_init(struct kvm *kvm)
1087 if (!test_kvm_facility(kvm, 76))
1090 kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
1091 GFP_KERNEL | GFP_DMA);
1092 if (!kvm->arch.crypto.crycb)
1095 kvm_s390_set_crycb_format(kvm);
1097 /* Enable AES/DEA protected key functions by default */
1098 kvm->arch.crypto.aes_kw = 1;
1099 kvm->arch.crypto.dea_kw = 1;
1100 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
1101 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
1102 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
1103 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
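/*
 * The wrapping key masks are freshly randomized for every VM, so a
 * protected key wrapped under one VM's mask cannot be unwrapped under
 * another's unless userspace migrates the masks explicitly.
 */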
1108 static void sca_dispose(struct kvm *kvm)
1110 if (kvm->arch.use_esca)
1111 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
1113 free_page((unsigned long)(kvm->arch.sca));
1114 kvm->arch.sca = NULL;
1117 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
1120 char debug_name[16];
1121 static unsigned long sca_offset;
1124 #ifdef CONFIG_KVM_S390_UCONTROL
1125 if (type & ~KVM_VM_S390_UCONTROL)
1127 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
1134 rc = s390_enable_sie();
1140 kvm->arch.use_esca = 0; /* start with basic SCA */
1141 rwlock_init(&kvm->arch.sca_lock);
1142 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(GFP_KERNEL);
1145 spin_lock(&kvm_lock);
1147 if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
1149 kvm->arch.sca = (struct bsca_block *)
1150 ((char *) kvm->arch.sca + sca_offset);
1151 spin_unlock(&kvm_lock);
1153 sprintf(debug_name, "kvm-%u", current->pid);
1155 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
1160 * The architectural maximum number of facility bits is 16 kbit. Storing
1161 * them requires 2 kbyte of memory. Thus we need a full
1162 * page to hold the guest facility list (arch.model.fac->list) and the
1163 * facility mask (arch.model.fac->mask). The address must fit in
1164 * 31 bits and be word aligned.
1166 kvm->arch.model.fac =
1167 (struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
1168 if (!kvm->arch.model.fac)
1171 /* Populate the facility mask initially. */
1172 memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
1173 S390_ARCH_FAC_LIST_SIZE_BYTE);
1174 for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
1175 if (i < kvm_s390_fac_list_mask_size())
1176 kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
1178 kvm->arch.model.fac->mask[i] = 0UL;
1181 /* Populate the facility list initially. */
1182 memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
1183 S390_ARCH_FAC_LIST_SIZE_BYTE);
1185 kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
1186 kvm->arch.model.ibc = sclp.ibc & 0x0fff;
1188 if (kvm_s390_crypto_init(kvm) < 0)
1191 spin_lock_init(&kvm->arch.float_int.lock);
1192 for (i = 0; i < FIRQ_LIST_COUNT; i++)
1193 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
1194 init_waitqueue_head(&kvm->arch.ipte_wq);
1195 mutex_init(&kvm->arch.ipte_mutex);
1197 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
1198 VM_EVENT(kvm, 3, "vm created with type %lu", type);
1200 if (type & KVM_VM_S390_UCONTROL) {
1201 kvm->arch.gmap = NULL;
1202 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
1204 if (sclp.hamax == U64_MAX)
1205 kvm->arch.mem_limit = TASK_MAX_SIZE;
1207 kvm->arch.mem_limit = min_t(unsigned long, TASK_MAX_SIZE,
1209 kvm->arch.gmap = gmap_alloc(current->mm, kvm->arch.mem_limit - 1);
1210 if (!kvm->arch.gmap)
1212 kvm->arch.gmap->private = kvm;
1213 kvm->arch.gmap->pfault_enabled = 0;
1216 kvm->arch.css_support = 0;
1217 kvm->arch.use_irqchip = 0;
1218 kvm->arch.epoch = 0;
1220 spin_lock_init(&kvm->arch.start_stop_lock);
1221 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
1225 kfree(kvm->arch.crypto.crycb);
1226 free_page((unsigned long)kvm->arch.model.fac);
1227 debug_unregister(kvm->arch.dbf);
1229 KVM_EVENT(3, "creation of vm failed: %d", rc);
1233 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1235 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
1236 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
1237 kvm_s390_clear_local_irqs(vcpu);
1238 kvm_clear_async_pf_completion_queue(vcpu);
1239 if (!kvm_is_ucontrol(vcpu->kvm))
1242 if (kvm_is_ucontrol(vcpu->kvm))
1243 gmap_free(vcpu->arch.gmap);
1245 if (vcpu->kvm->arch.use_cmma)
1246 kvm_s390_vcpu_unsetup_cmma(vcpu);
1247 kfree(vcpu->arch.guest_fpregs.fprs);
1248 free_page((unsigned long)(vcpu->arch.sie_block));
1250 kvm_vcpu_uninit(vcpu);
1251 kmem_cache_free(kvm_vcpu_cache, vcpu);
1254 static void kvm_free_vcpus(struct kvm *kvm)
1257 struct kvm_vcpu *vcpu;
1259 kvm_for_each_vcpu(i, vcpu, kvm)
1260 kvm_arch_vcpu_destroy(vcpu);
1262 mutex_lock(&kvm->lock);
1263 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
1264 kvm->vcpus[i] = NULL;
1266 atomic_set(&kvm->online_vcpus, 0);
1267 mutex_unlock(&kvm->lock);
1270 void kvm_arch_destroy_vm(struct kvm *kvm)
1272 kvm_free_vcpus(kvm);
1273 free_page((unsigned long)kvm->arch.model.fac);
1275 debug_unregister(kvm->arch.dbf);
1276 kfree(kvm->arch.crypto.crycb);
1277 if (!kvm_is_ucontrol(kvm))
1278 gmap_free(kvm->arch.gmap);
1279 kvm_s390_destroy_adapters(kvm);
1280 kvm_s390_clear_float_irqs(kvm);
1281 KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
1284 /* Section: vcpu related */
1285 static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
1287 vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
1288 if (!vcpu->arch.gmap)
1290 vcpu->arch.gmap->private = vcpu->kvm;
1295 static void sca_del_vcpu(struct kvm_vcpu *vcpu)
1297 read_lock(&vcpu->kvm->arch.sca_lock);
1298 if (vcpu->kvm->arch.use_esca) {
1299 struct esca_block *sca = vcpu->kvm->arch.sca;
1301 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
1302 sca->cpu[vcpu->vcpu_id].sda = 0;
1304 struct bsca_block *sca = vcpu->kvm->arch.sca;
1306 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
1307 sca->cpu[vcpu->vcpu_id].sda = 0;
1309 read_unlock(&vcpu->kvm->arch.sca_lock);
1312 static void sca_add_vcpu(struct kvm_vcpu *vcpu)
1314 read_lock(&vcpu->kvm->arch.sca_lock);
1315 if (vcpu->kvm->arch.use_esca) {
1316 struct esca_block *sca = vcpu->kvm->arch.sca;
1318 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
1319 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
1320 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
1321 vcpu->arch.sie_block->ecb2 |= 0x04U;
1322 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
1324 struct bsca_block *sca = vcpu->kvm->arch.sca;
1326 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
1327 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
1328 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
1329 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
1331 read_unlock(&vcpu->kvm->arch.sca_lock);
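/*
 * The SCA origin is split across two SIE fields: scaoh carries the
 * upper 32 bits of the block address and scaol the lower 32 bits. For
 * the extended format the low bits of scaol are reserved (hence the
 * ~0x3fU mask) and the 0x04 bit in ecb2 tells the hardware to treat
 * the block as an ESCA.
 */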
1334 /* Basic SCA to Extended SCA data copy routines */
1335 static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
1338 d->sigp_ctrl.c = s->sigp_ctrl.c;
1339 d->sigp_ctrl.scn = s->sigp_ctrl.scn;
1342 static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
1346 d->ipte_control = s->ipte_control;
1348 for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
1349 sca_copy_entry(&d->cpu[i], &s->cpu[i]);
1352 static int sca_switch_to_extended(struct kvm *kvm)
1354 struct bsca_block *old_sca = kvm->arch.sca;
1355 struct esca_block *new_sca;
1356 struct kvm_vcpu *vcpu;
1357 unsigned int vcpu_idx;
1360 new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
1364 scaoh = (u32)((u64)(new_sca) >> 32);
1365 scaol = (u32)(u64)(new_sca) & ~0x3fU;
1367 kvm_s390_vcpu_block_all(kvm);
1368 write_lock(&kvm->arch.sca_lock);
1370 sca_copy_b_to_e(new_sca, old_sca);
1372 kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
1373 vcpu->arch.sie_block->scaoh = scaoh;
1374 vcpu->arch.sie_block->scaol = scaol;
1375 vcpu->arch.sie_block->ecb2 |= 0x04U;
1377 kvm->arch.sca = new_sca;
1378 kvm->arch.use_esca = 1;
1380 write_unlock(&kvm->arch.sca_lock);
1381 kvm_s390_vcpu_unblock_all(kvm);
1383 free_page((unsigned long)old_sca);
1385 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
1386 old_sca, kvm->arch.sca);
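/*
 * Blocking all VCPUs and holding sca_lock for writing guarantees that
 * no CPU is inside SIE with the old SCA while the entries are copied
 * and the pointers are rewired, so the basic SCA page can be freed
 * safely afterwards.
 */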
1390 static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
1394 if (id < KVM_S390_BSCA_CPU_SLOTS)
1399 mutex_lock(&kvm->lock);
1400 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
1401 mutex_unlock(&kvm->lock);
1403 return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
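/*
 * The switch to the extended SCA happens lazily: a VM keeps the
 * one-page basic SCA until a VCPU id beyond the basic slot range is
 * actually requested.
 */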
1406 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
1408 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
1409 kvm_clear_async_pf_completion_queue(vcpu);
1410 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
1416 if (test_kvm_facility(vcpu->kvm, 64))
1417 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
1418 if (test_kvm_facility(vcpu->kvm, 129))
1419 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
1421 if (kvm_is_ucontrol(vcpu->kvm))
1422 return __kvm_ucontrol_vcpu_init(vcpu);
1428 * Backs up the current FP/VX register save area to a particular
1429 * destination. Used to switch between different register save
1432 static inline void save_fpu_to(struct fpu *dst)
1434 dst->fpc = current->thread.fpu.fpc;
1435 dst->regs = current->thread.fpu.regs;
1439 * Switches the FP/VX register save area from which to lazily
1440 * restore register contents.
1442 static inline void load_fpu_from(struct fpu *from)
1444 current->thread.fpu.fpc = from->fpc;
1445 current->thread.fpu.regs = from->regs;
1448 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1450 /* Save host register state */
1452 save_fpu_to(&vcpu->arch.host_fpregs);
1454 if (test_kvm_facility(vcpu->kvm, 129)) {
1455 current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
1457 * Use the register save area in the SIE-control block
1458 * for register restore and save in kvm_arch_vcpu_put()
1460 current->thread.fpu.vxrs =
1461 (__vector128 *)&vcpu->run->s.regs.vrs;
1463 load_fpu_from(&vcpu->arch.guest_fpregs);
1465 if (test_fp_ctl(current->thread.fpu.fpc))
1466 /* User space provided an invalid FPC, let's clear it */
1467 current->thread.fpu.fpc = 0;
1469 save_access_regs(vcpu->arch.host_acrs);
1470 restore_access_regs(vcpu->run->s.regs.acrs);
1471 gmap_enable(vcpu->arch.gmap);
1472 atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
1475 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1477 atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
1478 gmap_disable(vcpu->arch.gmap);
1482 if (test_kvm_facility(vcpu->kvm, 129))
1484 * kvm_arch_vcpu_load() set up the register save area to
1485 * the &vcpu->run->s.regs.vrs and, thus, the vector registers
1486 * are already saved. Only the floating-point control must be
1489 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
1491 save_fpu_to(&vcpu->arch.guest_fpregs);
1492 load_fpu_from(&vcpu->arch.host_fpregs);
1494 save_access_regs(vcpu->run->s.regs.acrs);
1495 restore_access_regs(vcpu->arch.host_acrs);
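/*
 * FP/VX and access registers are switched lazily: across the whole
 * load/put cycle the host state is parked in host_fpregs/host_acrs
 * while the guest state lives in the regular per-task save areas, so
 * the normal kernel FPU handling applies while the VCPU is loaded.
 */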
1498 static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
1500 /* this equals initial cpu reset in pop, but we don't switch to ESA */
1501 vcpu->arch.sie_block->gpsw.mask = 0UL;
1502 vcpu->arch.sie_block->gpsw.addr = 0UL;
1503 kvm_s390_set_prefix(vcpu, 0);
1504 vcpu->arch.sie_block->cputm = 0UL;
1505 vcpu->arch.sie_block->ckc = 0UL;
1506 vcpu->arch.sie_block->todpr = 0;
1507 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
1508 vcpu->arch.sie_block->gcr[0] = 0xE0UL;
1509 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
1510 vcpu->arch.guest_fpregs.fpc = 0;
1511 asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
1512 vcpu->arch.sie_block->gbea = 1;
1513 vcpu->arch.sie_block->pp = 0;
1514 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
1515 kvm_clear_async_pf_completion_queue(vcpu);
1516 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
1517 kvm_s390_vcpu_stop(vcpu);
1518 kvm_s390_clear_local_irqs(vcpu);
1521 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
1523 mutex_lock(&vcpu->kvm->lock);
1525 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
1527 mutex_unlock(&vcpu->kvm->lock);
1528 if (!kvm_is_ucontrol(vcpu->kvm)) {
1529 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
1535 static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
1537 if (!test_kvm_facility(vcpu->kvm, 76))
1540 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
1542 if (vcpu->kvm->arch.crypto.aes_kw)
1543 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
1544 if (vcpu->kvm->arch.crypto.dea_kw)
1545 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
1547 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
1550 void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
1552 free_page(vcpu->arch.sie_block->cbrlo);
1553 vcpu->arch.sie_block->cbrlo = 0;
1556 int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
1558 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
1559 if (!vcpu->arch.sie_block->cbrlo)
1562 vcpu->arch.sie_block->ecb2 |= 0x80;
1563 vcpu->arch.sie_block->ecb2 &= ~0x08;
1567 static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
1569 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
1571 vcpu->arch.cpu_id = model->cpu_id;
1572 vcpu->arch.sie_block->ibc = model->ibc;
1573 vcpu->arch.sie_block->fac = (int) (long) model->fac->list;
1576 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1580 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
1584 if (test_kvm_facility(vcpu->kvm, 78))
1585 atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
1586 else if (test_kvm_facility(vcpu->kvm, 8))
1587 atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
1589 kvm_s390_vcpu_setup_model(vcpu);
1591 vcpu->arch.sie_block->ecb = 6;
1592 if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
1593 vcpu->arch.sie_block->ecb |= 0x10;
1595 vcpu->arch.sie_block->ecb2 = 8;
1596 vcpu->arch.sie_block->eca = 0xC1002000U;
1598 vcpu->arch.sie_block->eca |= 1;
1599 if (sclp.has_sigpif)
1600 vcpu->arch.sie_block->eca |= 0x10000000U;
1601 if (test_kvm_facility(vcpu->kvm, 64))
1602 vcpu->arch.sie_block->ecb3 |= 0x01;
1603 if (test_kvm_facility(vcpu->kvm, 129)) {
1604 vcpu->arch.sie_block->eca |= 0x00020000;
1605 vcpu->arch.sie_block->ecd |= 0x20000000;
1607 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
1608 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
1610 if (vcpu->kvm->arch.use_cmma) {
1611 rc = kvm_s390_vcpu_setup_cmma(vcpu);
1615 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1616 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
1618 kvm_s390_vcpu_crypto_setup(vcpu);
1623 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
1626 struct kvm_vcpu *vcpu;
1627 struct sie_page *sie_page;
1630 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
1635 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
1639 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
1643 vcpu->arch.sie_block = &sie_page->sie_block;
1644 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
1646 vcpu->arch.sie_block->icpua = id;
1647 spin_lock_init(&vcpu->arch.local_int.lock);
1648 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
1649 vcpu->arch.local_int.wq = &vcpu->wq;
1650 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
1653 * Allocate a save area for floating-point registers. If the vector
1654 * extension is available, register contents are saved in the SIE
1655 * control block. The allocated save area is still required in
1656 * particular places, for example, in kvm_s390_vcpu_store_status().
1658 vcpu->arch.guest_fpregs.fprs = kzalloc(sizeof(freg_t) * __NUM_FPRS,
1660 if (!vcpu->arch.guest_fpregs.fprs)
1661 goto out_free_sie_block;
1663 rc = kvm_vcpu_init(vcpu, kvm, id);
1666 VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
1667 vcpu->arch.sie_block);
1668 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
1672 kfree(vcpu->arch.guest_fpregs.fprs);
1674 free_page((unsigned long)(vcpu->arch.sie_block));
1676 kmem_cache_free(kvm_vcpu_cache, vcpu);
1681 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
1683 return kvm_s390_vcpu_has_irq(vcpu, 0);
1686 void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
1688 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
1692 void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
1694 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
1697 static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
1699 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
1703 static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
1705 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
1709 * Kick a guest cpu out of SIE and wait until SIE is not running.
1710 * If the CPU is not running (e.g. waiting as idle) the function will
1711 * return immediately. */
1712 void exit_sie(struct kvm_vcpu *vcpu)
1714 atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
1715 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
1719 /* Kick a guest cpu out of SIE to process a request synchronously */
1720 void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
1722 kvm_make_request(req, vcpu);
1723 kvm_s390_vcpu_request(vcpu);
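/*
 * A synchronous request is thus a three-step operation: queue the
 * request, raise PROG_REQUEST in prog20 so the VCPU cannot re-enter
 * SIE, and kick it out of SIE via the stop interrupt until prog0c
 * shows that it has left (see exit_sie() above).
 */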
1726 static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
1729 struct kvm *kvm = gmap->private;
1730 struct kvm_vcpu *vcpu;
1732 kvm_for_each_vcpu(i, vcpu, kvm) {
1733 /* match against both prefix pages */
1734 if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
1735 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
1736 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
1741 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
1743 /* kvm common code refers to this, but never calls it */
1748 static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
1749 struct kvm_one_reg *reg)
1754 case KVM_REG_S390_TODPR:
1755 r = put_user(vcpu->arch.sie_block->todpr,
1756 (u32 __user *)reg->addr);
1758 case KVM_REG_S390_EPOCHDIFF:
1759 r = put_user(vcpu->arch.sie_block->epoch,
1760 (u64 __user *)reg->addr);
1762 case KVM_REG_S390_CPU_TIMER:
1763 r = put_user(vcpu->arch.sie_block->cputm,
1764 (u64 __user *)reg->addr);
1766 case KVM_REG_S390_CLOCK_COMP:
1767 r = put_user(vcpu->arch.sie_block->ckc,
1768 (u64 __user *)reg->addr);
1770 case KVM_REG_S390_PFTOKEN:
1771 r = put_user(vcpu->arch.pfault_token,
1772 (u64 __user *)reg->addr);
1774 case KVM_REG_S390_PFCOMPARE:
1775 r = put_user(vcpu->arch.pfault_compare,
1776 (u64 __user *)reg->addr);
1778 case KVM_REG_S390_PFSELECT:
1779 r = put_user(vcpu->arch.pfault_select,
1780 (u64 __user *)reg->addr);
1782 case KVM_REG_S390_PP:
1783 r = put_user(vcpu->arch.sie_block->pp,
1784 (u64 __user *)reg->addr);
1786 case KVM_REG_S390_GBEA:
1787 r = put_user(vcpu->arch.sie_block->gbea,
1788 (u64 __user *)reg->addr);
1797 static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
1798 struct kvm_one_reg *reg)
1803 case KVM_REG_S390_TODPR:
1804 r = get_user(vcpu->arch.sie_block->todpr,
1805 (u32 __user *)reg->addr);
1807 case KVM_REG_S390_EPOCHDIFF:
1808 r = get_user(vcpu->arch.sie_block->epoch,
1809 (u64 __user *)reg->addr);
1811 case KVM_REG_S390_CPU_TIMER:
1812 r = get_user(vcpu->arch.sie_block->cputm,
1813 (u64 __user *)reg->addr);
1815 case KVM_REG_S390_CLOCK_COMP:
1816 r = get_user(vcpu->arch.sie_block->ckc,
1817 (u64 __user *)reg->addr);
1819 case KVM_REG_S390_PFTOKEN:
1820 r = get_user(vcpu->arch.pfault_token,
1821 (u64 __user *)reg->addr);
1822 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
1823 kvm_clear_async_pf_completion_queue(vcpu);
1825 case KVM_REG_S390_PFCOMPARE:
1826 r = get_user(vcpu->arch.pfault_compare,
1827 (u64 __user *)reg->addr);
1829 case KVM_REG_S390_PFSELECT:
1830 r = get_user(vcpu->arch.pfault_select,
1831 (u64 __user *)reg->addr);
1833 case KVM_REG_S390_PP:
1834 r = get_user(vcpu->arch.sie_block->pp,
1835 (u64 __user *)reg->addr);
1837 case KVM_REG_S390_GBEA:
1838 r = get_user(vcpu->arch.sie_block->gbea,
1839 (u64 __user *)reg->addr);
1848 static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
1850 kvm_s390_vcpu_initial_reset(vcpu);
1854 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1856 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
1860 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1862 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
1866 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1867 struct kvm_sregs *sregs)
1869 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
1870 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
1871 restore_access_regs(vcpu->run->s.regs.acrs);
1875 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1876 struct kvm_sregs *sregs)
1878 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
1879 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
1883 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1885 if (test_fp_ctl(fpu->fpc))
1887 memcpy(vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
1888 vcpu->arch.guest_fpregs.fpc = fpu->fpc;
1890 load_fpu_from(&vcpu->arch.guest_fpregs);
1894 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1896 memcpy(&fpu->fprs, vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
1897 fpu->fpc = vcpu->arch.guest_fpregs.fpc;
1901 static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
1905 if (!is_vcpu_stopped(vcpu))
1908 vcpu->run->psw_mask = psw.mask;
1909 vcpu->run->psw_addr = psw.addr;
1914 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1915 struct kvm_translation *tr)
1917 return -EINVAL; /* not implemented yet */
1920 #define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
1921 KVM_GUESTDBG_USE_HW_BP | \
1922 KVM_GUESTDBG_ENABLE)
1924 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1925 struct kvm_guest_debug *dbg)
1929 vcpu->guest_debug = 0;
1930 kvm_s390_clear_bp_data(vcpu);
1932 if (dbg->control & ~VALID_GUESTDBG_FLAGS)
1935 if (dbg->control & KVM_GUESTDBG_ENABLE) {
1936 vcpu->guest_debug = dbg->control;
1937 /* enforce guest PER */
1938 atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
1940 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
1941 rc = kvm_s390_import_bp_data(vcpu, dbg);
1943 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
1944 vcpu->arch.guestdbg.last_bp = 0;
1948 vcpu->guest_debug = 0;
1949 kvm_s390_clear_bp_data(vcpu);
1950 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
1956 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
1957 struct kvm_mp_state *mp_state)
1959 /* CHECK_STOP and LOAD are not supported yet */
1960 return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
1961 KVM_MP_STATE_OPERATING;
1964 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
1965 struct kvm_mp_state *mp_state)
1969 /* user space knows about this interface - let it control the state */
1970 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
1972 switch (mp_state->mp_state) {
1973 case KVM_MP_STATE_STOPPED:
1974 kvm_s390_vcpu_stop(vcpu);
1976 case KVM_MP_STATE_OPERATING:
1977 kvm_s390_vcpu_start(vcpu);
1979 case KVM_MP_STATE_LOAD:
1980 case KVM_MP_STATE_CHECK_STOP:
1981 /* fall through - CHECK_STOP and LOAD are not supported yet */
1989 static bool ibs_enabled(struct kvm_vcpu *vcpu)
1991 return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
1994 static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
1997 kvm_s390_vcpu_request_handled(vcpu);
1998 if (!vcpu->requests)
2001 * We use MMU_RELOAD just to re-arm the ipte notifier for the
2002 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
2003 * This ensures that the ipte instruction for this request has
2004 * already finished. We might race against a second unmapper that
2005 * wants to set the blocking bit. Let's just retry the request loop.
2007 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
2009 rc = gmap_ipte_notify(vcpu->arch.gmap,
2010 kvm_s390_get_prefix(vcpu),
2017 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
2018 vcpu->arch.sie_block->ihcpu = 0xffff;
2022 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
2023 if (!ibs_enabled(vcpu)) {
2024 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
2025 atomic_or(CPUSTAT_IBS,
2026 &vcpu->arch.sie_block->cpuflags);
2031 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
2032 if (ibs_enabled(vcpu)) {
2033 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
2034 atomic_andnot(CPUSTAT_IBS,
2035 &vcpu->arch.sie_block->cpuflags);
2040 /* nothing to do, just clear the request */
2041 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
2046 void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
2048 struct kvm_vcpu *vcpu;
2051 mutex_lock(&kvm->lock);
2053 kvm->arch.epoch = tod - get_tod_clock();
2054 kvm_s390_vcpu_block_all(kvm);
2055 kvm_for_each_vcpu(i, vcpu, kvm)
2056 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
2057 kvm_s390_vcpu_unblock_all(kvm);
2059 mutex_unlock(&kvm->lock);
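/*
 * The guest's TOD clock is always derived as host TOD + epoch, so
 * storing epoch = tod - get_tod_clock() makes a subsequent guest STCK
 * return exactly the requested value: with a host TOD of 0x2000 and a
 * requested guest TOD of 0x3000 the epoch becomes 0x1000, and the
 * guest reads 0x2000 + 0x1000 = 0x3000.
 */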
2063 * kvm_arch_fault_in_page - fault-in guest page if necessary
2064 * @vcpu: The corresponding virtual cpu
2065 * @gpa: Guest physical address
2066 * @writable: Whether the page should be writable or not
2068 * Make sure that a guest page has been faulted-in on the host.
2070 * Return: Zero on success, negative error code otherwise.
2072 long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
2074 return gmap_fault(vcpu->arch.gmap, gpa,
2075 writable ? FAULT_FLAG_WRITE : 0);
2078 static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
2079 unsigned long token)
2081 struct kvm_s390_interrupt inti;
2082 struct kvm_s390_irq irq;
2085 irq.u.ext.ext_params2 = token;
2086 irq.type = KVM_S390_INT_PFAULT_INIT;
2087 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
2089 inti.type = KVM_S390_INT_PFAULT_DONE;
2090 inti.parm64 = token;
2091 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
2095 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
2096 struct kvm_async_pf *work)
2098 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
2099 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
2102 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
2103 struct kvm_async_pf *work)
2105 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
2106 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
2109 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
2110 struct kvm_async_pf *work)
2112 /* s390 will always inject the page directly */
2115 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
2118 * s390 will always inject the page directly,
2119 * but we still want check_async_completion to clean up
2124 static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
2127 struct kvm_arch_async_pf arch;
2130 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2132 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
2133 vcpu->arch.pfault_compare)
2135 if (psw_extint_disabled(vcpu))
2137 if (kvm_s390_vcpu_has_irq(vcpu, 0))
2139 if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
2141 if (!vcpu->arch.gmap->pfault_enabled)
2144 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
2145 hva += current->thread.gmap_addr & ~PAGE_MASK;
2146 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
2149 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
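/*
 * This sets up the pfault handshake: the token is read from guest real
 * storage and delivered with a PFAULT_INIT external interrupt when the
 * page is missing; once the page has been faulted in, a PFAULT_DONE
 * interrupt carrying the same token tells the guest that the waiting
 * task may be rescheduled.
 */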
2153 static int vcpu_pre_run(struct kvm_vcpu *vcpu)
2158 * On s390 notifications for arriving pages will be delivered directly
2159 * to the guest but the housekeeping for completed pfaults is
2160 * handled outside the worker.
2162 kvm_check_async_pf_completion(vcpu);
2164 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
2165 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
2170 if (test_cpu_flag(CIF_MCCK_PENDING))
2173 if (!kvm_is_ucontrol(vcpu->kvm)) {
2174 rc = kvm_s390_deliver_pending_interrupts(vcpu);
2179 rc = kvm_s390_handle_requests(vcpu);
2183 if (guestdbg_enabled(vcpu)) {
2184 kvm_s390_backup_guest_per_regs(vcpu);
2185 kvm_s390_patch_guest_per_regs(vcpu);
2188 vcpu->arch.sie_block->icptcode = 0;
2189 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
2190 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
2191 trace_kvm_s390_sie_enter(vcpu, cpuflags);
2196 static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
2198 psw_t *psw = &vcpu->arch.sie_block->gpsw;
2202 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
2203 trace_kvm_s390_sie_fault(vcpu);
2206 * We want to inject an addressing exception, which is defined as a
2207 * suppressing or terminating exception. However, since we came here
2208 * by a DAT access exception, the PSW still points to the faulting
2209 * instruction since DAT exceptions are nullifying. So we've got
2210 * to look up the current opcode to get the length of the instruction
2211 * to be able to forward the PSW.
2213 rc = read_guest(vcpu, psw->addr, 0, &opcode, 1);
2215 return kvm_s390_inject_prog_cond(vcpu, rc);
2216 psw->addr = __rewind_psw(*psw, -insn_length(opcode));
2218 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
2221 static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
2223 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
2224 vcpu->arch.sie_block->icptcode);
2225 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
2227 if (guestdbg_enabled(vcpu))
2228 kvm_s390_restore_guest_per_regs(vcpu);
2230 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
2231 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
2233 if (vcpu->arch.sie_block->icptcode > 0) {
2234 int rc = kvm_handle_sie_intercept(vcpu);
2236 if (rc != -EOPNOTSUPP)
2238 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
2239 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
2240 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
2241 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
2243 } else if (exit_reason != -EFAULT) {
2244 vcpu->stat.exit_null++;
2246 } else if (kvm_is_ucontrol(vcpu->kvm)) {
2247 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
2248 vcpu->run->s390_ucontrol.trans_exc_code =
2249 current->thread.gmap_addr;
2250 vcpu->run->s390_ucontrol.pgm_code = 0x10;
2252 } else if (current->thread.gmap_pfault) {
2253 trace_kvm_s390_major_guest_pfault(vcpu);
2254 current->thread.gmap_pfault = 0;
2255 if (kvm_arch_setup_async_pf(vcpu))
2257 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
2259 return vcpu_post_run_fault_in_sie(vcpu);
2262 static int __vcpu_run(struct kvm_vcpu *vcpu)
2264 int rc, exit_reason;
2267 * We try to hold kvm->srcu during most of vcpu_run (except when run-
2268 * ning the guest), so that memslots (and other stuff) are protected
2270 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2273 rc = vcpu_pre_run(vcpu);
2277 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
2279 * As PF_VCPU will be used in fault handler, between
2280 * guest_enter and guest_exit should be no uaccess.
2282 local_irq_disable();
2283 __kvm_guest_enter();
2285 exit_reason = sie64a(vcpu->arch.sie_block,
2286 vcpu->run->s.regs.gprs);
2287 local_irq_disable();
2290 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2292 rc = vcpu_post_run(vcpu, exit_reason);
2293 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
2295 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
2299 static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2301 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
2302 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
2303 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
2304 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
2305 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
2306 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
2307 /* some control register changes require a tlb flush */
2308 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2310 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
2311 vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
2312 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
2313 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
2314 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
2315 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
2317 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
2318 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
2319 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
2320 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
2321 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2322 kvm_clear_async_pf_completion_queue(vcpu);
2324 kvm_run->kvm_dirty_regs = 0;
2327 static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2329 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
2330 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
2331 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
2332 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
2333 kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
2334 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
2335 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
2336 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
2337 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
2338 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
2339 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
2340 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
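/*
 * sync_regs()/store_regs() mirror the sie_block state into the shared
 * kvm_run area, which lets userspace inspect and update registers
 * around KVM_RUN without extra ioctls (see the KVM_SYNC_* flags and
 * kvm_dirty_regs).
 */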
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		return 0;
	}
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		return -EINVAL;
	}

	sync_regs(vcpu, kvm_run);

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}
	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}
	if (rc == -EREMOTE) {
		/* userspace support is needed, kvm_run has been prepared */
		rc = 0;
	}

	store_regs(vcpu, kvm_run);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}
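/*
 * Illustrative sketch, not part of the original source: the matching
 * userspace run loop (vcpu_fd and the mmap()ed "run" area are assumed to
 * have been set up via KVM_CREATE_VCPU and KVM_GET_VCPU_MMAP_SIZE):
 *
 *	for (;;) {
 *		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0) {
 *			if (errno == EINTR)
 *				continue;	(interrupted by a signal)
 *			break;
 *		}
 *		if (run->exit_reason == KVM_EXIT_S390_SIEIC)
 *			handle_sie_intercept(run);	(hypothetical helper)
 *	}
 */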
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	unsigned int px;
	u64 clkcomp;
	int rc;

	px = kvm_s390_get_prefix(vcpu);
	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = 0;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = px;
	} else
		gpa -= __LC_FPREGS_SAVE_AREA;
	rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
			     vcpu->arch.guest_fpregs.fprs, 128);
	rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
			      &vcpu->arch.sie_block->gpsw, 16);
	rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
			      &px, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
			      &vcpu->arch.guest_fpregs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
			      &vcpu->arch.sie_block->todpr, 4);
	rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
			      &vcpu->arch.sie_block->cputm, 8);
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Let's update our copies before we save
	 * them into the save area.
	 */
	save_fpu_regs();
	if (test_kvm_facility(vcpu->kvm, 129)) {
		/*
		 * If the vector extension is available, the vector registers
		 * which overlap with the floating-point registers are saved
		 * in the SIE control block. Hence, extract the floating-point
		 * registers and the FPC value and store them in the
		 * guest_fpregs structure.
		 */
		vcpu->arch.guest_fpregs.fpc = current->thread.fpu.fpc;
		convert_vx_to_fp(vcpu->arch.guest_fpregs.fprs,
				 current->thread.fpu.vxrs);
	} else
		save_fpu_to(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}
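/*
 * Illustrative sketch, not part of the original source: userspace can
 * trigger the store-status path above directly, e.g. during migration:
 *
 *	ioctl(vcpu_fd, KVM_S390_STORE_STATUS, KVM_S390_STORE_STATUS_PREFIXED);
 *
 * stores into the save area of the vcpu's prefix page, while passing a
 * plain absolute address stores the status at that address instead.
 */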
/*
 * store additional status at address
 */
int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
					unsigned long gpa)
{
	/* Only bits 0-53 are used for address formation */
	if (!(gpa & ~0x3ff))
		return 0;
	return write_guest_abs(vcpu, gpa & ~0x3ff,
			       (void *)&vcpu->run->s.regs.vrs, 512);
}

int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (!test_kvm_facility(vcpu->kvm, 129))
		return 0;
	/*
	 * The guest VXRS are in the host VXRS due to the lazy
	 * copying in vcpu load/put. We can simply call save_fpu_regs()
	 * to save the current register state because we are in the
	 * middle of a load/put cycle.
	 *
	 * Let's update our copies before we save them into the save area.
	 */
	save_fpu_regs();
	return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
}
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;
	kvm_for_each_vcpu(i, vcpu, kvm) {
		__disable_ibs_on_vcpu(vcpu);
	}
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
}
void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}
void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
	kvm_s390_clear_stop_irq(vcpu);

	atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	__disable_ibs_on_vcpu(vcpu);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}
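/*
 * Illustrative sketch, not part of the original source: one way the stop
 * path above is reached is userspace injecting a stop request:
 *
 *	struct kvm_s390_irq irq = {
 *		.type = KVM_S390_SIGP_STOP,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_S390_IRQ, &irq);
 *
 * The interrupt delivery code then calls kvm_s390_vcpu_stop() for the
 * target vcpu, serialized by start_stop_lock as above.
 */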
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
				  struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	void *tmpbuf = NULL;
	int r, srcu_idx;
	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
				    | KVM_S390_MEMOP_F_CHECK_ONLY;

	if (mop->flags & ~supported_flags)
		return -EINVAL;
	if (mop->size > MEM_OP_MAX_SIZE)
		return -E2BIG;
	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
		tmpbuf = vmalloc(mop->size);
		if (!tmpbuf)
			return -ENOMEM;
	}

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, false);
			break;
		}
		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		if (r == 0) {
			if (copy_to_user(uaddr, tmpbuf, mop->size))
				r = -EFAULT;
		}
		break;
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, true);
			break;
		}
		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
			r = -EFAULT;
			break;
		}
		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		break;
	default:
		r = -EINVAL;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);

	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);

	vfree(tmpbuf);
	return r;
}
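/*
 * Illustrative sketch, not part of the original source: reading guest
 * logical memory through the memop interface above, using a check-only
 * pre-flight pass first:
 *
 *	char buf[256];
 *	struct kvm_s390_mem_op op = {
 *		.gaddr	= guest_addr,
 *		.flags	= KVM_S390_MEMOP_F_CHECK_ONLY,
 *		.size	= sizeof(buf),
 *		.op	= KVM_S390_MEMOP_LOGICAL_READ,
 *		.buf	= (__u64)(unsigned long)buf,
 *		.ar	= 0,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_S390_MEM_OP, &op) == 0) {
 *		op.flags = 0;
 *		ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);	(fills buf)
 *	}
 */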
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_IRQ: {
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}
		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}
		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}
		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}
		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_guest_mem_op(vcpu, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	case KVM_S390_SET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
		    irq_state.len == 0 ||
		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
			r = -EINVAL;
			break;
		}
		r = kvm_s390_set_irq_state(vcpu,
					   (void __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_GET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len == 0) {
			r = -EINVAL;
			break;
		}
		r = kvm_s390_get_irq_state(vcpu,
					   (__u8 __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}
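/*
 * Illustrative sketch, not part of the original source: saving the pending
 * local interrupts with the ioctl pair above, e.g. for migration:
 *
 *	__u8 buf[64 * sizeof(struct kvm_s390_irq)];
 *	struct kvm_s390_irq_state irq_state = {
 *		.buf = (__u64)(unsigned long)buf,
 *		.len = sizeof(buf),
 *	};
 *	int n = ioctl(vcpu_fd, KVM_S390_GET_IRQ_STATE, &irq_state);
 *
 * On success n is the number of bytes copied; ENOBUFS indicates the
 * buffer was too small. Feeding the same buffer with len = n to
 * KVM_S390_SET_IRQ_STATE restores the state on the target vcpu.
 */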
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}
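/*
 * Illustrative sketch, not part of the original source: for user
 * controlled virtual machines the fault handler above backs an mmap() of
 * the sie_block, which userspace maps at the fixed page offset:
 *
 *	void *sie = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, vcpu_fd,
 *			 KVM_S390_SIE_PAGE_OFFSET * page_size);
 */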
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/*
	 * A few sanity checks. Memory slots have to start and end on a
	 * segment boundary (1 MB). The backing memory in userland may be
	 * fragmented across several vmas, and it is okay to mmap() and
	 * munmap() within this slot at any time after this call.
	 */
	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
		return -EINVAL;

	return 0;
}
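/*
 * Illustrative sketch, not part of the original source: a memslot that
 * passes the sanity checks above; size and userspace address are both
 * aligned to the 1 MB segment boundary:
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot		 = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size	 = 256 << 20,	(256 MB)
 *		.userspace_addr	 = (__u64)(unsigned long)backing,
 *	};
 *
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 *
 * where "backing" must itself be 1 MB aligned, e.g. by over-allocating
 * with mmap() and rounding up to the next segment boundary.
 */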
void kvm_arch_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change)
{
	int rc;

	/*
	 * If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		pr_warn("failed to commit memory region\n");
	return;
}
static int __init kvm_s390_init(void)
{
	if (!sclp.has_sief2) {
		pr_info("SIE not available\n");
		return -ENODEV;
	}

	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);
/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");