2 * s390host.c -- hosting zSeries kernel virtual machines
4 * Copyright IBM Corp. 2008,2009
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
13 * Christian Ehrhardt <ehrhardt@de.ibm.com>
16 #include <linux/compiler.h>
17 #include <linux/err.h>
19 #include <linux/hrtimer.h>
20 #include <linux/init.h>
21 #include <linux/kvm.h>
22 #include <linux/kvm_host.h>
23 #include <linux/module.h>
24 #include <linux/slab.h>
25 #include <linux/timer.h>
26 #include <asm/asm-offsets.h>
27 #include <asm/lowcore.h>
28 #include <asm/pgtable.h>
30 #include <asm/system.h>
/*
 * VCPU_STAT(x) expands to the offset of stat.x within struct kvm_vcpu plus
 * the KVM_STAT_VCPU tag -- the two fields struct kvm_stats_debugfs_item
 * needs to locate a per-vcpu counter.
 */
34 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/*
 * Per-vcpu event counters exported by the generic KVM debugfs code.
 * Each entry maps a debugfs file name to a counter in vcpu->stat.
 */
36 struct kvm_stats_debugfs_item debugfs_entries[] = {
37 { "userspace_handled", VCPU_STAT(exit_userspace) },
38 { "exit_null", VCPU_STAT(exit_null) },
39 { "exit_validity", VCPU_STAT(exit_validity) },
40 { "exit_stop_request", VCPU_STAT(exit_stop_request) },
41 { "exit_external_request", VCPU_STAT(exit_external_request) },
42 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
43 { "exit_instruction", VCPU_STAT(exit_instruction) },
44 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
45 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
46 { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
47 { "instruction_lctl", VCPU_STAT(instruction_lctl) },
48 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
49 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
50 { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
51 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
52 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
53 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
54 { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
55 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
56 { "instruction_stidp", VCPU_STAT(instruction_stidp) },
57 { "instruction_spx", VCPU_STAT(instruction_spx) },
58 { "instruction_stpx", VCPU_STAT(instruction_stpx) },
59 { "instruction_stap", VCPU_STAT(instruction_stap) },
60 { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
61 { "instruction_stsch", VCPU_STAT(instruction_stsch) },
62 { "instruction_chsc", VCPU_STAT(instruction_chsc) },
63 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
64 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
65 { "instruction_tprot", VCPU_STAT(instruction_tprot) },
66 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
67 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
68 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
69 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
70 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
71 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
72 { "diagnose_44", VCPU_STAT(diagnose_44) },
/* NOTE(review): the table terminator and closing brace are elided in this excerpt. */
/*
 * Page-sized bitmap of CPU facilities reported to guests; allocated and
 * masked in kvm_s390_init(), handed to each vcpu's SIE block in
 * kvm_arch_vcpu_setup(), freed in kvm_s390_exit().
 */
76 static unsigned long long *facilities;

78 /* Section: not file related */
/*
 * Arch hooks required by generic KVM. On s390 these are no-ops:
 * SIE needs no per-cpu enablement.
 * NOTE(review): this excerpt is sampled -- braces and return statements of
 * these stubs are elided; the visible signatures are unchanged.
 */
79 int kvm_arch_hardware_enable(void *garbage)
81 	/* every s390 is virtualization enabled ;-) */

/* Counterpart of hardware_enable; nothing to tear down on s390. */
85 void kvm_arch_hardware_disable(void *garbage)

/* One-time hardware setup hook; no-op on s390. */
89 int kvm_arch_hardware_setup(void)

/* One-time hardware teardown hook; no-op on s390. */
94 void kvm_arch_hardware_unsetup(void)

/* Processor-compatibility probe used by generic KVM; no-op on s390. */
98 void kvm_arch_check_processor_compat(void *rtn)

/* Module-load arch init hook. */
102 int kvm_arch_init(void *opaque)

/* Module-unload arch exit hook. */
107 void kvm_arch_exit(void)
111 /* Section: device related */
/*
 * ioctl handler for the /dev/kvm device node.
 * Only KVM_S390_ENABLE_SIE is supported; it switches the calling process'
 * address space into SIE-capable mode via s390_enable_sie().
 */
112 long kvm_arch_dev_ioctl(struct file *filp,
113 unsigned int ioctl, unsigned long arg)
115 if (ioctl == KVM_S390_ENABLE_SIE)
116 return s390_enable_sie();
/*
 * KVM_CHECK_EXTENSION handler: report which optional capabilities this
 * arch supports. Only the KVM_CAP_S390_PSW case is visible in this excerpt;
 * the switch body and return values are elided.
 */
120 int kvm_dev_ioctl_check_extension(long ext)
125 case KVM_CAP_S390_PSW:
134 /* Section: vm related */
136 * Get (and clear) the dirty memory log for a memory slot.
/* Dirty-log ioctl; body elided in this excerpt (not supported on this arch at this point). */
138 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
139 struct kvm_dirty_log *log)
/*
 * VM-scoped ioctl handler. The visible case, KVM_S390_INTERRUPT, copies a
 * struct kvm_s390_interrupt from userspace and injects it as a floating
 * (machine-wide) interrupt via kvm_s390_inject_vm().
 */
144 long kvm_arch_vm_ioctl(struct file *filp,
145 unsigned int ioctl, unsigned long arg)
147 struct kvm *kvm = filp->private_data;
148 void __user *argp = (void __user *)arg;
152 case KVM_S390_INTERRUPT: {
153 struct kvm_s390_interrupt s390int;
/* -EFAULT path on failed copy is elided in this excerpt. */
156 if (copy_from_user(&s390int, argp, sizeof(s390int)))
158 r = kvm_s390_inject_vm(kvm, &s390int);
/*
 * Create arch-specific VM state:
 *  - enable SIE for the current mm,
 *  - allocate the zeroed SCA page (system control area shared by vcpus),
 *  - register a named s390 debug-feature log ("kvm-<pid>"),
 *  - init the floating-interrupt list/lock,
 *  - allocate the guest address-space map (gmap) from current->mm.
 * The error-unwind labels freeing dbf/sca are partially visible at the end.
 */
168 int kvm_arch_init_vm(struct kvm *kvm)
173 rc = s390_enable_sie();
177 kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
181 sprintf(debug_name, "kvm-%u", current->pid);
/* 8 areas, 2 pages each, 8*sizeof(long) data field -- s390 debug feature. */
183 kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
187 spin_lock_init(&kvm->arch.float_int.lock);
188 INIT_LIST_HEAD(&kvm->arch.float_int.list);
190 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
191 VM_EVENT(kvm, 3, "%s", "vm created");
193 kvm->arch.gmap = gmap_alloc(current->mm);
/* error unwind: */
199 debug_unregister(kvm->arch.dbf);
201 free_page((unsigned long)(kvm->arch.sca));
/*
 * Tear down one vcpu: clear its bit in the SCA "mcn" mask (bit 63-id,
 * i.e. MSB-first numbering), detach its SIE block from the SCA sda slot
 * if still pointing there, free the SIE block page and uninit the vcpu.
 */
206 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
208 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
209 clear_bit(63 - vcpu->vcpu_id, (unsigned long *) &vcpu->kvm->arch.sca->mcn);
210 if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
211 (__u64) vcpu->arch.sie_block)
212 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
214 free_page((unsigned long)(vcpu->arch.sie_block));
215 kvm_vcpu_uninit(vcpu);
/*
 * Destroy every vcpu of this VM, then clear the vcpus[] slots and the
 * online count under kvm->lock so concurrent lookups see a consistent view.
 */
219 static void kvm_free_vcpus(struct kvm *kvm)
222 struct kvm_vcpu *vcpu;
224 kvm_for_each_vcpu(i, vcpu, kvm)
225 kvm_arch_vcpu_destroy(vcpu);
227 mutex_lock(&kvm->lock);
228 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
229 kvm->vcpus[i] = NULL;
231 atomic_set(&kvm->online_vcpus, 0);
232 mutex_unlock(&kvm->lock);
235 void kvm_arch_sync_events(struct kvm *kvm)
/*
 * Release arch VM state in reverse order of kvm_arch_init_vm():
 * SCA page, debug log, guest address-space map. (kvm_free_vcpus() call
 * is elided from this excerpt.)
 */
239 void kvm_arch_destroy_vm(struct kvm *kvm)
242 free_page((unsigned long)(kvm->arch.sca));
243 debug_unregister(kvm->arch.dbf);
244 gmap_free(kvm->arch.gmap);
247 /* Section: vcpu related */
/* Per-vcpu init: the vcpu shares the VM-wide guest address-space map. */
248 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
250 vcpu->arch.gmap = vcpu->kvm->arch.gmap;

/* Per-vcpu uninit hook; nothing visible to undo (body elided). */
254 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
/*
 * Scheduled in on a host cpu: save the host FP/access registers, then
 * install the guest's. The guest FPC is masked with FPC_VALID_MASK first
 * so restore_fp_regs() never loads an invalid control value.
 */
259 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
261 save_fp_regs(&vcpu->arch.host_fpregs);
262 save_access_regs(vcpu->arch.host_acrs);
263 vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
264 restore_fp_regs(&vcpu->arch.guest_fpregs);
265 restore_access_regs(vcpu->arch.guest_acrs);
/*
 * Scheduled out: mirror of kvm_arch_vcpu_load() -- stash the guest
 * FP/access registers and restore the host's.
 */
268 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
270 save_fp_regs(&vcpu->arch.guest_fpregs);
271 save_access_regs(vcpu->arch.guest_acrs);
272 restore_fp_regs(&vcpu->arch.host_fpregs);
273 restore_access_regs(vcpu->arch.host_acrs);
/*
 * Apply the architected initial CPU reset state to the vcpu's SIE block:
 * zero PSW, prefix, CPU timer and clock comparator; invalidate the
 * intercept cache (ihcpu = 0xffff); reset control registers to their
 * architected reset values (cr0 = 0xE0, cr14 = 0xC2000000 -- see the
 * z/Architecture Principles of Operation; TODO confirm exact bit meaning);
 * clear the guest FPC in hardware via lfpc; re-arm the breaking-event
 * address (gbea = 1).
 */
276 static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
278 /* this equals initial cpu reset in pop, but we don't switch to ESA */
279 vcpu->arch.sie_block->gpsw.mask = 0UL;
280 vcpu->arch.sie_block->gpsw.addr = 0UL;
281 vcpu->arch.sie_block->prefix = 0UL;
282 vcpu->arch.sie_block->ihcpu = 0xffff;
283 vcpu->arch.sie_block->cputm = 0UL;
284 vcpu->arch.sie_block->ckc = 0UL;
285 vcpu->arch.sie_block->todpr = 0;
286 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
287 vcpu->arch.sie_block->gcr[0] = 0xE0UL;
288 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
289 vcpu->arch.guest_fpregs.fpc = 0;
290 asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
291 vcpu->arch.sie_block->gbea = 1;
/*
 * One-time SIE control block setup for a new vcpu: set z/Arch + stopped
 * cpuflags, request an initial memory-slot reload, program the ecb/eca
 * feature bits (magic values taken from the SIE spec -- TODO confirm),
 * point the facility list at the module-global 'facilities' page, and
 * set up the clock-comparator hrtimer + wakeup tasklet. The cpu-id
 * version byte 0xff marks the cpu as virtual.
 */
294 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
296 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH | CPUSTAT_SM);
297 set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
298 vcpu->arch.sie_block->ecb = 6;
299 vcpu->arch.sie_block->eca = 0xC1002001U;
300 vcpu->arch.sie_block->fac = (int) (long) facilities;
301 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
302 tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
303 (unsigned long) vcpu);
304 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
305 get_cpu_id(&vcpu->arch.cpu_id);
306 vcpu->arch.cpu_id.version = 0xff;
/*
 * Allocate and wire up a vcpu:
 *  - kzalloc the kvm_vcpu, get a zeroed page for its SIE control block,
 *  - register the SIE block in the SCA (sda slot + high/low SCA origin
 *    halves + mcn bit 63-id),
 *  - hook the vcpu's local-interrupt structure into the VM's floating
 *    interrupt bookkeeping under float_int.lock,
 *  - finish with generic kvm_vcpu_init(); frees the SIE page on failure.
 * NOTE(review): error-label lines and the return are elided in this excerpt.
 */
310 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
313 struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
319 vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
320 get_zeroed_page(GFP_KERNEL);
322 if (!vcpu->arch.sie_block)
325 vcpu->arch.sie_block->icpua = id;
326 BUG_ON(!kvm->arch.sca);
327 if (!kvm->arch.sca->cpu[id].sda)
328 kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
329 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
330 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
331 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
333 spin_lock_init(&vcpu->arch.local_int.lock);
334 INIT_LIST_HEAD(&vcpu->arch.local_int.list);
335 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
336 spin_lock(&kvm->arch.float_int.lock);
337 kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
338 init_waitqueue_head(&vcpu->arch.local_int.wq);
339 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
340 spin_unlock(&kvm->arch.float_int.lock);
342 rc = kvm_vcpu_init(vcpu, kvm, id);
344 goto out_free_sie_block;
345 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
346 vcpu->arch.sie_block);
350 free_page((unsigned long)(vcpu->arch.sie_block));
/* Referenced by generic KVM but never called on s390 (body elided). */
357 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
359 /* kvm common code refers to this, but never calls it */

/* KVM_S390_INITIAL_RESET ioctl backend: apply the architected initial reset. */
364 static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
366 kvm_s390_vcpu_initial_reset(vcpu);
370 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
372 memcpy(&vcpu->arch.guest_gprs, ®s->gprs, sizeof(regs->gprs));
376 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
378 memcpy(®s->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
/* KVM_SET_SREGS backend: install access registers and control registers. */
382 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
383 struct kvm_sregs *sregs)
385 memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
386 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
/* KVM_GET_SREGS backend: read back access registers and control registers. */
390 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
391 struct kvm_sregs *sregs)
393 memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
394 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
/* KVM_SET_FPU backend: install guest floating point registers and FPC. */
398 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
400 memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
401 vcpu->arch.guest_fpregs.fpc = fpu->fpc;
/* KVM_GET_FPU backend: read back guest floating point registers and FPC. */
405 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
407 memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
408 fpu->fpc = vcpu->arch.guest_fpregs.fpc;
/*
 * KVM_S390_SET_INITIAL_PSW backend: stash the PSW in the shared kvm_run
 * area -- but only while the vcpu is not running (the CPUSTAT_RUNNING
 * check; rejection path elided in this excerpt).
 */
412 static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
416 if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
419 vcpu->run->psw_mask = psw.mask;
420 vcpu->run->psw_addr = psw.addr;
/* The following four generic vcpu ioctls are not implemented on s390. */
425 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
426 struct kvm_translation *tr)
428 return -EINVAL; /* not implemented yet */

431 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
432 struct kvm_guest_debug *dbg)
434 return -EINVAL; /* not implemented yet */

437 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
438 struct kvm_mp_state *mp_state)
440 return -EINVAL; /* not implemented yet */

443 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
444 struct kvm_mp_state *mp_state)
446 return -EINVAL; /* not implemented yet */
/*
 * Run the guest once until the next SIE exit:
 *  - shadow gprs 14/15 into the SIE block (gg14, 16 bytes = two regs),
 *  - deliver any pending interrupts, clear the last intercept code,
 *  - enable the guest address space and enter SIE via sie64a(); a host
 *    fault during SIE is converted into a guest addressing exception,
 *  - disable the gmap again and copy gprs 14/15 back out.
 * (TIF_MCCK_PENDING handling body is elided in this excerpt.)
 */
449 static void __vcpu_run(struct kvm_vcpu *vcpu)
451 memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);
456 if (test_thread_flag(TIF_MCCK_PENDING))
459 kvm_s390_deliver_pending_interrupts(vcpu);
461 vcpu->arch.sie_block->icptcode = 0;
465 gmap_enable(vcpu->arch.gmap);
466 VCPU_EVENT(vcpu, 6, "entering sie flags %x",
467 atomic_read(&vcpu->arch.sie_block->cpuflags));
468 if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
469 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
470 kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
472 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
473 vcpu->arch.sie_block->icptcode);
474 gmap_disable(vcpu->arch.gmap);
479 memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
/*
 * KVM_RUN backend: main vcpu execution loop.
 * Handles a pending memory-slot reload request, installs the caller's
 * signal mask, seeds the guest PSW from kvm_run for resumable exit
 * reasons, then loops __vcpu_run() + kvm_handle_sie_intercept() until a
 * signal arrives or a handler returns non-zero. Exit translation:
 *   SIE_INTERCEPT_RERUNVCPU -> re-enter (branch target elided),
 *   signal                  -> KVM_EXIT_INTR,
 *   -EOPNOTSUPP             -> export icptcode/ipa/ipb as KVM_EXIT_S390_SIEIC
 *                              for userspace handling,
 *   -EREMOTE                -> kvm_run already prepared by the handler.
 * Finally the guest PSW is copied back to kvm_run and the signal mask
 * restored.
 */
482 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
489 if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
490 kvm_s390_vcpu_set_mem(vcpu);
492 if (vcpu->sigset_active)
493 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
495 atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
497 BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);
499 switch (kvm_run->exit_reason) {
500 case KVM_EXIT_S390_SIEIC:
501 case KVM_EXIT_UNKNOWN:
503 case KVM_EXIT_S390_RESET:
/* resume: take the PSW userspace last saw / modified */
509 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
510 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
/* main run loop (loop head and __vcpu_run() call elided in excerpt): */
516 rc = kvm_handle_sie_intercept(vcpu);
517 } while (!signal_pending(current) && !rc);
519 if (rc == SIE_INTERCEPT_RERUNVCPU)
522 if (signal_pending(current) && !rc) {
523 kvm_run->exit_reason = KVM_EXIT_INTR;
527 if (rc == -EOPNOTSUPP) {
528 /* intercept cannot be handled in-kernel, prepare kvm-run */
529 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
530 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
531 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
532 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
536 if (rc == -EREMOTE) {
537 /* intercept was handled, but userspace support is needed
538 * kvm_run has been prepared by the handler */
542 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
543 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
545 if (vcpu->sigset_active)
546 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
548 vcpu->stat.exit_userspace++;
/*
 * Copy n bytes to guest memory at guestdest: prefix-relative (logical)
 * when 'prefix' is set, absolute otherwise. Returns the copy helper's
 * result (the 'prefix' branch structure is partially elided here).
 */
552 static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
553 unsigned long n, int prefix)
556 return copy_to_guest(vcpu, guestdest, from, n);
558 return copy_to_guest_absolute(vcpu, guestdest, from, n);
562 * store status at address
563 * we have two special cases:
564 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
565 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
/*
 * Write the vcpu's architected store-status save area into guest memory.
 * For the two special addresses a mode byte (archmode = 1, meaning
 * z/Architecture) is stored at absolute/logical address 163 first, then
 * the save area base is used. Each register group is copied with
 * __guestcopy(); error returns between the calls are elided in this
 * excerpt.
 */
567 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
569 const unsigned char archmode = 1;
572 if (addr == KVM_S390_STORE_STATUS_NOADDR) {
573 if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
575 addr = SAVE_AREA_BASE;
577 } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
578 if (copy_to_guest(vcpu, 163ul, &archmode, 1))
580 addr = SAVE_AREA_BASE;
/* 16 fp regs, 8 bytes each */
585 if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
586 vcpu->arch.guest_fpregs.fprs, 128, prefix))
/* 16 gp regs, 8 bytes each */
589 if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
590 vcpu->arch.guest_gprs, 128, prefix))
591 /* 16-byte PSW */
593 if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
594 &vcpu->arch.sie_block->gpsw, 16, prefix))
597 if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
598 &vcpu->arch.sie_block->prefix, 4, prefix))
601 if (__guestcopy(vcpu,
602 addr + offsetof(struct save_area, fp_ctrl_reg),
603 &vcpu->arch.guest_fpregs.fpc, 4, prefix))
606 if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
607 &vcpu->arch.sie_block->todpr, 4, prefix))
610 if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
611 &vcpu->arch.sie_block->cputm, 8, prefix))
614 if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
615 &vcpu->arch.sie_block->ckc, 8, prefix))
/* 16 access regs, 4 bytes each */
618 if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
619 &vcpu->arch.guest_acrs, 64, prefix))
/* 16 control regs, 8 bytes each */
622 if (__guestcopy(vcpu,
623 addr + offsetof(struct save_area, ctrl_regs),
624 &vcpu->arch.sie_block->gcr, 128, prefix))
/*
 * vcpu-scoped ioctl dispatcher:
 *  KVM_S390_INTERRUPT       - inject an interrupt into this vcpu,
 *  KVM_S390_STORE_STATUS    - store-status at 'arg' (address, no struct),
 *  KVM_S390_SET_INITIAL_PSW - copy a psw_t from userspace and install it,
 *  KVM_S390_INITIAL_RESET   - architected initial CPU reset.
 * (-EFAULT paths and default case elided in this excerpt.)
 */
629 long kvm_arch_vcpu_ioctl(struct file *filp,
630 unsigned int ioctl, unsigned long arg)
632 struct kvm_vcpu *vcpu = filp->private_data;
633 void __user *argp = (void __user *)arg;
637 case KVM_S390_INTERRUPT: {
638 struct kvm_s390_interrupt s390int;
641 if (copy_from_user(&s390int, argp, sizeof(s390int)))
643 r = kvm_s390_inject_vcpu(vcpu, &s390int);
646 case KVM_S390_STORE_STATUS:
647 r = kvm_s390_vcpu_store_status(vcpu, arg);
649 case KVM_S390_SET_INITIAL_PSW: {
653 if (copy_from_user(&psw, argp, sizeof(psw)))
655 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
658 case KVM_S390_INITIAL_RESET:
659 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
667 /* Section: memory related */
/*
 * Validate a userspace memory-slot update before it is committed:
 * the single slot must start at guest address 0 and both its userspace
 * address and size must be 1 MB aligned (0xfffff masks -- segment
 * granularity of the gmap). Rejection returns are elided in this excerpt.
 */
668 int kvm_arch_prepare_memory_region(struct kvm *kvm,
669 struct kvm_memory_slot *memslot,
670 struct kvm_memory_slot old,
671 struct kvm_userspace_memory_region *mem,
674 /* A few sanity checks. We can have exactly one memory slot which has
675 to start at guest virtual zero and which has to be located at a
676 page boundary in userland and which has to end at a page boundary.
677 The memory in userland is ok to be fragmented into various different
678 vmas. It is okay to mmap() and munmap() stuff in this slot after
679 doing this call at any time */
684 if (mem->guest_phys_addr)
687 if (mem->userspace_addr & 0xffffful)
690 if (mem->memory_size & 0xffffful)
/*
 * Commit a validated memory-slot update: map the userspace range into
 * the guest address space via gmap_map_segment(), then ask every vcpu to
 * reload its SIE memory setup -- vcpus that did not already have the
 * reload request pending are kicked with a RELOADVCPU sigp stop.
 */
699 void kvm_arch_commit_memory_region(struct kvm *kvm,
700 struct kvm_userspace_memory_region *mem,
701 struct kvm_memory_slot old,
705 struct kvm_vcpu *vcpu;
708 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
709 mem->guest_phys_addr, mem->memory_size);
713 /* request update of sie control block for all available vcpus */
714 kvm_for_each_vcpu(i, vcpu, kvm) {
715 if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
717 kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
722 void kvm_arch_flush_shadow(struct kvm *kvm)
/*
 * Module init: register with generic KVM, then build the guest-visible
 * facility list. A full zeroed DMA page is allocated (guests may ask for
 * up to 256 doublewords), but only the first 16 bytes (facility bits
 * 0-127) are copied from the host STFLE list, masked down to the
 * facilities known to work under KVM.
 */
726 static int __init kvm_s390_init(void)
729 ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
734 * guests can ask for up to 255+1 double words, we need a full page
735 * to hold the maximum amount of facilities. On the other hand, we
736 * only set facilities that are known to work in KVM.
738 facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
743 memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
744 facilities[0] &= 0xff00fff3f47c0000ULL;
745 facilities[1] &= 0x201c000000000000ULL;
/* Module exit: release the facility-list page (kvm_exit() call elided). */
749 static void __exit kvm_s390_exit(void)
751 free_page((unsigned long) facilities);

/* Register module entry/exit points. */
755 module_init(kvm_s390_init);
756 module_exit(kvm_s390_exit);