s390/kvm: Provide a way to prevent reentering SIE
[linux-2.6-block.git] arch/s390/kvm/kvm-s390.c

/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 *            Christian Borntraeger <borntraeger@de.ibm.com>
 *            Heiko Carstens <heiko.carstens@de.ibm.com>
 *            Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_external_call", VCPU_STAT(deliver_external_call) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_tprot", VCPU_STAT(instruction_tprot) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
        { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "diagnose_10", VCPU_STAT(diagnose_10) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { "diagnose_9c", VCPU_STAT(diagnose_9c) },
        { NULL }
};

static unsigned long long *facilities;

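/*
 * How the table above is consumed (informal sketch): VCPU_STAT(x) expands
 * to the pair "offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU", so each
 * row ties a debugfs file name to the offset of a counter inside
 * struct kvm_vcpu. For instance, the "exit_null" row is equivalent to:
 *
 *      { "exit_null", offsetof(struct kvm_vcpu, stat.exit_null), KVM_STAT_VCPU }
 *
 * Generic KVM code (in virt/kvm, not shown here) walks debugfs_entries[]
 * and reads the counters through these offsets when the files are read.
 */
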
/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
        /* every s390 is virtualization enabled ;-) */
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_S390_PSW:
        case KVM_CAP_S390_GMAP:
        case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_CAP_S390_UCONTROL:
#endif
        case KVM_CAP_SYNC_REGS:
        case KVM_CAP_ONE_REG:
        case KVM_CAP_ENABLE_CAP:
        case KVM_CAP_S390_CSS_SUPPORT:
        case KVM_CAP_IOEVENTFD:
                r = 1;
                break;
        case KVM_CAP_NR_VCPUS:
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
        case KVM_CAP_NR_MEMSLOTS:
                r = KVM_USER_MEM_SLOTS;
                break;
        case KVM_CAP_S390_COW:
                r = MACHINE_HAS_ESOP;
                break;
        default:
                r = 0;
        }
        return r;
}

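/*
 * For context, a minimal (hypothetical) userspace probe of the values
 * returned above; KVM_CHECK_EXTENSION on /dev/kvm is the generic entry
 * point that lands in kvm_dev_ioctl_check_extension():
 *
 *      int kvm_fd = open("/dev/kvm", O_RDWR);
 *      if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_SYNC_REGS) > 0) {
 *              // the kvm_run synced register block may be used
 *      }
 */
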
/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        default:
                r = -ENOTTY;
        }

        return r;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        int rc;
        char debug_name[16];

        rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
        if (type & ~KVM_VM_S390_UCONTROL)
                goto out_err;
        if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
                goto out_err;
#else
        if (type)
                goto out_err;
#endif

        rc = s390_enable_sie();
        if (rc)
                goto out_err;

        rc = -ENOMEM;

        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        if (!kvm->arch.sca)
                goto out_err;

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_nodbf;

        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");

        if (type & KVM_VM_S390_UCONTROL) {
                kvm->arch.gmap = NULL;
        } else {
                kvm->arch.gmap = gmap_alloc(current->mm);
                if (!kvm->arch.gmap)
                        goto out_nogmap;
        }

        kvm->arch.css_support = 0;

        return 0;
out_nogmap:
        debug_unregister(kvm->arch.dbf);
out_nodbf:
        free_page((unsigned long)(kvm->arch.sca));
out_err:
        return rc;
}

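/*
 * Note on the two VM flavours created above: a regular VM owns a single
 * kernel-managed guest mapping (kvm->arch.gmap) shared by all vcpus,
 * while a user-controlled VM (type KVM_VM_S390_UCONTROL, CAP_SYS_ADMIN
 * only) leaves kvm->arch.gmap NULL; each of its vcpus allocates a private
 * gmap in kvm_arch_vcpu_init() and userspace populates it through the
 * KVM_S390_UCAS_MAP/KVM_S390_UCAS_UNMAP vcpu ioctls further down.
 */
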
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
        trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
        if (!kvm_is_ucontrol(vcpu->kvm)) {
                clear_bit(63 - vcpu->vcpu_id,
                          (unsigned long *) &vcpu->kvm->arch.sca->mcn);
                if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
                    (__u64) vcpu->arch.sie_block)
                        vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
        }
        smp_mb();

        if (kvm_is_ucontrol(vcpu->kvm))
                gmap_free(vcpu->arch.gmap);

        free_page((unsigned long)(vcpu->arch.sie_block));
        kvm_vcpu_uninit(vcpu);
        kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_destroy(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);
        mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_free_vcpus(kvm);
        free_page((unsigned long)(kvm->arch.sca));
        debug_unregister(kvm->arch.dbf);
        if (!kvm_is_ucontrol(kvm))
                gmap_free(kvm->arch.gmap);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        if (kvm_is_ucontrol(vcpu->kvm)) {
                vcpu->arch.gmap = gmap_alloc(current->mm);
                if (!vcpu->arch.gmap)
                        return -ENOMEM;
                return 0;
        }

        vcpu->arch.gmap = vcpu->kvm->arch.gmap;
        vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
                                    KVM_SYNC_GPRS |
                                    KVM_SYNC_ACRS |
                                    KVM_SYNC_CRS;
        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        /* Nothing to do */
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        save_fp_regs(&vcpu->arch.host_fpregs);
        save_access_regs(vcpu->arch.host_acrs);
        vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        restore_access_regs(vcpu->run->s.regs.acrs);
        gmap_enable(vcpu->arch.gmap);
        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
        gmap_disable(vcpu->arch.gmap);
        save_fp_regs(&vcpu->arch.guest_fpregs);
        save_access_regs(vcpu->run->s.regs.acrs);
        restore_fp_regs(&vcpu->arch.host_fpregs);
        restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals initial cpu reset in pop, but we don't switch to ESA */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        kvm_s390_set_prefix(vcpu, 0);
        vcpu->arch.sie_block->cputm = 0UL;
        vcpu->arch.sie_block->ckc = 0UL;
        vcpu->arch.sie_block->todpr = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0] = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
        atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
        return 0;
}

b0c632db
HC
372int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
373{
9e6dabef
CH
374 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
375 CPUSTAT_SM |
376 CPUSTAT_STOPPED);
fc34531d 377 vcpu->arch.sie_block->ecb = 6;
b0c632db 378 vcpu->arch.sie_block->eca = 0xC1002001U;
ef50f7ac 379 vcpu->arch.sie_block->fac = (int) (long) facilities;
ca872302
CB
380 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
381 tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
382 (unsigned long) vcpu);
383 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
453423dc 384 get_cpu_id(&vcpu->arch.cpu_id);
92e6ecf3 385 vcpu->arch.cpu_id.version = 0xff;
b0c632db
HC
386 return 0;
387}
388
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu;
        int rc = -EINVAL;

        if (id >= KVM_MAX_VCPUS)
                goto out;

        rc = -ENOMEM;

        vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
        if (!vcpu)
                goto out;

        vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
                                        get_zeroed_page(GFP_KERNEL);

        if (!vcpu->arch.sie_block)
                goto out_free_cpu;

        vcpu->arch.sie_block->icpua = id;
        if (!kvm_is_ucontrol(kvm)) {
                if (!kvm->arch.sca) {
                        WARN_ON_ONCE(1);
                        goto out_free_cpu;
                }
                if (!kvm->arch.sca->cpu[id].sda)
                        kvm->arch.sca->cpu[id].sda =
                                (__u64) vcpu->arch.sie_block;
                vcpu->arch.sie_block->scaoh =
                        (__u32)(((__u64)kvm->arch.sca) >> 32);
                vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
                set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
        }

        spin_lock_init(&vcpu->arch.local_int.lock);
        INIT_LIST_HEAD(&vcpu->arch.local_int.list);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        spin_lock(&kvm->arch.float_int.lock);
        kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
        init_waitqueue_head(&vcpu->arch.local_int.wq);
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
        spin_unlock(&kvm->arch.float_int.lock);

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_sie_block;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);
        trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

        return vcpu;
out_free_sie_block:
        free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
        kfree(vcpu);
out:
        return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
        atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
        atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
        while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
                cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
        s390_vcpu_block(vcpu);
        exit_sie(vcpu);
}

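/*
 * Typical use of the primitives above (a sketch; the actual callers live
 * in other files): PROG_BLOCK_SIE in prog20 keeps the vcpu from
 * (re)entering SIE, and exit_sie() busy-waits until the PROG_IN_SIE bit
 * in prog0c shows the vcpu has left SIE. After exit_sie_sync() returns,
 * another thread can therefore safely modify the SIE control block:
 *
 *      exit_sie_sync(vcpu);         // block reentry, kick vcpu out of SIE
 *      // ... update vcpu->arch.sie_block fields ...
 *      s390_vcpu_unblock(vcpu);     // permit SIE entry again
 */
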
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
                                           struct kvm_one_reg *reg)
{
        int r = -EINVAL;

        switch (reg->id) {
        case KVM_REG_S390_TODPR:
                r = put_user(vcpu->arch.sie_block->todpr,
                             (u32 __user *)reg->addr);
                break;
        case KVM_REG_S390_EPOCHDIFF:
                r = put_user(vcpu->arch.sie_block->epoch,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CPU_TIMER:
                r = put_user(vcpu->arch.sie_block->cputm,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CLOCK_COMP:
                r = put_user(vcpu->arch.sie_block->ckc,
                             (u64 __user *)reg->addr);
                break;
        default:
                break;
        }

        return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
                                           struct kvm_one_reg *reg)
{
        int r = -EINVAL;

        switch (reg->id) {
        case KVM_REG_S390_TODPR:
                r = get_user(vcpu->arch.sie_block->todpr,
                             (u32 __user *)reg->addr);
                break;
        case KVM_REG_S390_EPOCHDIFF:
                r = get_user(vcpu->arch.sie_block->epoch,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CPU_TIMER:
                r = get_user(vcpu->arch.sie_block->cputm,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CLOCK_COMP:
                r = get_user(vcpu->arch.sie_block->ckc,
                             (u64 __user *)reg->addr);
                break;
        default:
                break;
        }

        return r;
}

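/*
 * The handlers above are reached via the generic KVM_GET_ONE_REG and
 * KVM_SET_ONE_REG vcpu ioctls (see the dispatch in kvm_arch_vcpu_ioctl()
 * below); reg->addr carries a userspace pointer. A minimal, hypothetical
 * caller reading the epoch difference:
 *
 *      __u64 epoch;
 *      struct kvm_one_reg reg = {
 *              .id   = KVM_REG_S390_EPOCHDIFF,
 *              .addr = (__u64)&epoch,
 *      };
 *      ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */
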
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
        kvm_s390_vcpu_initial_reset(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
        memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
        restore_access_regs(vcpu->run->s.regs.acrs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
        memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
        vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
        fpu->fpc = vcpu->arch.guest_fpregs.fpc;
        return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
        int rc = 0;

        if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
                rc = -EBUSY;
        else {
                vcpu->run->psw_mask = psw.mask;
                vcpu->run->psw_addr = psw.addr;
        }
        return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
        int rc;

        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

        if (need_resched())
                schedule();

        if (test_thread_flag(TIF_MCCK_PENDING))
                s390_handle_mcck();

        if (!kvm_is_ucontrol(vcpu->kvm))
                kvm_s390_deliver_pending_interrupts(vcpu);

        vcpu->arch.sie_block->icptcode = 0;
        preempt_disable();
        kvm_guest_enter();
        preempt_enable();
        VCPU_EVENT(vcpu, 6, "entering sie flags %x",
                   atomic_read(&vcpu->arch.sie_block->cpuflags));
        trace_kvm_s390_sie_enter(vcpu,
                                 atomic_read(&vcpu->arch.sie_block->cpuflags));
        rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
        if (rc) {
                if (kvm_is_ucontrol(vcpu->kvm)) {
                        rc = SIE_INTERCEPT_UCONTROL;
                } else {
                        VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
                        trace_kvm_s390_sie_fault(vcpu);
                        rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                }
        }
        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
        trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
        kvm_guest_exit();

        memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
        return rc;
}

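/*
 * A note on the two 16-byte copies in __vcpu_run() (a best-effort reading
 * of the sie64a() contract, not a normative statement): they move guest
 * registers 14 and 15 as a pair between the kvm_run sync area and the
 * gg14/gg15 slots of the SIE control block, which is how those two
 * registers reach the guest context while the remaining gprs travel via
 * the array argument.
 */
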
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int rc;
        sigset_t sigsaved;

rerun_vcpu:
        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

        BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

        switch (kvm_run->exit_reason) {
        case KVM_EXIT_S390_SIEIC:
        case KVM_EXIT_UNKNOWN:
        case KVM_EXIT_INTR:
        case KVM_EXIT_S390_RESET:
        case KVM_EXIT_S390_UCONTROL:
        case KVM_EXIT_S390_TSCH:
                break;
        default:
                BUG();
        }

        vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
        vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
                kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
                kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
        }
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
                kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
                memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
                kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
        }

        might_fault();

        do {
                rc = __vcpu_run(vcpu);
                if (rc)
                        break;
                if (kvm_is_ucontrol(vcpu->kvm))
                        rc = -EOPNOTSUPP;
                else
                        rc = kvm_handle_sie_intercept(vcpu);
        } while (!signal_pending(current) && !rc);

        if (rc == SIE_INTERCEPT_RERUNVCPU)
                goto rerun_vcpu;

        if (signal_pending(current) && !rc) {
                kvm_run->exit_reason = KVM_EXIT_INTR;
                rc = -EINTR;
        }

#ifdef CONFIG_KVM_S390_UCONTROL
        if (rc == SIE_INTERCEPT_UCONTROL) {
                kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
                kvm_run->s390_ucontrol.trans_exc_code =
                        current->thread.gmap_addr;
                kvm_run->s390_ucontrol.pgm_code = 0x10;
                rc = 0;
        }
#endif

        if (rc == -EOPNOTSUPP) {
                /* intercept cannot be handled in-kernel, prepare kvm-run */
                kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
                kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
                kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
                kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
                rc = 0;
        }

        if (rc == -EREMOTE) {
                /* intercept was handled, but userspace support is needed,
                 * kvm_run has been prepared by the handler */
                rc = 0;
        }

        kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
        kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
        kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
        memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu->stat.exit_userspace++;
        return rc;
}

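/*
 * For orientation, a stripped-down (hypothetical) userspace loop driving
 * the function above through the generic KVM_RUN ioctl; a real VMM also
 * acts on the other s390-specific exit payloads prepared above:
 *
 *      struct kvm_run *run = mmap(NULL, run_size, PROT_READ | PROT_WRITE,
 *                                 MAP_SHARED, vcpu_fd, 0);
 *      for (;;) {
 *              ioctl(vcpu_fd, KVM_RUN, 0);
 *              if (run->exit_reason == KVM_EXIT_S390_SIEIC)
 *                      handle_sieic(run->s390_sieic.icptcode,  // user-defined
 *                                   run->s390_sieic.ipa,
 *                                   run->s390_sieic.ipb);
 *      }
 */
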
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
                       unsigned long n, int prefix)
{
        if (prefix)
                return copy_to_guest(vcpu, guestdest, from, n);
        else
                return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        unsigned char archmode = 1;
        int prefix;

        if (addr == KVM_S390_STORE_STATUS_NOADDR) {
                if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 0;
        } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
                if (copy_to_guest(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 1;
        } else
                prefix = 0;

        /*
         * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
         * copying in vcpu load/put. Let's update our copies before we save
         * them into the save area.
         */
        save_fp_regs(&vcpu->arch.guest_fpregs);
        save_access_regs(vcpu->run->s.regs.acrs);

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
                        vcpu->arch.guest_fpregs.fprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
                        vcpu->run->s.regs.gprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
                        &vcpu->arch.sie_block->gpsw, 16, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
                        &vcpu->arch.sie_block->prefix, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, fp_ctrl_reg),
                        &vcpu->arch.guest_fpregs.fpc, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
                        &vcpu->arch.sie_block->todpr, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
                        &vcpu->arch.sie_block->cputm, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
                        &vcpu->arch.sie_block->ckc, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
                        &vcpu->run->s.regs.acrs, 64, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, ctrl_regs),
                        &vcpu->arch.sie_block->gcr, 128, prefix))
                return -EFAULT;
        return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                     struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
        case KVM_CAP_S390_CSS_SUPPORT:
                if (!vcpu->kvm->arch.css_support) {
                        vcpu->kvm->arch.css_support = 1;
                        trace_kvm_s390_enable_css(vcpu->kvm);
                }
                r = 0;
                break;
        default:
                r = -EINVAL;
                break;
        }
        return r;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vcpu(vcpu, &s390int);
                break;
        }
        case KVM_S390_STORE_STATUS:
                r = kvm_s390_vcpu_store_status(vcpu, arg);
                break;
        case KVM_S390_SET_INITIAL_PSW: {
                psw_t psw;

                r = -EFAULT;
                if (copy_from_user(&psw, argp, sizeof(psw)))
                        break;
                r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
                break;
        }
        case KVM_S390_INITIAL_RESET:
                r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
                break;
        case KVM_SET_ONE_REG:
        case KVM_GET_ONE_REG: {
                struct kvm_one_reg reg;
                r = -EFAULT;
                if (copy_from_user(&reg, argp, sizeof(reg)))
                        break;
                if (ioctl == KVM_SET_ONE_REG)
                        r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
                else
                        r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
                break;
        }
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_S390_UCAS_MAP: {
                struct kvm_s390_ucas_mapping ucasmap;

                if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
                        r = -EFAULT;
                        break;
                }

                if (!kvm_is_ucontrol(vcpu->kvm)) {
                        r = -EINVAL;
                        break;
                }

                r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
                                     ucasmap.vcpu_addr, ucasmap.length);
                break;
        }
        case KVM_S390_UCAS_UNMAP: {
                struct kvm_s390_ucas_mapping ucasmap;

                if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
                        r = -EFAULT;
                        break;
                }

                if (!kvm_is_ucontrol(vcpu->kvm)) {
                        r = -EINVAL;
                        break;
                }

                r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
                                       ucasmap.length);
                break;
        }
#endif
        case KVM_S390_VCPU_FAULT: {
                r = gmap_fault(arg, vcpu->arch.gmap);
                if (!IS_ERR_VALUE(r))
                        r = 0;
                break;
        }
        case KVM_ENABLE_CAP:
        {
                struct kvm_enable_cap cap;
                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        break;
                r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
                break;
        }
        default:
                r = -ENOTTY;
        }
        return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
        if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
            && (kvm_is_ucontrol(vcpu->kvm))) {
                vmf->page = virt_to_page(vcpu->arch.sie_block);
                get_page(vmf->page);
                return 0;
        }
#endif
        return VM_FAULT_SIGBUS;
}

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
        return 0;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_userspace_memory_region *mem,
                                   enum kvm_mr_change change)
{
        /* A few sanity checks. We can have memory slots which have to be
           located/ended at a segment boundary (1MB). The memory in userland is
           ok to be fragmented into various different vmas. It is okay to mmap()
           and munmap() stuff in this slot after doing this call at any time */

        if (mem->userspace_addr & 0xffffful)
                return -EINVAL;

        if (mem->memory_size & 0xffffful)
                return -EINVAL;

        return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   const struct kvm_memory_slot *old,
                                   enum kvm_mr_change change)
{
        int rc;

        /* If the basics of the memslot do not change, we do not want
         * to update the gmap. Every update causes several unnecessary
         * segment translation exceptions. This is usually handled just
         * fine by the normal fault handler + gmap, but it will also
         * cause faults on the prefix page of running guest CPUs.
         */
        if (old->userspace_addr == mem->userspace_addr &&
            old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
            old->npages * PAGE_SIZE == mem->memory_size)
                return;

        rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
                mem->guest_phys_addr, mem->memory_size);
        if (rc)
                printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
        return;
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot)
{
}

static int __init kvm_s390_init(void)
{
        int ret;
        ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
        if (ret)
                return ret;

        /*
         * guests can ask for up to 255+1 double words, we need a full page
         * to hold the maximum amount of facilities. On the other hand, we
         * only set facilities that are known to work in KVM.
         */
        facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
        if (!facilities) {
                kvm_exit();
                return -ENOMEM;
        }
        memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
        facilities[0] &= 0xff00fff3f47c0000ULL;
        facilities[1] &= 0x001c000000000000ULL;
        return 0;
}

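/*
 * The two mask constants in kvm_s390_init() act as a whitelist over the
 * STFLE facility list copied from the lowcore (facility bit 0 being the
 * most-significant bit of facilities[0], per the s390 numbering): host
 * facilities that KVM has not vetted are hidden from guests. The precise
 * bit sets are version-specific and best treated as data, not contract.
 */
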
static void __exit kvm_s390_exit(void)
{
        free_page((unsigned long) facilities);
        kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);