KVM: s390: Do not set CC3 for EQBS and SQBS
arch/s390/kvm/kvm-s390.c

/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
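
/*
 * Note (added for clarity): VCPU_STAT(x) expands to two initializer values,
 * the offset of stat.x within struct kvm_vcpu and the KVM_STAT_VCPU tag, so
 * each debugfs_entries element below pairs a file name with the location of
 * its counter.
 */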

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};

unsigned long *vfacilities;
static struct gmap_notifier gmap_notifier;

/* test availability of vfacility */
static inline int test_vfacility(unsigned long nr)
{
	return __test_facility(nr, (void *) vfacilities);
}
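
/*
 * Note (added for clarity): facility numbers follow the STFLE bit numbering
 * defined in the Principles of Operation; for example, test_vfacility(76)
 * would ask whether bit 76 is set in the guest-visible facility list. That
 * list is populated and masked in kvm_s390_init() at the bottom of this file.
 */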

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	default:
		r = 0;
	}
	return r;
}
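
/*
 * Illustrative sketch (hypothetical userspace code, not part of this file):
 * the capabilities above are typically probed on the /dev/kvm fd as
 *
 *	int max = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS);
 *
 * where the return value is the r computed by the handler above.
 */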

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm);
		if (!kvm->arch.gmap)
			goto out_nogmap;
		kvm->arch.gmap->private = kvm;
	}

	kvm->arch.css_support = 0;

	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}
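
/*
 * Illustrative sketch (hypothetical userspace code, not part of this file):
 * a user-controlled VM reaches the UCONTROL branch above via
 *
 *	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_S390_UCONTROL);
 *
 * while an ordinary VM passes type == 0 and gets a kernel-managed gmap.
 */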

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = gmap_alloc(current->mm);
		if (!vcpu->arch.gmap)
			return -ENOMEM;
		vcpu->arch.gmap->private = vcpu->kvm;
		return 0;
	}

	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS;
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}
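
/*
 * Note (added for clarity): between vcpu_load and vcpu_put the guest FPRs
 * and access registers live in the real CPU registers, not in the vcpu
 * structure. Any path that wants an up-to-date copy while the vcpu is
 * loaded (e.g. kvm_s390_vcpu_store_status() below) must save them first.
 */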

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
}
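
/*
 * Note (added for clarity, per the "initial cpu reset in pop" comment above):
 * the two non-zero values restore the architected reset state of CR0 and
 * CR14 as defined in the Principles of Operation; all other control
 * registers reset to zero via the memset().
 */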

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	vcpu->arch.sie_block->ecb = 6;
	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	vcpu->arch.sie_block->fac = (int) (long) vfacilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}
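
/*
 * Note (added for clarity): the fac field of the SIE control block holds a
 * 31-bit address of the facility list, which is why the vfacilities pointer
 * can be truncated through (int)(long); kvm_s390_init() allocates that page
 * with GFP_DMA, i.e. below 2 GB, so the truncation is lossless.
 */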

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
					get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}
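
/*
 * Note (added for clarity): the system control area (SCA) address is split
 * into 32-bit halves (scaoh/scaol) in the SIE block, and the mcn field marks
 * which cpu[] slots are valid. The hardware numbers bits most-significant
 * first while Linux bitops count from the least-significant end, hence the
 * "63 - id" in the set_bit() above and the matching clear_bit() in
 * kvm_arch_vcpu_destroy().
 */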

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}
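
/*
 * Note (added for clarity): exit_sie_sync() sets PROG_BLOCK_SIE before
 * kicking the CPU, so the vcpu cannot re-enter SIE until someone calls
 * s390_vcpu_unblock() -- kvm_s390_handle_requests() below relies on exactly
 * this to keep the vcpu out of the guest until the prefix pages are
 * re-armed.
 */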

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (vcpu->arch.sie_block->prefix == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
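
/*
 * Illustrative sketch (hypothetical userspace code, not part of this file):
 * reading one of the registers handled above, e.g. the CPU timer:
 *
 *	__u64 cputm;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_S390_CPU_TIMER,
 *		.addr = (__u64)&cputm,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */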

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Let's just retry the request loop.
	 */
	while (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      vcpu->arch.sie_block->prefix,
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		s390_vcpu_unblock(vcpu);
	}
	return 0;
}
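
/*
 * Note (added for clarity): the PAGE_SIZE * 2 range re-arms the notifier for
 * both 4K pages of the prefix area, matching the "address & ~0x1000UL"
 * comparison in kvm_gmap_notifier() above.
 */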

static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm))
		kvm_s390_deliver_pending_interrupts(vcpu);

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}

static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	int rc;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (exit_reason >= 0) {
		rc = 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		rc = -EREMOTE;
	}

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			rc = -EOPNOTSUPP;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}
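
/*
 * Note (added for clarity): the 16-byte copies to and from gg14 cover two
 * adjacent 8-byte SIE block fields, so guest r14 and r15 are shadowed across
 * SIE entry and exit in vcpu_pre_run() and vcpu_post_run().
 */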

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in the fault handler, there
		 * should be no uaccess between guest_enter and guest_exit.
		 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
	case KVM_EXIT_S390_UCONTROL:
	case KVM_EXIT_S390_TSCH:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}
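
/*
 * Illustrative sketch (hypothetical userspace code, not part of this file):
 * the handler above is driven by the usual KVM run loop,
 *
 *	struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 *	for (;;) {
 *		ioctl(vcpu_fd, KVM_RUN, 0);
 *		switch (run->exit_reason) {
 *		case KVM_EXIT_S390_SIEIC:
 *			... // handle the intercept, then loop
 *		}
 *	}
 *
 * where mmap_size comes from ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0).
 */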

static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Let's update our copies before we save
	 * it into the save area
	 */
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->run->s.regs.gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->run->s.regs.acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}
CH
947static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
948 struct kvm_enable_cap *cap)
949{
950 int r;
951
952 if (cap->flags)
953 return -EINVAL;
954
955 switch (cap->cap) {
fa6b7fe9
CH
956 case KVM_CAP_S390_CSS_SUPPORT:
957 if (!vcpu->kvm->arch.css_support) {
958 vcpu->kvm->arch.css_support = 1;
959 trace_kvm_s390_enable_css(vcpu->kvm);
960 }
961 r = 0;
962 break;
d6712df9
CH
963 default:
964 r = -EINVAL;
965 break;
966 }
967 return r;
968}
969
b0c632db
HC
970long kvm_arch_vcpu_ioctl(struct file *filp,
971 unsigned int ioctl, unsigned long arg)
972{
973 struct kvm_vcpu *vcpu = filp->private_data;
974 void __user *argp = (void __user *)arg;
800c1065 975 int idx;
bc923cc9 976 long r;
b0c632db 977
93736624
AK
978 switch (ioctl) {
979 case KVM_S390_INTERRUPT: {
ba5c1e9b
CO
980 struct kvm_s390_interrupt s390int;
981
93736624 982 r = -EFAULT;
ba5c1e9b 983 if (copy_from_user(&s390int, argp, sizeof(s390int)))
93736624
AK
984 break;
985 r = kvm_s390_inject_vcpu(vcpu, &s390int);
986 break;
ba5c1e9b 987 }
b0c632db 988 case KVM_S390_STORE_STATUS:
800c1065 989 idx = srcu_read_lock(&vcpu->kvm->srcu);
bc923cc9 990 r = kvm_s390_vcpu_store_status(vcpu, arg);
800c1065 991 srcu_read_unlock(&vcpu->kvm->srcu, idx);
bc923cc9 992 break;
b0c632db
HC
993 case KVM_S390_SET_INITIAL_PSW: {
994 psw_t psw;
995
bc923cc9 996 r = -EFAULT;
b0c632db 997 if (copy_from_user(&psw, argp, sizeof(psw)))
bc923cc9
AK
998 break;
999 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
1000 break;
b0c632db
HC
1001 }
1002 case KVM_S390_INITIAL_RESET:
bc923cc9
AK
1003 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
1004 break;
14eebd91
CO
1005 case KVM_SET_ONE_REG:
1006 case KVM_GET_ONE_REG: {
1007 struct kvm_one_reg reg;
1008 r = -EFAULT;
1009 if (copy_from_user(&reg, argp, sizeof(reg)))
1010 break;
1011 if (ioctl == KVM_SET_ONE_REG)
1012 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
1013 else
1014 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
1015 break;
1016 }
27e0393f
CO
1017#ifdef CONFIG_KVM_S390_UCONTROL
1018 case KVM_S390_UCAS_MAP: {
1019 struct kvm_s390_ucas_mapping ucasmap;
1020
1021 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
1022 r = -EFAULT;
1023 break;
1024 }
1025
1026 if (!kvm_is_ucontrol(vcpu->kvm)) {
1027 r = -EINVAL;
1028 break;
1029 }
1030
1031 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
1032 ucasmap.vcpu_addr, ucasmap.length);
1033 break;
1034 }
1035 case KVM_S390_UCAS_UNMAP: {
1036 struct kvm_s390_ucas_mapping ucasmap;
1037
1038 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
1039 r = -EFAULT;
1040 break;
1041 }
1042
1043 if (!kvm_is_ucontrol(vcpu->kvm)) {
1044 r = -EINVAL;
1045 break;
1046 }
1047
1048 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
1049 ucasmap.length);
1050 break;
1051 }
1052#endif
ccc7910f
CO
1053 case KVM_S390_VCPU_FAULT: {
1054 r = gmap_fault(arg, vcpu->arch.gmap);
1055 if (!IS_ERR_VALUE(r))
1056 r = 0;
1057 break;
1058 }
d6712df9
CH
1059 case KVM_ENABLE_CAP:
1060 {
1061 struct kvm_enable_cap cap;
1062 r = -EFAULT;
1063 if (copy_from_user(&cap, argp, sizeof(cap)))
1064 break;
1065 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
1066 break;
1067 }
b0c632db 1068 default:
3e6afcf1 1069 r = -ENOTTY;
b0c632db 1070 }
bc923cc9 1071 return r;
b0c632db
HC
1072}
1073

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

void kvm_arch_memslots_updated(struct kvm *kvm)
{
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/* A few sanity checks. We can have memory slots which have to be
	   located/ended at a segment boundary (1MB). The memory in userland is
	   ok to be fragmented into various different vmas. It is okay to mmap()
	   and munmap() stuff in this slot after doing this call at any time */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}
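
/*
 * Note (added for clarity): the 0xfffff masks above enforce 1 MB alignment
 * of both the userspace address and the size, i.e. memslots must start and
 * end on s390 segment boundaries as the comment in
 * kvm_arch_prepare_memory_region() states.
 */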

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}

static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!vfacilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
	vfacilities[0] &= 0xff82fff3f47c0000UL;
	vfacilities[1] &= 0x001c000000000000UL;
	return 0;
}
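
/*
 * Note (added for clarity): the two AND masks above act as a whitelist:
 * only facilities that are both present on the host (copied from the STFLE
 * list in the lowcore) and set in the masks are advertised to guests;
 * everything else is hidden even if the machine supports it.
 */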

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) vfacilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");