/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};

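/*
 * Facility list shown to guests: a copy of the host STFLE facility
 * list, masked down in kvm_s390_init() to the facilities known to
 * work under KVM.
 */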
unsigned long *vfacilities;
static struct gmap_notifier gmap_notifier;

/* test availability of vfacility */
static inline int test_vfacility(unsigned long nr)
{
	return __test_facility(nr, (void *) vfacilities);
}

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	default:
		r = 0;
	}
	return r;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

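/*
 * The type argument selects the VM flavor: 0 for a normal guest,
 * KVM_VM_S390_UCONTROL (CAP_SYS_ADMIN only) for a user-controlled
 * VM, which gets no vm-wide gmap and manages its guest address
 * spaces from userspace.
 */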
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm);
		if (!kvm->arch.gmap)
			goto out_nogmap;
		kvm->arch.gmap->private = kvm;
	}

	kvm->arch.css_support = 0;

	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}

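/*
 * Unhook the vcpu from the SCA (so the hardware no longer sees its
 * SIE block) before the backing pages are freed.
 */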
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = gmap_alloc(current->mm);
		if (!vcpu->arch.gmap)
			return -ENOMEM;
		vcpu->arch.gmap->private = vcpu->kvm;
		return 0;
	}

	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS;
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}

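/*
 * Floating point and access registers are switched lazily: vcpu_load
 * stashes the host copies and installs the guest copies, vcpu_put
 * reverses this.
 */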
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}

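/*
 * Note that the reset leaves the vcpu in the stopped state; the
 * stopped flag is cleared again when the vcpu enters the run ioctl.
 */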
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	vcpu->arch.sie_block->ecb  = 6;
	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca  = 0xC1002001U;
	vcpu->arch.sie_block->fac  = (int) (long) vfacilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}

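/*
 * For non-ucontrol guests each vcpu is entered into the system
 * control area (SCA): its SIE block address goes into the sda slot
 * for its id and the matching bit in the mcn mask is set.
 */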
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
		get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}

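/*
 * Called via the ipte notifier when a host mapping backing a guest
 * prefix page is invalidated: request a prefix reload and kick the
 * affected vcpus out of SIE.
 */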
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (vcpu->arch.sie_block->prefix == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

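/*
 * The ONE_REG interface exposes a few SIE block fields to userspace:
 * the TOD programmable register, the guest epoch difference, the CPU
 * timer and the clock comparator.
 */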
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Let's just retry the request loop.
	 */
	while (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      vcpu->arch.sie_block->prefix,
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		s390_vcpu_unblock(vcpu);
	}
	return 0;
}

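/*
 * Per-iteration work before (re)entering SIE: give the scheduler and
 * the machine check handler a chance to run, deliver pending
 * interrupts and process outstanding requests.
 */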
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm))
		kvm_s390_deliver_pending_interrupts(vcpu);

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}

static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	int rc;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (exit_reason >= 0) {
		rc = 0;
	} else {
		if (kvm_is_ucontrol(vcpu->kvm)) {
			rc = SIE_INTERCEPT_UCONTROL;
		} else {
			VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
			trace_kvm_s390_sie_fault(vcpu);
			rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}
	}

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			rc = -EOPNOTSUPP;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}

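/*
 * The inner run loop: re-enter SIE until a signal is pending or
 * pre/post-run processing reports an error or an intercept that has
 * to be completed in userspace.
 */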
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in the fault handler, there must
		 * be no uaccess between guest_enter and guest_exit.
		 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}

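/*
 * Sync registers flow both ways: the psw is copied in unconditionally,
 * prefix and control registers only when marked dirty in kvm_run; all
 * of them are copied back out after the run loop.
 */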
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
	case KVM_EXIT_S390_UCONTROL:
	case KVM_EXIT_S390_TSCH:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

#ifdef CONFIG_KVM_S390_UCONTROL
	if (rc == SIE_INTERCEPT_UCONTROL) {
		kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
		kvm_run->s390_ucontrol.trans_exc_code =
			current->thread.gmap_addr;
		kvm_run->s390_ucontrol.pgm_code = 0x10;
		rc = 0;
	}
#endif

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}

static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Let's update our copies before we save
	 * it into the save area
	 */
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->run->s.regs.acrs);

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->run->s.regs.gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->run->s.regs.acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}

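/*
 * Only KVM_CAP_S390_CSS_SUPPORT can be enabled here; although this is
 * a per-vcpu ioctl, it flips a vm-wide flag.
 */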
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(arg, vcpu->arch.gmap);
		if (!IS_ERR_VALUE(r))
			r = 0;
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	return 0;
}

void kvm_arch_memslots_updated(struct kvm *kvm)
{
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/* A few sanity checks. Memory slots have to start and end on a
	   segment boundary (1MB). The memory in userland may be fragmented
	   into various different vmas, and it is okay to mmap() and munmap()
	   in this slot at any time after this call */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}

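/*
 * Module init: register with the common KVM code and build the
 * facility list (vfacilities) that is exposed to guests.
 */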
static int __init kvm_s390_init(void)
{
	int ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!vfacilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
	vfacilities[0] &= 0xff82fff3f47c0000UL;
	vfacilities[1] &= 0x001c000000000000UL;
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) vfacilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");