/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

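/* vcpu event counters exported to userspace via debugfs */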
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};

static unsigned long long *facilities;
static struct gmap_notifier gmap_notifier;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

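/* report which generic KVM capabilities are backed by this architecture */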
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	default:
		r = 0;
	}
	return r;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

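/*
 * Set up the per-VM state: the system control area (SCA), the s390 debug
 * feature buffer, the floating interrupt list and, unless this is a
 * ucontrol VM, the guest address space (gmap).
 */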
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm);
		if (!kvm->arch.gmap)
			goto out_nogmap;
		kvm->arch.gmap->private = kvm;
	}

	kvm->arch.css_support = 0;

	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}

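/*
 * Tear down a vcpu: detach its SIE block from the SCA (ucontrol VMs have
 * no SCA), free the per-vcpu gmap of ucontrol guests and release the SIE
 * block and the vcpu structure itself.
 */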
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = gmap_alloc(current->mm);
		if (!vcpu->arch.gmap)
			return -ENOMEM;
		vcpu->arch.gmap->private = vcpu->kvm;
		return 0;
	}

	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS;
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}

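/*
 * Guest floating point and access registers are switched lazily: while the
 * vcpu is loaded they live in the hardware registers, and they are only
 * written back to the vcpu's save copies in kvm_arch_vcpu_put().
 */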
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}

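/*
 * Initial SIE control block setup: cpu state flags, execution controls,
 * the facility list advertised to the guest and the clock comparator
 * wakeup timer.
 */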
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);
	vcpu->arch.sie_block->ecb = 6;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	vcpu->arch.sie_block->fac = (int) (long) facilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}

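/*
 * Allocate a vcpu with its SIE control block, hook the block into the SCA
 * for non-ucontrol VMs and initialize the local interrupt state.
 */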
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	if (!vcpu)
		goto out;

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
					get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kfree(vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

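/*
 * Setting PROG_BLOCK_SIE prevents the vcpu from (re)entering SIE until
 * the bit is cleared again by s390_vcpu_unblock().
 */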
void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}

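/*
 * Called via the ipte notifier when a mapping backing a guest prefix page
 * goes away: request a reload and kick the affected vcpus out of SIE.
 */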
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (vcpu->arch.sie_block->prefix == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

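/*
 * KVM_GET/SET_ONE_REG access to SIE block fields that are not covered by
 * the sync-regs interface: TOD programmable register, epoch difference,
 * CPU timer and clock comparator.
 */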
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Let's just retry the request loop.
	 */
	while (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      vcpu->arch.sie_block->prefix,
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		s390_vcpu_unblock(vcpu);
	}
	return 0;
}

681
e168bf8d 682static int __vcpu_run(struct kvm_vcpu *vcpu)
b0c632db 683{
e168bf8d
CO
684 int rc;
685
5a32c1af 686 memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
b0c632db
HC
687
688 if (need_resched())
689 schedule();
690
71cde587
CB
691 if (test_thread_flag(TIF_MCCK_PENDING))
692 s390_handle_mcck();
693
d6b6d166
CO
694 if (!kvm_is_ucontrol(vcpu->kvm))
695 kvm_s390_deliver_pending_interrupts(vcpu);
0ff31867 696
2c70fe44
CB
697 rc = kvm_s390_handle_requests(vcpu);
698 if (rc)
699 return rc;
700
b0c632db 701 vcpu->arch.sie_block->icptcode = 0;
83987ace 702 preempt_disable();
b0c632db 703 kvm_guest_enter();
83987ace 704 preempt_enable();
b0c632db
HC
705 VCPU_EVENT(vcpu, 6, "entering sie flags %x",
706 atomic_read(&vcpu->arch.sie_block->cpuflags));
5786fffa
CH
707 trace_kvm_s390_sie_enter(vcpu,
708 atomic_read(&vcpu->arch.sie_block->cpuflags));
5a32c1af 709 rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
7c470539
MS
710 if (rc > 0)
711 rc = 0;
712 if (rc < 0) {
e168bf8d
CO
713 if (kvm_is_ucontrol(vcpu->kvm)) {
714 rc = SIE_INTERCEPT_UCONTROL;
715 } else {
716 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
5786fffa 717 trace_kvm_s390_sie_fault(vcpu);
db4a29cb 718 rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
e168bf8d 719 }
1f0d0f09 720 }
b0c632db
HC
721 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
722 vcpu->arch.sie_block->icptcode);
5786fffa 723 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
b0c632db 724 kvm_guest_exit();
b0c632db 725
5a32c1af 726 memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
e168bf8d 727 return rc;
b0c632db
HC
728}
729
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

rerun_vcpu:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
	case KVM_EXIT_S390_UCONTROL:
	case KVM_EXIT_S390_TSCH:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}

	might_fault();

	do {
		rc = __vcpu_run(vcpu);
		if (rc)
			break;
		if (kvm_is_ucontrol(vcpu->kvm))
			rc = -EOPNOTSUPP;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

#ifdef CONFIG_KVM_S390_UCONTROL
	if (rc == SIE_INTERCEPT_UCONTROL) {
		kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
		kvm_run->s390_ucontrol.trans_exc_code =
			current->thread.gmap_addr;
		kvm_run->s390_ucontrol.pgm_code = 0x10;
		rc = 0;
	}
#endif

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed;
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}

static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Let's update our copies before we save
	 * it into the save area
	 */
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->run->s.regs.acrs);

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->run->s.regs.gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->run->s.regs.acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}

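/*
 * Per-vcpu KVM_ENABLE_CAP; the only capability handled here is
 * KVM_CAP_S390_CSS_SUPPORT, which enables css support VM-wide.
 */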
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(arg, vcpu->arch.gmap);
		if (!IS_ERR_VALUE(r))
			r = 0;
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	return 0;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/* A few sanity checks. Memory slots have to start and end on a
	   segment boundary (1MB). The memory in userland may be fragmented
	   into various different vmas. It is okay to mmap() and munmap()
	   stuff in this slot after doing this call at any time */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}

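/*
 * Module init: register with common KVM code and build the facility list
 * shown to guests. The page is allocated with GFP_DMA since the SIE block
 * holds only a 31-bit address for it (note the (int) cast below).
 */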
static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!facilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
	facilities[0] &= 0xff00fff3f47c0000ULL;
	facilities[1] &= 0x001c000000000000ULL;
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);