/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */
#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <linux/mm_types.h>

#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/pgtable.h>
#include <asm/page-states.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include <asm/sclp.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"
static int handle_ri(struct kvm_vcpu *vcpu)
{
	if (test_kvm_facility(vcpu->kvm, 64)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (lazy)");
		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
		kvm_s390_retry_instr(vcpu);
		return 0;
	} else
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}

int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.sie_block->ipa & 0xf) <= 4)
		return handle_ri(vcpu);
	else
		return -EOPNOTSUPP;
}
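
/*
 * Note: handle_ri() above and handle_gs() below share the same lazy-enable
 * pattern: the first guest use of the facility traps with an operation
 * exception, the handler sets the corresponding interpretation-control bit
 * in the SIE block and then retries the instruction, so all further uses
 * are interpreted by hardware without another exit.
 */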
static int handle_gs(struct kvm_vcpu *vcpu)
{
	if (test_kvm_facility(vcpu->kvm, 133)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (lazy)");
		preempt_disable();
		__ctl_set_bit(2, 4);
		current->thread.gs_cb = (struct gs_cb *)&vcpu->run->s.regs.gscb;
		restore_gs_cb(current->thread.gs_cb);
		preempt_enable();
		vcpu->arch.sie_block->ecb |= ECB_GS;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
		vcpu->arch.gs_enabled = 1;
		kvm_s390_retry_instr(vcpu);
		return 0;
	} else
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}

int kvm_s390_handle_e3(struct kvm_vcpu *vcpu)
{
	int code = vcpu->arch.sie_block->ipb & 0xff;

	if (code == 0x49 || code == 0x4d)
		return handle_gs(vcpu);
	else
		return -EOPNOTSUPP;
}
/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
	int rc;
	u8 ar;
	u64 op2, val;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (op2 & 7)	/* Operand must be on a doubleword boundary */
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, op2, ar, &val, sizeof(val));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val);
	kvm_s390_set_tod_clock(vcpu->kvm, val);

	kvm_s390_set_psw_cc(vcpu, 0);
	return 0;
}
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;
	u8 ar;

	vcpu->stat.instruction_spx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = read_guest(vcpu, operand2, ar, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	address &= 0x7fffe000u;
	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are always
	 * at least 1MB aligned and have at least a size of 1MB.
	 */
	if (kvm_is_error_gpa(vcpu->kvm, address))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_prefix(vcpu, address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
	return 0;
}
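
/*
 * Worked example for the masking above: an operand of 0x12345678 yields
 * 0x12345678 & 0x7fffe000 = 0x12344000, i.e. the topmost bit and the low
 * 13 bits are cleared, so the prefix designates an 8k-aligned area below
 * 2 GB.
 */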
static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stpx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	address = kvm_s390_get_prefix(vcpu);
	rc = write_guest(vcpu, operand2, ar, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STPX: storing prefix 0x%x into 0x%llx", address, operand2);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
	return 0;
}
static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	u16 vcpu_id = vcpu->vcpu_id;
	u64 ga;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stap++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (ga & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STAP: storing cpu address (%u) to 0x%llx", vcpu_id, ga);
	trace_kvm_s390_handle_stap(vcpu, ga);
	return 0;
}
int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu)
{
	int rc;
	struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;

	trace_kvm_s390_skey_related_inst(vcpu);
	if (!(sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)) &&
	    !(atomic_read(&sie_block->cpuflags) & CPUSTAT_KSS))
		return 0;

	rc = s390_enable_skey();
	VCPU_EVENT(vcpu, 3, "enabling storage keys for guest: %d", rc);
	if (!rc) {
		if (atomic_read(&sie_block->cpuflags) & CPUSTAT_KSS)
			atomic_andnot(CPUSTAT_KSS, &sie_block->cpuflags);
		else
			sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE |
					     ICTL_RRBE);
	}
	return rc;
}

static int try_handle_skey(struct kvm_vcpu *vcpu)
{
	int rc;

	vcpu->stat.instruction_storage_key++;
	rc = kvm_s390_skey_check_enable(vcpu);
	if (rc)
		return rc;
	if (sclp.has_skey) {
		/* with storage-key facility, SIE interprets it for us */
		kvm_s390_retry_instr(vcpu);
		VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
		return -EAGAIN;
	}
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	return 0;
}
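
/*
 * Calling convention used below: a -EAGAIN from try_handle_skey() means the
 * instruction was already retried via kvm_s390_retry_instr(), so the
 * handle_iske/handle_rrbe/handle_sske callers map it to "success, nothing
 * more to do".
 */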
static int handle_iske(struct kvm_vcpu *vcpu)
{
	unsigned long addr;
	unsigned char key;
	int reg1, reg2;
	int rc;

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	addr = kvm_s390_real_to_abs(vcpu, addr);
	addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
	if (kvm_is_error_hva(addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	down_read(&current->mm->mmap_sem);
	rc = get_guest_storage_key(current->mm, addr, &key);
	up_read(&current->mm->mmap_sem);
	if (rc)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	vcpu->run->s.regs.gprs[reg1] &= ~0xff;
	vcpu->run->s.regs.gprs[reg1] |= key;
	return 0;
}
static int handle_rrbe(struct kvm_vcpu *vcpu)
{
	unsigned long addr;
	int reg1, reg2;
	int rc;

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	addr = kvm_s390_real_to_abs(vcpu, addr);
	addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
	if (kvm_is_error_hva(addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	down_read(&current->mm->mmap_sem);
	rc = reset_guest_reference_bit(current->mm, addr);
	up_read(&current->mm->mmap_sem);
	if (rc < 0)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_psw_cc(vcpu, rc);
	return 0;
}
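
/*
 * The non-negative return value of reset_guest_reference_bit() doubles as
 * the condition code here; per the RRBE definition it reflects the old
 * reference/change state of the page before the reference bit was reset.
 */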
#define SSKE_NQ 0x8
#define SSKE_MR 0x4
#define SSKE_MC 0x2
#define SSKE_MB 0x1
static int handle_sske(struct kvm_vcpu *vcpu)
{
	unsigned char m3 = vcpu->arch.sie_block->ipb >> 28;
	unsigned long start, end;
	unsigned char key, oldkey;
	int reg1, reg2;
	int rc;

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	if (!test_kvm_facility(vcpu->kvm, 8))
		m3 &= ~SSKE_MB;
	if (!test_kvm_facility(vcpu->kvm, 10))
		m3 &= ~(SSKE_MC | SSKE_MR);
	if (!test_kvm_facility(vcpu->kvm, 14))
		m3 &= ~SSKE_NQ;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	key = vcpu->run->s.regs.gprs[reg1] & 0xfe;
	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	start = kvm_s390_logical_to_effective(vcpu, start);
	if (m3 & SSKE_MB) {
		/* start already designates an absolute address */
		end = (start + _SEGMENT_SIZE) & ~(_SEGMENT_SIZE - 1);
	} else {
		start = kvm_s390_real_to_abs(vcpu, start);
		end = start + PAGE_SIZE;
	}

	while (start != end) {
		unsigned long addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));

		if (kvm_is_error_hva(addr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		down_read(&current->mm->mmap_sem);
		rc = cond_set_guest_storage_key(current->mm, addr, key, &oldkey,
						m3 & SSKE_NQ, m3 & SSKE_MR,
						m3 & SSKE_MC);
		up_read(&current->mm->mmap_sem);
		if (rc < 0)
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		start += PAGE_SIZE;
	}

	if (m3 & (SSKE_MC | SSKE_MR)) {
		if (m3 & SSKE_MB) {
			/* skey in reg1 is unpredictable */
			kvm_s390_set_psw_cc(vcpu, 3);
		} else {
			kvm_s390_set_psw_cc(vcpu, rc);
			vcpu->run->s.regs.gprs[reg1] &= ~0xff00UL;
			vcpu->run->s.regs.gprs[reg1] |= (u64) oldkey << 8;
		}
	}
	if (m3 & SSKE_MB) {
		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT)
			vcpu->run->s.regs.gprs[reg2] &= ~PAGE_MASK;
		else
			vcpu->run->s.regs.gprs[reg2] &= ~0xfffff000UL;
		end = kvm_s390_logical_to_effective(vcpu, end);
		vcpu->run->s.regs.gprs[reg2] |= end;
	}
	return 0;
}
static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_ipte_interlock++;
	if (psw_bits(vcpu->arch.sie_block->gpsw).pstate)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
	kvm_s390_retry_instr(vcpu);
	VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
	return 0;
}
static int handle_test_block(struct kvm_vcpu *vcpu)
{
	gpa_t addr;
	int reg2;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	if (kvm_s390_check_low_addr_prot_real(vcpu, addr))
		return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	addr = kvm_s390_real_to_abs(vcpu, addr);

	if (kvm_is_error_gpa(vcpu->kvm, addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	/*
	 * We don't expect errors on modern systems, and do not care
	 * about storage keys (yet), so let's just clear the page.
	 */
	if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE))
		return -EFAULT;
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
}
static int handle_tpi(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;
	unsigned long len;
	u32 tpi_data[3];
	int rc;
	u64 addr;
	u8 ar;

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
	if (!inti) {
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
	tpi_data[1] = inti->io.io_int_parm;
	tpi_data[2] = inti->io.io_int_word;
	if (addr) {
		/*
		 * Store the two-word I/O interruption code into the
		 * provided area.
		 */
		len = sizeof(tpi_data) - 4;
		rc = write_guest(vcpu, addr, ar, &tpi_data, len);
		if (rc) {
			rc = kvm_s390_inject_prog_cond(vcpu, rc);
			goto reinject_interrupt;
		}
	} else {
		/*
		 * Store the three-word I/O interruption code into
		 * the appropriate lowcore area.
		 */
		len = sizeof(tpi_data);
		if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
			/* failed writes to the low core are not recoverable */
			rc = -EFAULT;
			goto reinject_interrupt;
		}
	}
	/* irq was successfully handed to the guest */
	kfree(inti);
	kvm_s390_set_psw_cc(vcpu, 1);
	return 0;
reinject_interrupt:
	/*
	 * If we encounter a problem storing the interruption code, the
	 * instruction is suppressed from the guest's view: reinject the
	 * interrupt.
	 */
	if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
		kfree(inti);
		rc = -EFAULT;
	}
	/* don't set the cc, a pgm irq was injected or we drop to user space */
	return rc ? -EFAULT : 0;
}
static int handle_tsch(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	const u64 isc_mask = 0xffUL << 24; /* all iscs set */

	/* a valid schid has at least one bit set */
	if (vcpu->run->s.regs.gprs[1])
		inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask,
					   vcpu->run->s.regs.gprs[1]);

	/*
	 * Prepare exit to userspace.
	 * We indicate whether we dequeued a pending I/O interrupt
	 * so that userspace can re-inject it if the instruction gets
	 * a program check. While this may re-order the pending I/O
	 * interrupts, this is no problem since the priority is kept
	 * intact.
	 */
	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
	vcpu->run->s390_tsch.dequeued = !!inti;
	if (inti) {
		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
	}
	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
	kfree(inti);
	return -EREMOTE;
}
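
/*
 * The -EREMOTE return above signals the interception code to exit to
 * userspace with the kvm_run data prepared here, so that userspace can
 * complete the TEST SUBCHANNEL emulation.
 */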
static int handle_io_inst(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->kvm->arch.css_support) {
		/*
		 * Most I/O instructions will be handled by userspace.
		 * Exceptions are tpi and the interrupt portion of tsch.
		 */
		if (vcpu->arch.sie_block->ipa == 0xb236)
			return handle_tpi(vcpu);
		if (vcpu->arch.sie_block->ipa == 0xb235)
			return handle_tsch(vcpu);
		/* Handle in userspace. */
		return -EOPNOTSUPP;
	} else {
		/*
		 * Set condition code 3 to stop the guest from issuing channel
		 * I/O instructions.
		 */
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
}
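
/*
 * Design note: with css_support the channel subsystem is emulated in
 * userspace, so everything except TPI and the interrupt part of TSCH is
 * passed along via -EOPNOTSUPP; without it, cc 3 tells the guest that its
 * channel I/O requests cannot be satisfied.
 */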
static int handle_stfl(struct kvm_vcpu *vcpu)
{
	int rc;
	unsigned int fac;

	vcpu->stat.instruction_stfl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	/*
	 * We need to shift the lower 32 facility bits (bit 0-31) from a u64
	 * into a u32 memory representation. They will remain bits 0-31.
	 */
	fac = *vcpu->kvm->arch.model.fac_list >> 32;
	rc = write_guest_lc(vcpu, offsetof(struct lowcore, stfl_fac_list),
			    &fac, sizeof(fac));
	if (rc)
		return rc;
	VCPU_EVENT(vcpu, 3, "STFL: store facility list 0x%x", fac);
	trace_kvm_s390_handle_stfl(vcpu, fac);
	return 0;
}
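
/*
 * Example for the shift above: if fac_list[0] is 0xc000000000000000, the
 * guest sees 0xc0000000, i.e. facility bits 0 and 1 remain the two
 * leftmost bits of the stored word.
 */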
#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

int is_valid_psw(psw_t *psw)
{
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
		if (psw->addr & ~PSW_ADDR_31)
			return 0;
	}
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
		return 0;
	if (psw->addr & 1)
		return 0;
	return 1;
}
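
/*
 * For example, a PSW in 31-bit mode (BA set, EA clear) with an address
 * above 0x7fffffff, or one with any of the unassigned mask bits set, is
 * rejected here and leads to a specification exception in the load-PSW
 * handlers below.
 */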
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
	psw_compat_t new_psw;
	u64 addr;
	int rc;
	u8 ar;

	if (gpsw->mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}
static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;
	int rc;
	u8 ar;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}
static int handle_stidp(struct kvm_vcpu *vcpu)
{
	u64 stidp_data = vcpu->kvm->arch.model.cpuid;
	u64 operand2;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stidp++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (operand2 & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STIDP: store cpu id 0x%llx", stidp_data);
	return 0;
}
static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	int cpus = 0;
	int n;

	cpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	for (n = mem->count - 1; n > 0 ; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}
static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, u8 ar,
				 u8 fc, u8 sel1, u16 sel2)
{
	vcpu->run->exit_reason = KVM_EXIT_S390_STSI;
	vcpu->run->s390_stsi.addr = addr;
	vcpu->run->s390_stsi.ar = ar;
	vcpu->run->s390_stsi.fc = fc;
	vcpu->run->s390_stsi.sel1 = sel1;
	vcpu->run->s390_stsi.sel2 = sel2;
}
static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	unsigned long mem = 0;
	u64 operand2;
	int rc = 0;
	u8 ar;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (fc > 3) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}

	if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
	    || vcpu->run->s.regs.gprs[1] & 0xffff0000)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (fc == 0) {
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (operand2 & 0xfff)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_no_data;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_no_data;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	}

	rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
	if (rc) {
		rc = kvm_s390_inject_prog_cond(vcpu, rc);
		goto out;
	}
	if (vcpu->kvm->arch.user_stsi) {
		insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2);
		rc = -EREMOTE;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return rc;
out_no_data:
	kvm_s390_set_psw_cc(vcpu, 3);
out:
	free_page(mem);
	return rc;
}
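
/*
 * For fc 0 above, STSI reports the current configuration level in the
 * leftmost nibble of the low word of general register 0; the "3 << 28"
 * write-back corresponds to level 3, i.e. the guest is told it runs in a
 * virtual machine.
 */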
static const intercept_handler_t b2_handlers[256] = {
	[0x02] = handle_stidp,
	[0x04] = handle_set_clock,
	[0x10] = handle_set_prefix,
	[0x11] = handle_store_prefix,
	[0x12] = handle_store_cpu_address,
	[0x14] = kvm_s390_handle_vsie,
	[0x21] = handle_ipte_interlock,
	[0x29] = handle_iske,
	[0x2a] = handle_rrbe,
	[0x2b] = handle_sske,
	[0x2c] = handle_test_block,
	[0x30] = handle_io_inst,
	[0x31] = handle_io_inst,
	[0x32] = handle_io_inst,
	[0x33] = handle_io_inst,
	[0x34] = handle_io_inst,
	[0x35] = handle_io_inst,
	[0x36] = handle_io_inst,
	[0x37] = handle_io_inst,
	[0x38] = handle_io_inst,
	[0x39] = handle_io_inst,
	[0x3a] = handle_io_inst,
	[0x3b] = handle_io_inst,
	[0x3c] = handle_io_inst,
	[0x50] = handle_ipte_interlock,
	[0x56] = handle_sthyi,
	[0x5f] = handle_io_inst,
	[0x74] = handle_io_inst,
	[0x76] = handle_io_inst,
	[0x7d] = handle_stsi,
	[0xb1] = handle_stfl,
	[0xb2] = handle_lpswe,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/*
	 * A lot of B2 instructions are privileged. Here we check for
	 * the privileged ones, that we can handle in the kernel.
	 * Anything else goes to userspace.
	 */
	handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}
static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* This basically extracts the mask half of the psw. */
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
		vcpu->run->s.regs.gprs[reg2] |=
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
	}
	return 0;
}
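
/*
 * Example: with a guest PSW mask of 0x0705000180000000, reg1 receives
 * 0x07050001 in its low word and, if a second register is specified,
 * reg2 receives 0x80000000.
 */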
#define PFMF_RESERVED   0xfffc0101UL
#define PFMF_SK         0x00020000UL
#define PFMF_CF         0x00010000UL
#define PFMF_UI         0x00008000UL
#define PFMF_FSC        0x00007000UL
#define PFMF_NQ         0x00000800UL
#define PFMF_MR         0x00000400UL
#define PFMF_MC         0x00000200UL
#define PFMF_KEY        0x000000feUL
static int handle_pfmf(struct kvm_vcpu *vcpu)
{
	bool mr = false, mc = false, nq;
	int reg1, reg2;
	unsigned long start, end;
	unsigned char key;

	vcpu->stat.instruction_pfmf++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	if (!test_kvm_facility(vcpu->kvm, 8))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide non-quiescing support if enabled for the guest */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ &&
	    !test_kvm_facility(vcpu->kvm, 14))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide conditional-SSKE support if enabled for the guest */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK &&
	    test_kvm_facility(vcpu->kvm, 10)) {
		mr = vcpu->run->s.regs.gprs[reg1] & PFMF_MR;
		mc = vcpu->run->s.regs.gprs[reg1] & PFMF_MC;
	}

	nq = vcpu->run->s.regs.gprs[reg1] & PFMF_NQ;
	key = vcpu->run->s.regs.gprs[reg1] & PFMF_KEY;
	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	start = kvm_s390_logical_to_effective(vcpu, start);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
		if (kvm_s390_check_low_addr_prot_real(vcpu, start))
			return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	}

	switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
	case 0x00000000:
		/* only 4k frames specify a real address */
		start = kvm_s390_real_to_abs(vcpu, start);
		end = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
		break;
	case 0x00001000:
		end = (start + _SEGMENT_SIZE) & ~(_SEGMENT_SIZE - 1);
		break;
	case 0x00002000:
		/* only support 2G frame size if EDAT2 is available and we are
		   not in 24-bit addressing mode */
		if (!test_kvm_facility(vcpu->kvm, 78) ||
		    psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_24BIT)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		end = (start + _REGION3_SIZE) & ~(_REGION3_SIZE - 1);
		break;
	default:
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	}

	while (start != end) {
		unsigned long useraddr;

		/* Translate guest address to host address */
		useraddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
		if (kvm_is_error_hva(useraddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
			if (clear_user((void __user *)useraddr, PAGE_SIZE))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
			int rc = kvm_s390_skey_check_enable(vcpu);

			if (rc)
				return rc;
			down_read(&current->mm->mmap_sem);
			rc = cond_set_guest_storage_key(current->mm, useraddr,
							key, NULL, nq, mr, mc);
			up_read(&current->mm->mmap_sem);
			if (rc < 0)
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		start += PAGE_SIZE;
	}
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) {
			vcpu->run->s.regs.gprs[reg2] = end;
		} else {
			vcpu->run->s.regs.gprs[reg2] &= ~0xffffffffUL;
			end = kvm_s390_logical_to_effective(vcpu, end);
			vcpu->run->s.regs.gprs[reg2] |= end;
		}
	}
	return 0;
}
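
/*
 * Note on the write-back above: when a frame-size code is specified, PFMF
 * leaves the address of the next block in reg2; in the 24/31-bit modes only
 * the low word is replaced, after truncating the address to the current
 * addressing mode via kvm_s390_logical_to_effective().
 */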
static inline int do_essa(struct kvm_vcpu *vcpu, const int orc)
{
	struct kvm_s390_migration_state *ms = vcpu->kvm->arch.migration_state;
	int r1, r2, nappended, entries;
	unsigned long gfn, hva, res, pgstev, ptev;
	unsigned long *cbrlo;

	/*
	 * We don't need to set SD.FPF.SK to 1 here, because if we have a
	 * machine check here we either handle it or crash
	 */

	kvm_s390_get_regs_rre(vcpu, &r1, &r2);
	gfn = vcpu->run->s.regs.gprs[r2] >> PAGE_SHIFT;
	hva = gfn_to_hva(vcpu->kvm, gfn);
	entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;

	if (kvm_is_error_hva(hva))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	nappended = pgste_perform_essa(vcpu->kvm->mm, hva, orc, &ptev, &pgstev);
	if (nappended < 0) {
		res = orc ? 0x10 : 0;
		vcpu->run->s.regs.gprs[r1] = res; /* Exception Indication */
		return 0;
	}
	res = (pgstev & _PGSTE_GPS_USAGE_MASK) >> 22;
	/*
	 * Set the block-content state part of the result. 0 means resident, so
	 * nothing to do if the page is valid. 2 is for preserved pages
	 * (non-present and non-zero), and 3 for zero pages (non-present and
	 * zero).
	 */
	if (ptev & _PAGE_INVALID) {
		res |= 2;
		if (pgstev & _PGSTE_GPS_ZERO)
			res |= 1;
	}
	if (pgstev & _PGSTE_GPS_NODAT)
		res |= 0x20;
	vcpu->run->s.regs.gprs[r1] = res;
	/*
	 * It is possible that all the normal 511 slots were full, in which case
	 * we will now write in the 512th slot, which is reserved for host use.
	 * In both cases we let the normal essa handling code process all the
	 * slots, including the reserved one, if needed.
	 */
	if (nappended > 0) {
		cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo & PAGE_MASK);
		cbrlo[entries] = gfn << PAGE_SHIFT;
	}

	if (orc) {
		/* increment only if we are really flipping the bit to 1 */
		if (!test_and_set_bit(gfn, ms->pgste_bitmap))
			atomic64_inc(&ms->dirty_pages);
	}

	return nappended;
}
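
/*
 * Return contract of do_essa(), as used by handle_essa() below: a negative
 * value is an error to be passed on; otherwise it is the number of extra
 * cbrl entries appended (0 when none, including the exception-indication
 * path above), which the caller adds to its entry count.
 */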
static int handle_essa(struct kvm_vcpu *vcpu)
{
	/* entries expected to be 1FF */
	int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
	unsigned long *cbrlo;
	struct gmap *gmap;
	int i, orc;

	VCPU_EVENT(vcpu, 4, "ESSA: release %d pages", entries);
	gmap = vcpu->arch.gmap;
	vcpu->stat.instruction_essa++;
	if (!vcpu->kvm->arch.use_cmma)
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	/* Check for invalid operation request code */
	orc = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
	/* ORCs 0-6 are always valid */
	if (orc > (test_kvm_facility(vcpu->kvm, 147) ? ESSA_SET_STABLE_NODAT
						: ESSA_SET_STABLE_IF_RESIDENT))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (likely(!vcpu->kvm->arch.migration_state)) {
		/*
		 * CMMA is enabled in the KVM settings, but is disabled in
		 * the SIE block and in the mm_context, and we are not doing
		 * a migration. Enable CMMA in the mm_context.
		 * Since we need to take a write lock to write to the context
		 * to avoid races with storage keys handling, we check if the
		 * value really needs to be written to; if the value is
		 * already correct, we do nothing and avoid the lock.
		 */
		if (vcpu->kvm->mm->context.use_cmma == 0) {
			down_write(&vcpu->kvm->mm->mmap_sem);
			vcpu->kvm->mm->context.use_cmma = 1;
			up_write(&vcpu->kvm->mm->mmap_sem);
		}
		/*
		 * If we are here, we are supposed to have CMMA enabled in
		 * the SIE block. Enabling CMMA works on a per-CPU basis,
		 * while the context use_cmma flag is per process.
		 * It's possible that the context flag is enabled and the
		 * SIE flag is not, so we set the flag always; if it was
		 * already set, nothing changes, otherwise we enable it
		 * on this CPU too.
		 */
		vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
		/* Retry the ESSA instruction */
		kvm_s390_retry_instr(vcpu);
	} else {
		/* Account for the possible extra cbrl entry */
		i = do_essa(vcpu, orc);
		if (i < 0)
			return i;
		entries += i;
	}
	vcpu->arch.sie_block->cbrlo &= PAGE_MASK;	/* reset nceo */
	cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
	down_read(&gmap->mm->mmap_sem);
	for (i = 0; i < entries; ++i)
		__gmap_zap(gmap, cbrlo[i]);
	up_read(&gmap->mm->mmap_sem);
	return 0;
}
static const intercept_handler_t b9_handlers[256] = {
	[0x8a] = handle_ipte_interlock,
	[0x8d] = handle_epsw,
	[0x8e] = handle_ipte_interlock,
	[0x8f] = handle_ipte_interlock,
	[0xab] = handle_essa,
	[0xaf] = handle_pfmf,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/* This is handled just as for the B2 instructions. */
	handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_lctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);
	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "LCTL: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
		vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}
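
/*
 * The register window wraps: e.g. reg1 = 13, reg3 = 2 loads control
 * registers 13, 14, 15, 0, 1, 2, and ((reg3 - reg1) & 0xf) + 1 = 6 matches
 * the number of 32-bit values fetched above. The same wraparound applies
 * to the store/64-bit variants below.
 */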
int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_stctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);
	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "STCTL r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}
static int handle_lctlg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_lctlg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);
	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "LCTLG: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}
static int handle_stctg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_stctg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);
	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "STCTG r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}
static const intercept_handler_t eb_handlers[256] = {
	[0x2f] = handle_lctlg,
	[0x25] = handle_stctg,
	[0x60] = handle_ri,
	[0x61] = handle_ri,
	[0x62] = handle_ri,
};

int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}
static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address1, address2;
	unsigned long hva, gpa;
	int ret = 0, cc = 0;
	bool writable;
	u8 ar;

	vcpu->stat.instruction_tprot++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2, &ar, NULL);

	/* we only handle the Linux memory detection case:
	 * access key == 0
	 * everything else goes to userspace. */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_lock(vcpu);
	ret = guest_translate_address(vcpu, address1, ar, &gpa, GACC_STORE);
	if (ret == PGM_PROTECTION) {
		/* Write protected? Try again with read-only... */
		cc = 1;
		ret = guest_translate_address(vcpu, address1, ar, &gpa,
					      GACC_FETCH);
	}
	if (ret) {
		if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
			ret = kvm_s390_inject_program_int(vcpu, ret);
		} else if (ret > 0) {
			/* Translation not available */
			kvm_s390_set_psw_cc(vcpu, 3);
			ret = 0;
		}
		goto out_unlock;
	}

	hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
	if (kvm_is_error_hva(hva)) {
		ret = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	} else {
		if (!writable)
			cc = 1;	/* Write not permitted ==> read-only */
		kvm_s390_set_psw_cc(vcpu, cc);
		/* Note: CC2 only occurs for storage keys (not supported yet) */
	}
out_unlock:
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_unlock(vcpu);
	return ret;
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	/* For e5xx... instructions we only handle TPROT */
	if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
		return handle_tprot(vcpu);
	return -EOPNOTSUPP;
}
static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}

static int handle_ptff(struct kvm_vcpu *vcpu)
{
	/* we don't emulate any control instructions yet */
	kvm_s390_set_psw_cc(vcpu, 3);
	return 0;
}

static const intercept_handler_t x01_handlers[256] = {
	[0x04] = handle_ptff,
	[0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}