// SPDX-License-Identifier: GPL-2.0
/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2018
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 *            Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <linux/mm_types.h>

#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/pgtable.h>
#include <asm/page-states.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include <asm/sclp.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

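/*
 * Handle intercepted Runtime Instrumentation instructions: if the guest
 * has facility 64, RI is enabled lazily by setting ECB3_RI and retrying
 * the instruction, so SIE interprets all further RI instructions without
 * exiting; otherwise an operation exception is injected.
 */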
static int handle_ri(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_ri++;

	if (test_kvm_facility(vcpu->kvm, 64)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (lazy)");
		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
		kvm_s390_retry_instr(vcpu);
		return 0;
	} else
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}

int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.sie_block->ipa & 0xf) <= 4)
		return handle_ri(vcpu);
	else
		return -EOPNOTSUPP;
}

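/*
 * Guarded storage is enabled lazily as well: the guest's GS control block
 * is loaded into the host thread, ECB_GS and host-register management are
 * flagged in the SIE block, and the intercepted instruction is retried.
 */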
static int handle_gs(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_gs++;

	if (test_kvm_facility(vcpu->kvm, 133)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (lazy)");
		preempt_disable();
		__ctl_set_bit(2, 4);
		current->thread.gs_cb = (struct gs_cb *)&vcpu->run->s.regs.gscb;
		restore_gs_cb(current->thread.gs_cb);
		preempt_enable();
		vcpu->arch.sie_block->ecb |= ECB_GS;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
		vcpu->arch.gs_enabled = 1;
		kvm_s390_retry_instr(vcpu);
		return 0;
	} else
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}

int kvm_s390_handle_e3(struct kvm_vcpu *vcpu)
{
	int code = vcpu->arch.sie_block->ipb & 0xff;

	if (code == 0x49 || code == 0x4d)
		return handle_gs(vcpu);
	else
		return -EOPNOTSUPP;
}

/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
	int rc;
	u8 ar;
	u64 op2, val;

	vcpu->stat.instruction_sck++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (op2 & 7)	/* Operand must be on a doubleword boundary */
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, op2, ar, &val, sizeof(val));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val);
	kvm_s390_set_tod_clock(vcpu->kvm, val);

	kvm_s390_set_psw_cc(vcpu, 0);
	return 0;
}

static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;
	u8 ar;

	vcpu->stat.instruction_spx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* get the value */
	rc = read_guest(vcpu, operand2, ar, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	address &= 0x7fffe000u;

	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are always
	 * at least 1MB aligned and have at least a size of 1MB.
	 */
	if (kvm_is_error_gpa(vcpu->kvm, address))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_prefix(vcpu, address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
	return 0;
}

static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stpx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	address = kvm_s390_get_prefix(vcpu);

	/* get the value */
	rc = write_guest(vcpu, operand2, ar, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STPX: storing prefix 0x%x into 0x%llx", address, operand2);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
	return 0;
}

static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	u16 vcpu_id = vcpu->vcpu_id;
	u64 ga;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stap++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (ga & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STAP: storing cpu address (%u) to 0x%llx", vcpu_id, ga);
	trace_kvm_s390_handle_stap(vcpu, ga);
	return 0;
}

int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu)
{
	int rc = 0;
	struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;

	trace_kvm_s390_skey_related_inst(vcpu);
	if (!(sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)) &&
	    !kvm_s390_test_cpuflags(vcpu, CPUSTAT_KSS))
		return rc;

	rc = s390_enable_skey();
	VCPU_EVENT(vcpu, 3, "enabling storage keys for guest: %d", rc);
	if (!rc) {
		if (kvm_s390_test_cpuflags(vcpu, CPUSTAT_KSS))
			kvm_s390_clear_cpuflags(vcpu, CPUSTAT_KSS);
		else
			sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE |
					     ICTL_RRBE);
	}
	return rc;
}

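/*
 * Returns 0 if the key operation has to be emulated here, a negative error
 * code on failure, or -EAGAIN if the instruction was retried because SIE
 * can interpret storage key instructions itself (sclp.has_skey).
 */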
static int try_handle_skey(struct kvm_vcpu *vcpu)
{
	int rc;

	rc = kvm_s390_skey_check_enable(vcpu);
	if (rc)
		return rc;
	if (sclp.has_skey) {
		/* with storage-key facility, SIE interprets it for us */
		kvm_s390_retry_instr(vcpu);
		VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
		return -EAGAIN;
	}
	return 0;
}

static int handle_iske(struct kvm_vcpu *vcpu)
{
	unsigned long addr;
	unsigned char key;
	int reg1, reg2;
	int rc;

	vcpu->stat.instruction_iske++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	addr = kvm_s390_real_to_abs(vcpu, addr);
	addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
	if (kvm_is_error_hva(addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	down_read(&current->mm->mmap_sem);
	rc = get_guest_storage_key(current->mm, addr, &key);
	up_read(&current->mm->mmap_sem);
	if (rc)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	vcpu->run->s.regs.gprs[reg1] &= ~0xff;
	vcpu->run->s.regs.gprs[reg1] |= key;
	return 0;
}

static int handle_rrbe(struct kvm_vcpu *vcpu)
{
	unsigned long addr;
	int reg1, reg2;
	int rc;

	vcpu->stat.instruction_rrbe++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	addr = kvm_s390_real_to_abs(vcpu, addr);
	addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
	if (kvm_is_error_hva(addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	down_read(&current->mm->mmap_sem);
	rc = reset_guest_reference_bit(current->mm, addr);
	up_read(&current->mm->mmap_sem);
	if (rc < 0)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_psw_cc(vcpu, rc);
	return 0;
}

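/*
 * Bits of the SSKE m3 field: non-quiescing (NQ), reference bit update
 * mask (MR), change bit update mask (MC) and multiple block (MB).
 * Bits for facilities not offered to the guest are masked off below.
 */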
#define SSKE_NQ 0x8
#define SSKE_MR 0x4
#define SSKE_MC 0x2
#define SSKE_MB 0x1
static int handle_sske(struct kvm_vcpu *vcpu)
{
	unsigned char m3 = vcpu->arch.sie_block->ipb >> 28;
	unsigned long start, end;
	unsigned char key, oldkey;
	int reg1, reg2;
	int rc;

	vcpu->stat.instruction_sske++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	if (!test_kvm_facility(vcpu->kvm, 8))
		m3 &= ~SSKE_MB;
	if (!test_kvm_facility(vcpu->kvm, 10))
		m3 &= ~(SSKE_MC | SSKE_MR);
	if (!test_kvm_facility(vcpu->kvm, 14))
		m3 &= ~SSKE_NQ;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	key = vcpu->run->s.regs.gprs[reg1] & 0xfe;
	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	start = kvm_s390_logical_to_effective(vcpu, start);
	if (m3 & SSKE_MB) {
		/* start already designates an absolute address */
		end = (start + _SEGMENT_SIZE) & ~(_SEGMENT_SIZE - 1);
	} else {
		start = kvm_s390_real_to_abs(vcpu, start);
		end = start + PAGE_SIZE;
	}

	while (start != end) {
		unsigned long addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));

		if (kvm_is_error_hva(addr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		down_read(&current->mm->mmap_sem);
		rc = cond_set_guest_storage_key(current->mm, addr, key, &oldkey,
						m3 & SSKE_NQ, m3 & SSKE_MR,
						m3 & SSKE_MC);
		up_read(&current->mm->mmap_sem);
		if (rc < 0)
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		start += PAGE_SIZE;
	}

	if (m3 & (SSKE_MC | SSKE_MR)) {
		if (m3 & SSKE_MB) {
			/* skey in reg1 is unpredictable */
			kvm_s390_set_psw_cc(vcpu, 3);
		} else {
			kvm_s390_set_psw_cc(vcpu, rc);
			vcpu->run->s.regs.gprs[reg1] &= ~0xff00UL;
			vcpu->run->s.regs.gprs[reg1] |= (u64) oldkey << 8;
		}
	}
	if (m3 & SSKE_MB) {
		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT)
			vcpu->run->s.regs.gprs[reg2] &= ~PAGE_MASK;
		else
			vcpu->run->s.regs.gprs[reg2] &= ~0xfffff000UL;
		end = kvm_s390_logical_to_effective(vcpu, end);
		vcpu->run->s.regs.gprs[reg2] |= end;
	}
	return 0;
}

static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_ipte_interlock++;
	if (psw_bits(vcpu->arch.sie_block->gpsw).pstate)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
	kvm_s390_retry_instr(vcpu);
	VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
	return 0;
}

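/*
 * TEST BLOCK simply clears the addressed 4k page and reports success with
 * condition code 0; storage keys are not considered here.
 */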
static int handle_test_block(struct kvm_vcpu *vcpu)
{
	gpa_t addr;
	int reg2;

	vcpu->stat.instruction_tb++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	if (kvm_s390_check_low_addr_prot_real(vcpu, addr))
		return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	addr = kvm_s390_real_to_abs(vcpu, addr);

	if (kvm_is_error_gpa(vcpu->kvm, addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	/*
	 * We don't expect errors on modern systems, and do not care
	 * about storage keys (yet), so let's just clear the page.
	 */
	if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE))
		return -EFAULT;
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
}

static int handle_tpi(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;
	unsigned long len;
	u32 tpi_data[3];
	int rc;
	u64 addr;
	u8 ar;

	vcpu->stat.instruction_tpi++;

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
	if (!inti) {
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
	tpi_data[1] = inti->io.io_int_parm;
	tpi_data[2] = inti->io.io_int_word;
	if (addr) {
		/*
		 * Store the two-word I/O interruption code into the
		 * provided area.
		 */
		len = sizeof(tpi_data) - 4;
		rc = write_guest(vcpu, addr, ar, &tpi_data, len);
		if (rc) {
			rc = kvm_s390_inject_prog_cond(vcpu, rc);
			goto reinject_interrupt;
		}
	} else {
		/*
		 * Store the three-word I/O interruption code into
		 * the appropriate lowcore area.
		 */
		len = sizeof(tpi_data);
		if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
			/* failed writes to the low core are not recoverable */
			rc = -EFAULT;
			goto reinject_interrupt;
		}
	}

	/* irq was successfully handed to the guest */
	kfree(inti);
	kvm_s390_set_psw_cc(vcpu, 1);
	return 0;
reinject_interrupt:
	/*
	 * If we encounter a problem storing the interruption code, the
	 * instruction is suppressed from the guest's view: reinject the
	 * interrupt.
	 */
	if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
		kfree(inti);
		rc = -EFAULT;
	}
	/* don't set the cc, a pgm irq was injected or we drop to user space */
	return rc ? -EFAULT : 0;
}

static int handle_tsch(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	const u64 isc_mask = 0xffUL << 24; /* all iscs set */

	vcpu->stat.instruction_tsch++;

	/* a valid schid has at least one bit set */
	if (vcpu->run->s.regs.gprs[1])
		inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask,
					   vcpu->run->s.regs.gprs[1]);

	/*
	 * Prepare exit to userspace.
	 * We indicate whether we dequeued a pending I/O interrupt
	 * so that userspace can re-inject it if the instruction gets
	 * a program check. While this may re-order the pending I/O
	 * interrupts, this is no problem since the priority is kept
	 * intact.
	 */
	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
	vcpu->run->s390_tsch.dequeued = !!inti;
	if (inti) {
		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
	}
	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
	kfree(inti);
	return -EREMOTE;
}

static int handle_io_inst(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->kvm->arch.css_support) {
		/*
		 * Most I/O instructions will be handled by userspace.
		 * Exceptions are tpi and the interrupt portion of tsch.
		 */
		if (vcpu->arch.sie_block->ipa == 0xb236)
			return handle_tpi(vcpu);
		if (vcpu->arch.sie_block->ipa == 0xb235)
			return handle_tsch(vcpu);
		/* Handle in userspace. */
		vcpu->stat.instruction_io_other++;
		return -EOPNOTSUPP;
	} else {
		/*
		 * Set condition code 3 to stop the guest from issuing channel
		 * I/O instructions.
		 */
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
}

static int handle_stfl(struct kvm_vcpu *vcpu)
{
	int rc;
	unsigned int fac;

	vcpu->stat.instruction_stfl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	/*
	 * We need to shift the lower 32 facility bits (bit 0-31) from a u64
	 * into a u32 memory representation. They will remain bits 0-31.
	 */
	fac = *vcpu->kvm->arch.model.fac_list >> 32;
	rc = write_guest_lc(vcpu, offsetof(struct lowcore, stfl_fac_list),
			    &fac, sizeof(fac));
	if (rc)
		return rc;
	VCPU_EVENT(vcpu, 3, "STFL: store facility list 0x%x", fac);
	trace_kvm_s390_handle_stfl(vcpu, fac);
	return 0;
}

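/*
 * A PSW loaded by LPSW/LPSWE is rejected if it has unassigned mask bits
 * set, selects the invalid EA-without-BA addressing mode, or carries an
 * instruction address that does not fit the 24/31-bit modes or is odd.
 */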
#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

int is_valid_psw(psw_t *psw)
{
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
		if (psw->addr & ~PSW_ADDR_31)
			return 0;
	}
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
		return 0;
	if (psw->addr & 1)
		return 0;
	return 1;
}

int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
	psw_compat_t new_psw;
	u64 addr;
	int rc;
	u8 ar;

	vcpu->stat.instruction_lpsw++;

	if (gpsw->mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;
	int rc;
	u8 ar;

	vcpu->stat.instruction_lpswe++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

static int handle_stidp(struct kvm_vcpu *vcpu)
{
	u64 stidp_data = vcpu->kvm->arch.model.cpuid;
	u64 operand2;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stidp++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (operand2 & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STIDP: store cpu id 0x%llx", stidp_data);
	return 0;
}

static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	int cpus = 0;
	int n;

	cpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	for (n = mem->count - 1; n > 0 ; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}

static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, u8 ar,
				 u8 fc, u8 sel1, u16 sel2)
{
	vcpu->run->exit_reason = KVM_EXIT_S390_STSI;
	vcpu->run->s390_stsi.addr = addr;
	vcpu->run->s390_stsi.ar = ar;
	vcpu->run->s390_stsi.fc = fc;
	vcpu->run->s390_stsi.sel1 = sel1;
	vcpu->run->s390_stsi.sel2 = sel2;
}

static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	unsigned long mem = 0;
	u64 operand2;
	int rc = 0;
	u8 ar;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (fc > 3) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}

	if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
	    || vcpu->run->s.regs.gprs[1] & 0xffff0000)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (fc == 0) {
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (operand2 & 0xfff)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_no_data;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_no_data;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	}

	rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
	if (rc) {
		rc = kvm_s390_inject_prog_cond(vcpu, rc);
		goto out;
	}
	if (vcpu->kvm->arch.user_stsi) {
		insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2);
		rc = -EREMOTE;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return rc;
out_no_data:
	kvm_s390_set_psw_cc(vcpu, 3);
out:
	free_page(mem);
	return rc;
}

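/*
 * Dispatch on the low byte of the intercepted 0xb2xx opcode; opcodes
 * without a handler here are reported back with -EOPNOTSUPP.
 */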
int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipa & 0x00ff) {
	case 0x02:
		return handle_stidp(vcpu);
	case 0x04:
		return handle_set_clock(vcpu);
	case 0x10:
		return handle_set_prefix(vcpu);
	case 0x11:
		return handle_store_prefix(vcpu);
	case 0x12:
		return handle_store_cpu_address(vcpu);
	case 0x14:
		return kvm_s390_handle_vsie(vcpu);
	case 0x21:
	case 0x50:
		return handle_ipte_interlock(vcpu);
	case 0x29:
		return handle_iske(vcpu);
	case 0x2a:
		return handle_rrbe(vcpu);
	case 0x2b:
		return handle_sske(vcpu);
	case 0x2c:
		return handle_test_block(vcpu);
	case 0x30:
	case 0x31:
	case 0x32:
	case 0x33:
	case 0x34:
	case 0x35:
	case 0x36:
	case 0x37:
	case 0x38:
	case 0x39:
	case 0x3a:
	case 0x3b:
	case 0x3c:
	case 0x5f:
	case 0x74:
	case 0x76:
		return handle_io_inst(vcpu);
	case 0x56:
		return handle_sthyi(vcpu);
	case 0x7d:
		return handle_stsi(vcpu);
	case 0xb1:
		return handle_stfl(vcpu);
	case 0xb2:
		return handle_lpswe(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}

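/*
 * EPSW may be issued from the problem state, hence no privilege check;
 * it merely extracts both halves of the PSW mask into reg1 and reg2.
 */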
static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

	vcpu->stat.instruction_epsw++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* This basically extracts the mask half of the psw. */
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
		vcpu->run->s.regs.gprs[reg2] |=
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
	}
	return 0;
}

#define PFMF_RESERVED 0xfffc0101UL
#define PFMF_SK 0x00020000UL
#define PFMF_CF 0x00010000UL
#define PFMF_UI 0x00008000UL
#define PFMF_FSC 0x00007000UL
#define PFMF_NQ 0x00000800UL
#define PFMF_MR 0x00000400UL
#define PFMF_MC 0x00000200UL
#define PFMF_KEY 0x000000feUL

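/*
 * PERFORM FRAME MANAGEMENT FUNCTION can clear frames and/or set their
 * storage keys for 4K, 1M or (given EDAT2) 2G frames. The bits of reg1
 * defined above are validated against the facilities of the guest.
 */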
static int handle_pfmf(struct kvm_vcpu *vcpu)
{
	bool mr = false, mc = false, nq;
	int reg1, reg2;
	unsigned long start, end;
	unsigned char key;

	vcpu->stat.instruction_pfmf++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	if (!test_kvm_facility(vcpu->kvm, 8))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide non-quiescing support if enabled for the guest */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ &&
	    !test_kvm_facility(vcpu->kvm, 14))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide conditional-SSKE support if enabled for the guest */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK &&
	    test_kvm_facility(vcpu->kvm, 10)) {
		mr = vcpu->run->s.regs.gprs[reg1] & PFMF_MR;
		mc = vcpu->run->s.regs.gprs[reg1] & PFMF_MC;
	}

	nq = vcpu->run->s.regs.gprs[reg1] & PFMF_NQ;
	key = vcpu->run->s.regs.gprs[reg1] & PFMF_KEY;
	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	start = kvm_s390_logical_to_effective(vcpu, start);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
		if (kvm_s390_check_low_addr_prot_real(vcpu, start))
			return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	}

	switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
	case 0x00000000:
		/* only 4k frames specify a real address */
		start = kvm_s390_real_to_abs(vcpu, start);
		end = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
		break;
	case 0x00001000:
		end = (start + _SEGMENT_SIZE) & ~(_SEGMENT_SIZE - 1);
		break;
	case 0x00002000:
		/* only support 2G frame size if EDAT2 is available and we are
		   not in 24-bit addressing mode */
		if (!test_kvm_facility(vcpu->kvm, 78) ||
		    psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_24BIT)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		end = (start + _REGION3_SIZE) & ~(_REGION3_SIZE - 1);
		break;
	default:
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	}

	while (start != end) {
		unsigned long useraddr;

		/* Translate guest address to host address */
		useraddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
		if (kvm_is_error_hva(useraddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
			if (clear_user((void __user *)useraddr, PAGE_SIZE))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
			int rc = kvm_s390_skey_check_enable(vcpu);

			if (rc)
				return rc;
			down_read(&current->mm->mmap_sem);
			rc = cond_set_guest_storage_key(current->mm, useraddr,
							key, NULL, nq, mr, mc);
			up_read(&current->mm->mmap_sem);
			if (rc < 0)
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		start += PAGE_SIZE;
	}
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) {
			vcpu->run->s.regs.gprs[reg2] = end;
		} else {
			vcpu->run->s.regs.gprs[reg2] &= ~0xffffffffUL;
			end = kvm_s390_logical_to_effective(vcpu, end);
			vcpu->run->s.regs.gprs[reg2] |= end;
		}
	}
	return 0;
}

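/*
 * Perform the ESSA operation on one page and leave the usage state in r1.
 * Returns the number of cbrlo entries appended by the host (0 or 1), or a
 * negative value on error; if the host page table entry is unusable, r1
 * receives the exception indication instead.
 */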
static inline int do_essa(struct kvm_vcpu *vcpu, const int orc)
{
	struct kvm_s390_migration_state *ms = vcpu->kvm->arch.migration_state;
	int r1, r2, nappended, entries;
	unsigned long gfn, hva, res, pgstev, ptev;
	unsigned long *cbrlo;

	/*
	 * We don't need to set SD.FPF.SK to 1 here, because if we have a
	 * machine check here we either handle it or crash
	 */

	kvm_s390_get_regs_rre(vcpu, &r1, &r2);
	gfn = vcpu->run->s.regs.gprs[r2] >> PAGE_SHIFT;
	hva = gfn_to_hva(vcpu->kvm, gfn);
	entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;

	if (kvm_is_error_hva(hva))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	nappended = pgste_perform_essa(vcpu->kvm->mm, hva, orc, &ptev, &pgstev);
	if (nappended < 0) {
		res = orc ? 0x10 : 0;
		vcpu->run->s.regs.gprs[r1] = res; /* Exception Indication */
		return 0;
	}
	res = (pgstev & _PGSTE_GPS_USAGE_MASK) >> 22;
	/*
	 * Set the block-content state part of the result. 0 means resident, so
	 * nothing to do if the page is valid. 2 is for preserved pages
	 * (non-present and non-zero), and 3 for zero pages (non-present and
	 * zero).
	 */
	if (ptev & _PAGE_INVALID) {
		res |= 2;
		if (pgstev & _PGSTE_GPS_ZERO)
			res |= 1;
	}
	if (pgstev & _PGSTE_GPS_NODAT)
		res |= 0x20;
	vcpu->run->s.regs.gprs[r1] = res;
	/*
	 * It is possible that all the normal 511 slots were full, in which case
	 * we will now write in the 512th slot, which is reserved for host use.
	 * In both cases we let the normal essa handling code process all the
	 * slots, including the reserved one, if needed.
	 */
	if (nappended > 0) {
		cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo & PAGE_MASK);
		cbrlo[entries] = gfn << PAGE_SHIFT;
	}

	if (orc && gfn < ms->bitmap_size) {
		/* increment only if we are really flipping the bit to 1 */
		if (!test_and_set_bit(gfn, ms->pgste_bitmap))
			atomic64_inc(&ms->dirty_pages);
	}

	return nappended;
}

static int handle_essa(struct kvm_vcpu *vcpu)
{
	/* entries expected to be 1FF */
	int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
	unsigned long *cbrlo;
	struct gmap *gmap;
	int i, orc;

	VCPU_EVENT(vcpu, 4, "ESSA: release %d pages", entries);
	gmap = vcpu->arch.gmap;
	vcpu->stat.instruction_essa++;
	if (!vcpu->kvm->arch.use_cmma)
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	/* Check for invalid operation request code */
	orc = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
	/* ORCs 0-6 are always valid */
	if (orc > (test_kvm_facility(vcpu->kvm, 147) ? ESSA_SET_STABLE_NODAT
						: ESSA_SET_STABLE_IF_RESIDENT))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (likely(!vcpu->kvm->arch.migration_state)) {
		/*
		 * CMMA is enabled in the KVM settings, but is disabled in
		 * the SIE block and in the mm_context, and we are not doing
		 * a migration. Enable CMMA in the mm_context.
		 * Since we need to take a write lock to write to the context
		 * to avoid races with storage keys handling, we check if the
		 * value really needs to be written to; if the value is
		 * already correct, we do nothing and avoid the lock.
		 */
		if (vcpu->kvm->mm->context.use_cmma == 0) {
			down_write(&vcpu->kvm->mm->mmap_sem);
			vcpu->kvm->mm->context.use_cmma = 1;
			up_write(&vcpu->kvm->mm->mmap_sem);
		}
		/*
		 * If we are here, we are supposed to have CMMA enabled in
		 * the SIE block. Enabling CMMA works on a per-CPU basis,
		 * while the context use_cmma flag is per process.
		 * It's possible that the context flag is enabled and the
		 * SIE flag is not, so we set the flag always; if it was
		 * already set, nothing changes, otherwise we enable it
		 * on this CPU too.
		 */
		vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
		/* Retry the ESSA instruction */
		kvm_s390_retry_instr(vcpu);
	} else {
		/* Account for the possible extra cbrl entry */
		i = do_essa(vcpu, orc);
		if (i < 0)
			return i;
		entries += i;
	}
	vcpu->arch.sie_block->cbrlo &= PAGE_MASK;	/* reset nceo */
	cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
	down_read(&gmap->mm->mmap_sem);
	for (i = 0; i < entries; ++i)
		__gmap_zap(gmap, cbrlo[i]);
	up_read(&gmap->mm->mmap_sem);
	return 0;
}

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipa & 0x00ff) {
	case 0x8a:
	case 0x8e:
	case 0x8f:
		return handle_ipte_interlock(vcpu);
	case 0x8d:
		return handle_epsw(vcpu);
	case 0xab:
		return handle_essa(vcpu);
	case 0xaf:
		return handle_pfmf(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}

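/*
 * LOAD CONTROL replaces the lower halves of control registers reg1
 * through reg3, wrapping from 15 back to 0; a TLB flush is requested
 * since changed control registers can affect address translation.
 */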
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_lctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "LCTL: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
		vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_stctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "STCTL r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

static int handle_lctlg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_lctlg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "LCTLG: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

static int handle_stctg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_stctg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "STCTG r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipb & 0x000000ff) {
	case 0x25:
		return handle_stctg(vcpu);
	case 0x2f:
		return handle_lctlg(vcpu);
	case 0x60:
	case 0x61:
	case 0x62:
		return handle_ri(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}

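/*
 * Only the Linux memory-detection use of TEST PROTECTION (access key 0)
 * is emulated; other uses go to userspace. Condition code 0 means the
 * address is writable, 1 read-only, and 3 not translatable.
 */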
static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address1, address2;
	unsigned long hva, gpa;
	int ret = 0, cc = 0;
	bool writable;
	u8 ar;

	vcpu->stat.instruction_tprot++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2, &ar, NULL);

	/* we only handle the Linux memory detection case:
	 * access key == 0
	 * everything else goes to userspace. */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_lock(vcpu);
	ret = guest_translate_address(vcpu, address1, ar, &gpa, GACC_STORE);
	if (ret == PGM_PROTECTION) {
		/* Write protected? Try again with read-only... */
		cc = 1;
		ret = guest_translate_address(vcpu, address1, ar, &gpa,
					      GACC_FETCH);
	}
	if (ret) {
		if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
			ret = kvm_s390_inject_program_int(vcpu, ret);
		} else if (ret > 0) {
			/* Translation not available */
			kvm_s390_set_psw_cc(vcpu, 3);
			ret = 0;
		}
		goto out_unlock;
	}

	hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
	if (kvm_is_error_hva(hva)) {
		ret = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	} else {
		if (!writable)
			cc = 1;		/* Write not permitted ==> read-only */
		kvm_s390_set_psw_cc(vcpu, cc);
		/* Note: CC2 only occurs for storage keys (not supported yet) */
	}
out_unlock:
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_unlock(vcpu);
	return ret;
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipa & 0x00ff) {
	case 0x01:
		return handle_tprot(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}

static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	vcpu->stat.instruction_sckpf++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}

static int handle_ptff(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_ptff++;

	/* we don't emulate any control instructions yet */
	kvm_s390_set_psw_cc(vcpu, 3);
	return 0;
}

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipa & 0x00ff) {
	case 0x04:
		return handle_ptff(vcpu);
	case 0x07:
		return handle_sckpf(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}