Merge tag 'loongarch-kvm-6.14' of git://git.kernel.org/pub/scm/linux/kernel/git/chenh...
author Paolo Bonzini <pbonzini@redhat.com>
Wed, 15 Jan 2025 16:51:56 +0000 (11:51 -0500)
committer Paolo Bonzini <pbonzini@redhat.com>
Wed, 15 Jan 2025 16:51:56 +0000 (11:51 -0500)
LoongArch KVM changes for v6.14

1. Clear LLBCTL if secondary mmu mapping changed.
2. Add hypercall service support for usermode VMM.

This is a really small changeset, because the Chinese New Year
(Spring Festival) is coming. Happy New Year!

MAINTAINERS
tools/testing/selftests/kvm/arm64/set_id_regs.c
tools/testing/selftests/kvm/s390/ucontrol_test.c

diff --cc MAINTAINERS
Simple merge
index bc6cf50e51358903d52f6414e841298b1773a76a,0000000000000000000000000000000000000000..3dd85ce8551c6a1811757f4e950d60ef3b5fc3cc
mode 100644,000000..100644
--- /dev/null
@@@ -1,695 -1,0 +1,694 @@@
-       REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, ASIDBITS, 0),
 +// SPDX-License-Identifier: GPL-2.0-only
 +/*
 + * set_id_regs - Test for setting ID registers from userspace.
 + *
 + * Copyright (c) 2023 Google LLC.
 + *
 + * Test that KVM supports setting ID registers from userspace and handles the
 + * feature set correctly.
 + */
 +
 +#include <stdint.h>
 +#include "kvm_util.h"
 +#include "processor.h"
 +#include "test_util.h"
 +#include <linux/bitfield.h>
 +
 +enum ftr_type {
 +      FTR_EXACT,                      /* Use a predefined safe value */
 +      FTR_LOWER_SAFE,                 /* Smaller value is safe */
 +      FTR_HIGHER_SAFE,                /* Bigger value is safe */
 +      FTR_HIGHER_OR_ZERO_SAFE,        /* Bigger value is safe, but 0 is biggest */
 +      FTR_END,                        /* Mark the last ftr bits */
 +};
 +
 +#define FTR_SIGNED    true    /* Value should be treated as signed */
 +#define FTR_UNSIGNED  false   /* Value should be treated as unsigned */
 +
 +struct reg_ftr_bits {
 +      char *name;
 +      bool sign;
 +      enum ftr_type type;
 +      uint8_t shift;
 +      uint64_t mask;
 +      /*
 +       * For FTR_EXACT, safe_val is used as the exact safe value.
 +       * For FTR_LOWER_SAFE, safe_val is used as the minimal safe value.
 +       */
 +      int64_t safe_val;
 +};
 +
 +struct test_feature_reg {
 +      uint32_t reg;
 +      const struct reg_ftr_bits *ftr_bits;
 +};
 +
 +#define __REG_FTR_BITS(NAME, SIGNED, TYPE, SHIFT, MASK, SAFE_VAL)     \
 +      {                                                               \
 +              .name = #NAME,                                          \
 +              .sign = SIGNED,                                         \
 +              .type = TYPE,                                           \
 +              .shift = SHIFT,                                         \
 +              .mask = MASK,                                           \
 +              .safe_val = SAFE_VAL,                                   \
 +      }
 +
 +#define REG_FTR_BITS(type, reg, field, safe_val) \
 +      __REG_FTR_BITS(reg##_##field, FTR_UNSIGNED, type, reg##_##field##_SHIFT, \
 +                     reg##_##field##_MASK, safe_val)
 +
 +#define S_REG_FTR_BITS(type, reg, field, safe_val) \
 +      __REG_FTR_BITS(reg##_##field, FTR_SIGNED, type, reg##_##field##_SHIFT, \
 +                     reg##_##field##_MASK, safe_val)
 +
 +#define REG_FTR_END                                   \
 +      {                                               \
 +              .type = FTR_END,                        \
 +      }
 +
 +static const struct reg_ftr_bits ftr_id_aa64dfr0_el1[] = {
 +      S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, DoubleLock, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, WRPs, 0),
 +      S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, PMUVer, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, DebugVer, ID_AA64DFR0_EL1_DebugVer_IMP),
 +      REG_FTR_END,
 +};
 +
 +static const struct reg_ftr_bits ftr_id_dfr0_el1[] = {
 +      S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_DFR0_EL1, PerfMon, ID_DFR0_EL1_PerfMon_PMUv3),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_DFR0_EL1, CopDbg, ID_DFR0_EL1_CopDbg_Armv8),
 +      REG_FTR_END,
 +};
 +
 +static const struct reg_ftr_bits ftr_id_aa64isar0_el1[] = {
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, RNDR, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, TLB, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, TS, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, FHM, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, DP, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SM4, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SM3, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SHA3, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, RDM, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, TME, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, ATOMIC, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, CRC32, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SHA2, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SHA1, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, AES, 0),
 +      REG_FTR_END,
 +};
 +
 +static const struct reg_ftr_bits ftr_id_aa64isar1_el1[] = {
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, LS64, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, XS, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, I8MM, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, DGH, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, BF16, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, SPECRES, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, SB, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, FRINTTS, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, LRCPC, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, FCMA, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, JSCVT, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, DPB, 0),
 +      REG_FTR_END,
 +};
 +
 +static const struct reg_ftr_bits ftr_id_aa64isar2_el1[] = {
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR2_EL1, BC, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR2_EL1, RPRES, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR2_EL1, WFxT, 0),
 +      REG_FTR_END,
 +};
 +
 +static const struct reg_ftr_bits ftr_id_aa64pfr0_el1[] = {
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, CSV3, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, CSV2, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, DIT, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, SEL2, 0),
 +      REG_FTR_BITS(FTR_EXACT, ID_AA64PFR0_EL1, GIC, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL3, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL2, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL1, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL0, 0),
 +      REG_FTR_END,
 +};
 +
 +static const struct reg_ftr_bits ftr_id_aa64pfr1_el1[] = {
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR1_EL1, CSV2_frac, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR1_EL1, SSBS, ID_AA64PFR1_EL1_SSBS_NI),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR1_EL1, BT, 0),
 +      REG_FTR_END,
 +};
 +
 +static const struct reg_ftr_bits ftr_id_aa64mmfr0_el1[] = {
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, ECV, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, EXS, 0),
 +      S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, TGRAN4, 0),
 +      S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, TGRAN64, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, TGRAN16, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, BIGENDEL0, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, SNSMEM, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, BIGEND, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, PARANGE, 0),
 +      REG_FTR_END,
 +};
 +
 +static const struct reg_ftr_bits ftr_id_aa64mmfr1_el1[] = {
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, TIDCP1, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, AFP, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, ETS, 0),
 +      REG_FTR_BITS(FTR_HIGHER_SAFE, ID_AA64MMFR1_EL1, SpecSEI, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, PAN, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, LO, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, HPDS, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, HAFDBS, 0),
 +      REG_FTR_END,
 +};
 +
 +static const struct reg_ftr_bits ftr_id_aa64mmfr2_el1[] = {
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, E0PD, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, BBM, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, TTL, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, AT, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, ST, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, VARange, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, IESB, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, LSM, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, UAO, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, CnP, 0),
 +      REG_FTR_END,
 +};
 +
 +static const struct reg_ftr_bits ftr_id_aa64zfr0_el1[] = {
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, F64MM, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, F32MM, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, I8MM, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, SM4, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, SHA3, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, BF16, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, BitPerm, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, AES, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, SVEver, 0),
 +      REG_FTR_END,
 +};
 +
 +#define TEST_REG(id, table)                   \
 +      {                                       \
 +              .reg = id,                      \
 +              .ftr_bits = &((table)[0]),      \
 +      }
 +
 +static struct test_feature_reg test_regs[] = {
 +      TEST_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0_el1),
 +      TEST_REG(SYS_ID_DFR0_EL1, ftr_id_dfr0_el1),
 +      TEST_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0_el1),
 +      TEST_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1_el1),
 +      TEST_REG(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2_el1),
 +      TEST_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0_el1),
 +      TEST_REG(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1_el1),
 +      TEST_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0_el1),
 +      TEST_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1_el1),
 +      TEST_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2_el1),
 +      TEST_REG(SYS_ID_AA64ZFR0_EL1, ftr_id_aa64zfr0_el1),
 +};
 +
 +#define GUEST_REG_SYNC(id) GUEST_SYNC_ARGS(0, id, read_sysreg_s(id), 0, 0);
 +
 +static void guest_code(void)
 +{
 +      GUEST_REG_SYNC(SYS_ID_AA64DFR0_EL1);
 +      GUEST_REG_SYNC(SYS_ID_DFR0_EL1);
 +      GUEST_REG_SYNC(SYS_ID_AA64ISAR0_EL1);
 +      GUEST_REG_SYNC(SYS_ID_AA64ISAR1_EL1);
 +      GUEST_REG_SYNC(SYS_ID_AA64ISAR2_EL1);
 +      GUEST_REG_SYNC(SYS_ID_AA64PFR0_EL1);
 +      GUEST_REG_SYNC(SYS_ID_AA64MMFR0_EL1);
 +      GUEST_REG_SYNC(SYS_ID_AA64MMFR1_EL1);
 +      GUEST_REG_SYNC(SYS_ID_AA64MMFR2_EL1);
 +      GUEST_REG_SYNC(SYS_ID_AA64ZFR0_EL1);
 +      GUEST_REG_SYNC(SYS_CTR_EL0);
 +
 +      GUEST_DONE();
 +}
 +
 +/* Return a safe value for a given ftr_bits and ftr value */
 +uint64_t get_safe_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr)
 +{
 +      uint64_t ftr_max = GENMASK_ULL(ARM64_FEATURE_FIELD_BITS - 1, 0);
 +
 +      if (ftr_bits->sign == FTR_UNSIGNED) {
 +              switch (ftr_bits->type) {
 +              case FTR_EXACT:
 +                      ftr = ftr_bits->safe_val;
 +                      break;
 +              case FTR_LOWER_SAFE:
 +                      if (ftr > ftr_bits->safe_val)
 +                              ftr--;
 +                      break;
 +              case FTR_HIGHER_SAFE:
 +                      if (ftr < ftr_max)
 +                              ftr++;
 +                      break;
 +              case FTR_HIGHER_OR_ZERO_SAFE:
 +                      if (ftr == ftr_max)
 +                              ftr = 0;
 +                      else if (ftr != 0)
 +                              ftr++;
 +                      break;
 +              default:
 +                      break;
 +              }
 +      } else if (ftr != ftr_max) {
 +              switch (ftr_bits->type) {
 +              case FTR_EXACT:
 +                      ftr = ftr_bits->safe_val;
 +                      break;
 +              case FTR_LOWER_SAFE:
 +                      if (ftr > ftr_bits->safe_val)
 +                              ftr--;
 +                      break;
 +              case FTR_HIGHER_SAFE:
 +                      if (ftr < ftr_max - 1)
 +                              ftr++;
 +                      break;
 +              case FTR_HIGHER_OR_ZERO_SAFE:
 +                      if (ftr != 0 && ftr != ftr_max - 1)
 +                              ftr++;
 +                      break;
 +              default:
 +                      break;
 +              }
 +      }
 +
 +      return ftr;
 +}
 +
 +/* Return an invalid value for a given ftr_bits and ftr value */
 +uint64_t get_invalid_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr)
 +{
 +      uint64_t ftr_max = GENMASK_ULL(ARM64_FEATURE_FIELD_BITS - 1, 0);
 +
 +      if (ftr_bits->sign == FTR_UNSIGNED) {
 +              switch (ftr_bits->type) {
 +              case FTR_EXACT:
 +                      ftr = max((uint64_t)ftr_bits->safe_val + 1, ftr + 1);
 +                      break;
 +              case FTR_LOWER_SAFE:
 +                      ftr++;
 +                      break;
 +              case FTR_HIGHER_SAFE:
 +                      ftr--;
 +                      break;
 +              case FTR_HIGHER_OR_ZERO_SAFE:
 +                      if (ftr == 0)
 +                              ftr = ftr_max;
 +                      else
 +                              ftr--;
 +                      break;
 +              default:
 +                      break;
 +              }
 +      } else if (ftr != ftr_max) {
 +              switch (ftr_bits->type) {
 +              case FTR_EXACT:
 +                      ftr = max((uint64_t)ftr_bits->safe_val + 1, ftr + 1);
 +                      break;
 +              case FTR_LOWER_SAFE:
 +                      ftr++;
 +                      break;
 +              case FTR_HIGHER_SAFE:
 +                      ftr--;
 +                      break;
 +              case FTR_HIGHER_OR_ZERO_SAFE:
 +                      if (ftr == 0)
 +                              ftr = ftr_max - 1;
 +                      else
 +                              ftr--;
 +                      break;
 +              default:
 +                      break;
 +              }
 +      } else {
 +              ftr = 0;
 +      }
 +
 +      return ftr;
 +}
 +
 +static uint64_t test_reg_set_success(struct kvm_vcpu *vcpu, uint64_t reg,
 +                                   const struct reg_ftr_bits *ftr_bits)
 +{
 +      uint8_t shift = ftr_bits->shift;
 +      uint64_t mask = ftr_bits->mask;
 +      uint64_t val, new_val, ftr;
 +
 +      val = vcpu_get_reg(vcpu, reg);
 +      ftr = (val & mask) >> shift;
 +
 +      ftr = get_safe_value(ftr_bits, ftr);
 +
 +      ftr <<= shift;
 +      val &= ~mask;
 +      val |= ftr;
 +
 +      vcpu_set_reg(vcpu, reg, val);
 +      new_val = vcpu_get_reg(vcpu, reg);
 +      TEST_ASSERT_EQ(new_val, val);
 +
 +      return new_val;
 +}
 +
 +static void test_reg_set_fail(struct kvm_vcpu *vcpu, uint64_t reg,
 +                            const struct reg_ftr_bits *ftr_bits)
 +{
 +      uint8_t shift = ftr_bits->shift;
 +      uint64_t mask = ftr_bits->mask;
 +      uint64_t val, old_val, ftr;
 +      int r;
 +
 +      val = vcpu_get_reg(vcpu, reg);
 +      ftr = (val & mask) >> shift;
 +
 +      ftr = get_invalid_value(ftr_bits, ftr);
 +
 +      old_val = val;
 +      ftr <<= shift;
 +      val &= ~mask;
 +      val |= ftr;
 +
 +      r = __vcpu_set_reg(vcpu, reg, val);
 +      TEST_ASSERT(r < 0 && errno == EINVAL,
 +                  "Unexpected KVM_SET_ONE_REG error: r=%d, errno=%d", r, errno);
 +
 +      val = vcpu_get_reg(vcpu, reg);
 +      TEST_ASSERT_EQ(val, old_val);
 +}
 +
 +static uint64_t test_reg_vals[KVM_ARM_FEATURE_ID_RANGE_SIZE];
 +
 +#define encoding_to_range_idx(encoding)                                                       \
 +      KVM_ARM_FEATURE_ID_RANGE_IDX(sys_reg_Op0(encoding), sys_reg_Op1(encoding),      \
 +                                   sys_reg_CRn(encoding), sys_reg_CRm(encoding),      \
 +                                   sys_reg_Op2(encoding))
 +
 +static void test_vm_ftr_id_regs(struct kvm_vcpu *vcpu, bool aarch64_only)
 +{
 +      uint64_t masks[KVM_ARM_FEATURE_ID_RANGE_SIZE];
 +      struct reg_mask_range range = {
 +              .addr = (__u64)masks,
 +      };
 +      int ret;
 +
 +      /* KVM should return error when reserved field is not zero */
 +      range.reserved[0] = 1;
 +      ret = __vm_ioctl(vcpu->vm, KVM_ARM_GET_REG_WRITABLE_MASKS, &range);
 +      TEST_ASSERT(ret, "KVM doesn't check invalid parameters.");
 +
 +      /* Get writable masks for feature ID registers */
 +      memset(range.reserved, 0, sizeof(range.reserved));
 +      vm_ioctl(vcpu->vm, KVM_ARM_GET_REG_WRITABLE_MASKS, &range);
 +
 +      for (int i = 0; i < ARRAY_SIZE(test_regs); i++) {
 +              const struct reg_ftr_bits *ftr_bits = test_regs[i].ftr_bits;
 +              uint32_t reg_id = test_regs[i].reg;
 +              uint64_t reg = KVM_ARM64_SYS_REG(reg_id);
 +              int idx;
 +
 +              /* Get the index to masks array for the idreg */
 +              idx = encoding_to_range_idx(reg_id);
 +
 +              for (int j = 0;  ftr_bits[j].type != FTR_END; j++) {
 +                      /* Skip aarch32 regs on an aarch64-only system, since they are RAZ/WI. */
 +                      if (aarch64_only && sys_reg_CRm(reg_id) < 4) {
 +                              ksft_test_result_skip("%s on AARCH64 only system\n",
 +                                                    ftr_bits[j].name);
 +                              continue;
 +                      }
 +
 +                      /* Make sure the feature field is writable */
 +                      TEST_ASSERT_EQ(masks[idx] & ftr_bits[j].mask, ftr_bits[j].mask);
 +
 +                      test_reg_set_fail(vcpu, reg, &ftr_bits[j]);
 +
 +                      test_reg_vals[idx] = test_reg_set_success(vcpu, reg,
 +                                                                &ftr_bits[j]);
 +
 +                      ksft_test_result_pass("%s\n", ftr_bits[j].name);
 +              }
 +      }
 +}
 +
 +#define MPAM_IDREG_TEST       6
 +static void test_user_set_mpam_reg(struct kvm_vcpu *vcpu)
 +{
 +      uint64_t masks[KVM_ARM_FEATURE_ID_RANGE_SIZE];
 +      struct reg_mask_range range = {
 +              .addr = (__u64)masks,
 +      };
 +      uint64_t val;
 +      int idx, err;
 +
 +      /*
 +       * If ID_AA64PFR0.MPAM is _not_ officially modifiable and is zero,
 +       * check that if it can be set to 1 (i.e. it is supported by the
 +       * hardware), it can't be set to other values.
 +       */
 +
 +      /* Get writable masks for feature ID registers */
 +      memset(range.reserved, 0, sizeof(range.reserved));
 +      vm_ioctl(vcpu->vm, KVM_ARM_GET_REG_WRITABLE_MASKS, &range);
 +
 +      /* Writeable? Nothing to test! */
 +      idx = encoding_to_range_idx(SYS_ID_AA64PFR0_EL1);
 +      if ((masks[idx] & ID_AA64PFR0_EL1_MPAM_MASK) == ID_AA64PFR0_EL1_MPAM_MASK) {
 +              ksft_test_result_skip("ID_AA64PFR0_EL1.MPAM is officially writable, nothing to test\n");
 +              return;
 +      }
 +
 +      /* Get the id register value */
 +      val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1));
 +
 +      /* Try to set MPAM=0. This should always be possible. */
 +      val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
 +      val |= FIELD_PREP(ID_AA64PFR0_EL1_MPAM_MASK, 0);
 +      err = __vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), val);
 +      if (err)
 +              ksft_test_result_fail("ID_AA64PFR0_EL1.MPAM=0 was not accepted\n");
 +      else
 +              ksft_test_result_pass("ID_AA64PFR0_EL1.MPAM=0 worked\n");
 +
 +      /* Try to set MPAM=1 */
 +      val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
 +      val |= FIELD_PREP(ID_AA64PFR0_EL1_MPAM_MASK, 1);
 +      err = __vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), val);
 +      if (err)
 +              ksft_test_result_skip("ID_AA64PFR0_EL1.MPAM is not writable, nothing to test\n");
 +      else
 +              ksft_test_result_pass("ID_AA64PFR0_EL1.MPAM=1 was writable\n");
 +
 +      /* Try to set MPAM=2 */
 +      val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
 +      val |= FIELD_PREP(ID_AA64PFR0_EL1_MPAM_MASK, 2);
 +      err = __vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), val);
 +      if (err)
 +              ksft_test_result_pass("ID_AA64PFR0_EL1.MPAM not arbitrarily modifiable\n");
 +      else
 +              ksft_test_result_fail("ID_AA64PFR0_EL1.MPAM value should not be ignored\n");
 +
 +      /* And again for ID_AA64PFR1_EL1.MPAM_frac */
 +      idx = encoding_to_range_idx(SYS_ID_AA64PFR1_EL1);
 +      if ((masks[idx] & ID_AA64PFR1_EL1_MPAM_frac_MASK) == ID_AA64PFR1_EL1_MPAM_frac_MASK) {
 +              ksft_test_result_skip("ID_AA64PFR1_EL1.MPAM_frac is officially writable, nothing to test\n");
 +              return;
 +      }
 +
 +      /* Get the id register value */
 +      val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1));
 +
 +      /* Try to set MPAM_frac=0. This should always be possible. */
 +      val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK;
 +      val |= FIELD_PREP(ID_AA64PFR1_EL1_MPAM_frac_MASK, 0);
 +      err = __vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1), val);
 +      if (err)
 +              ksft_test_result_fail("ID_AA64PFR1_EL1.MPAM_frac=0 was not accepted\n");
 +      else
 +              ksft_test_result_pass("ID_AA64PFR1_EL1.MPAM_frac=0 worked\n");
 +
 +      /* Try to set MPAM_frac=1 */
 +      val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK;
 +      val |= FIELD_PREP(ID_AA64PFR1_EL1_MPAM_frac_MASK, 1);
 +      err = __vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1), val);
 +      if (err)
 +              ksft_test_result_skip("ID_AA64PFR1_EL1.MPAM_frac is not writable, nothing to test\n");
 +      else
 +              ksft_test_result_pass("ID_AA64PFR1_EL1.MPAM_frac=1 was writable\n");
 +
 +      /* Try to set MPAM_frac=2 */
 +      val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK;
 +      val |= FIELD_PREP(ID_AA64PFR1_EL1_MPAM_frac_MASK, 2);
 +      err = __vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1), val);
 +      if (err)
 +              ksft_test_result_pass("ID_AA64PFR1_EL1.MPAM_frac not arbitrarily modifiable\n");
 +      else
 +              ksft_test_result_fail("ID_AA64PFR1_EL1.MPAM_frac value should not be ignored\n");
 +}
 +
 +static void test_guest_reg_read(struct kvm_vcpu *vcpu)
 +{
 +      bool done = false;
 +      struct ucall uc;
 +
 +      while (!done) {
 +              vcpu_run(vcpu);
 +
 +              switch (get_ucall(vcpu, &uc)) {
 +              case UCALL_ABORT:
 +                      REPORT_GUEST_ASSERT(uc);
 +                      break;
 +              case UCALL_SYNC:
 +                      /* Make sure the written values are seen by guest */
 +                      TEST_ASSERT_EQ(test_reg_vals[encoding_to_range_idx(uc.args[2])],
 +                                     uc.args[3]);
 +                      break;
 +              case UCALL_DONE:
 +                      done = true;
 +                      break;
 +              default:
 +                      TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
 +              }
 +      }
 +}
 +
 +/* Politely lifted from arch/arm64/include/asm/cache.h */
 +/* Ctypen, bits[3(n - 1) + 2 : 3(n - 1)], for n = 1 to 7 */
 +#define CLIDR_CTYPE_SHIFT(level)      (3 * (level - 1))
 +#define CLIDR_CTYPE_MASK(level)               (7 << CLIDR_CTYPE_SHIFT(level))
 +#define CLIDR_CTYPE(clidr, level)     \
 +      (((clidr) & CLIDR_CTYPE_MASK(level)) >> CLIDR_CTYPE_SHIFT(level))
 +
 +static void test_clidr(struct kvm_vcpu *vcpu)
 +{
 +      uint64_t clidr;
 +      int level;
 +
 +      clidr = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CLIDR_EL1));
 +
 +      /* find the first empty level in the cache hierarchy */
 +      for (level = 1; level < 7; level++) {
 +              if (!CLIDR_CTYPE(clidr, level))
 +                      break;
 +      }
 +
 +      /*
 +       * If you have a mind-boggling 7 levels of cache, congratulations, you
 +       * get to fix this.
 +       */
 +      TEST_ASSERT(level <= 7, "can't find an empty level in cache hierarchy");
 +
 +      /* stick in a unified cache level */
 +      clidr |= BIT(2) << CLIDR_CTYPE_SHIFT(level);
 +
 +      vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CLIDR_EL1), clidr);
 +      test_reg_vals[encoding_to_range_idx(SYS_CLIDR_EL1)] = clidr;
 +}
 +
 +static void test_ctr(struct kvm_vcpu *vcpu)
 +{
 +      u64 ctr;
 +
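 +      /*
 +       * Clear DIC and, when IminLine is non-zero, decrement it so the
 +       * written CTR_EL0 value differs from the reset value.
 +       */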
 +      ctr = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CTR_EL0));
 +      ctr &= ~CTR_EL0_DIC_MASK;
 +      if (ctr & CTR_EL0_IminLine_MASK)
 +              ctr--;
 +
 +      vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CTR_EL0), ctr);
 +      test_reg_vals[encoding_to_range_idx(SYS_CTR_EL0)] = ctr;
 +}
 +
 +static void test_vcpu_ftr_id_regs(struct kvm_vcpu *vcpu)
 +{
 +      u64 val;
 +
 +      test_clidr(vcpu);
 +      test_ctr(vcpu);
 +
 +      val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1));
 +      val++;
 +      vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), val);
 +
 +      test_reg_vals[encoding_to_range_idx(SYS_MPIDR_EL1)] = val;
 +      ksft_test_result_pass("%s\n", __func__);
 +}
 +
 +static void test_assert_id_reg_unchanged(struct kvm_vcpu *vcpu, uint32_t encoding)
 +{
 +      size_t idx = encoding_to_range_idx(encoding);
 +      uint64_t observed;
 +
 +      observed = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(encoding));
 +      TEST_ASSERT_EQ(test_reg_vals[idx], observed);
 +}
 +
 +static void test_reset_preserves_id_regs(struct kvm_vcpu *vcpu)
 +{
 +      /*
 +       * Calls KVM_ARM_VCPU_INIT behind the scenes, which will do an
 +       * architectural reset of the vCPU.
 +       */
 +      aarch64_vcpu_setup(vcpu, NULL);
 +
 +      for (int i = 0; i < ARRAY_SIZE(test_regs); i++)
 +              test_assert_id_reg_unchanged(vcpu, test_regs[i].reg);
 +
 +      test_assert_id_reg_unchanged(vcpu, SYS_MPIDR_EL1);
 +      test_assert_id_reg_unchanged(vcpu, SYS_CLIDR_EL1);
 +      test_assert_id_reg_unchanged(vcpu, SYS_CTR_EL0);
 +
 +      ksft_test_result_pass("%s\n", __func__);
 +}
 +
 +int main(void)
 +{
 +      struct kvm_vcpu *vcpu;
 +      struct kvm_vm *vm;
 +      bool aarch64_only;
 +      uint64_t val, el0;
 +      int test_cnt;
 +
 +      TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES));
 +
 +      vm = vm_create_with_one_vcpu(&vcpu, guest_code);
 +
 +      /* Check for AARCH64 only system */
 +      val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1));
 +      el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), val);
 +      aarch64_only = (el0 == ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
 +
 +      ksft_print_header();
 +
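 +      /*
 +       * Each ftr_bits table ends with a REG_FTR_END sentinel that produces
 +       * no result, hence one entry per table (ARRAY_SIZE(test_regs)) is
 +       * subtracted. The +2 covers test_vcpu_ftr_id_regs() and
 +       * test_reset_preserves_id_regs(), and MPAM_IDREG_TEST covers the
 +       * results reported by test_user_set_mpam_reg().
 +       */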
 +      test_cnt = ARRAY_SIZE(ftr_id_aa64dfr0_el1) + ARRAY_SIZE(ftr_id_dfr0_el1) +
 +                 ARRAY_SIZE(ftr_id_aa64isar0_el1) + ARRAY_SIZE(ftr_id_aa64isar1_el1) +
 +                 ARRAY_SIZE(ftr_id_aa64isar2_el1) + ARRAY_SIZE(ftr_id_aa64pfr0_el1) +
 +                 ARRAY_SIZE(ftr_id_aa64pfr1_el1) + ARRAY_SIZE(ftr_id_aa64mmfr0_el1) +
 +                 ARRAY_SIZE(ftr_id_aa64mmfr1_el1) + ARRAY_SIZE(ftr_id_aa64mmfr2_el1) +
 +                 ARRAY_SIZE(ftr_id_aa64zfr0_el1) - ARRAY_SIZE(test_regs) + 2 +
 +                 MPAM_IDREG_TEST;
 +
 +      ksft_set_plan(test_cnt);
 +
 +      test_vm_ftr_id_regs(vcpu, aarch64_only);
 +      test_vcpu_ftr_id_regs(vcpu);
 +      test_user_set_mpam_reg(vcpu);
 +
 +      test_guest_reg_read(vcpu);
 +
 +      test_reset_preserves_id_regs(vcpu);
 +
 +      kvm_vm_free(vm);
 +
 +      ksft_finished();
 +}
index 0c112319dab1ea127e616473eaa064a9758d96a7,0000000000000000000000000000000000000000..135ee22856cf1a4c57b2967cdf69c4cfe6daf987
mode 100644,000000..100644
--- /dev/null
@@@ -1,638 -1,0 +1,808 @@@
-               .addr = (unsigned long)&limit,
 +// SPDX-License-Identifier: GPL-2.0-only
 +/*
 + * Test code for the s390x kvm ucontrol interface
 + *
 + * Copyright IBM Corp. 2024
 + *
 + * Authors:
 + *  Christoph Schlameuss <schlameuss@linux.ibm.com>
 + */
 +#include "debug_print.h"
 +#include "kselftest_harness.h"
 +#include "kvm_util.h"
 +#include "processor.h"
 +#include "sie.h"
 +
 +#include <linux/capability.h>
 +#include <linux/sizes.h>
 +
 +#define PGM_SEGMENT_TRANSLATION 0x10
 +
 +#define VM_MEM_SIZE (4 * SZ_1M)
 +#define VM_MEM_EXT_SIZE (2 * SZ_1M)
 +#define VM_MEM_MAX_M ((VM_MEM_SIZE + VM_MEM_EXT_SIZE) / SZ_1M)
 +
 +/* declare capget directly so we can check capabilities without libcap */
 +int capget(cap_user_header_t header, cap_user_data_t data);
 +
 +/**
 + * In order to create user controlled virtual machines on S390,
 + * check KVM_CAP_S390_UCONTROL and use the flag KVM_VM_S390_UCONTROL
 + * as privileged user (SYS_ADMIN).
 + */
 +void require_ucontrol_admin(void)
 +{
 +      struct __user_cap_data_struct data[_LINUX_CAPABILITY_U32S_3];
 +      struct __user_cap_header_struct hdr = {
 +              .version = _LINUX_CAPABILITY_VERSION_3,
 +      };
 +      int rc;
 +
 +      rc = capget(&hdr, data);
 +      TEST_ASSERT_EQ(0, rc);
 +      TEST_REQUIRE((data->effective & CAP_TO_MASK(CAP_SYS_ADMIN)) > 0);
 +
 +      TEST_REQUIRE(kvm_has_cap(KVM_CAP_S390_UCONTROL));
 +}
 +
 +/* Test program setting some registers and looping */
 +extern char test_gprs_asm[];
 +asm("test_gprs_asm:\n"
 +      "xgr    %r0, %r0\n"
 +      "lgfi   %r1,1\n"
 +      "lgfi   %r2,2\n"
 +      "lgfi   %r3,3\n"
 +      "lgfi   %r4,4\n"
 +      "lgfi   %r5,5\n"
 +      "lgfi   %r6,6\n"
 +      "lgfi   %r7,7\n"
 +      "0:\n"
 +      "       diag    0,0,0x44\n"
 +      "       ahi     %r0,1\n"
 +      "       j       0b\n"
 +);
 +
 +/* Test program manipulating memory */
 +extern char test_mem_asm[];
 +asm("test_mem_asm:\n"
 +      "xgr    %r0, %r0\n"
 +
 +      "0:\n"
 +      "       ahi     %r0,1\n"
 +      "       st      %r1,0(%r5,%r6)\n"
 +
 +      "       xgr     %r1,%r1\n"
 +      "       l       %r1,0(%r5,%r6)\n"
 +      "       ahi     %r0,1\n"
 +      "       diag    0,0,0x44\n"
 +
 +      "       j       0b\n"
 +);
 +
 +/* Test program manipulating storage keys */
 +extern char test_skey_asm[];
 +asm("test_skey_asm:\n"
 +      "xgr    %r0, %r0\n"
 +
 +      "0:\n"
 +      "       ahi     %r0,1\n"
 +      "       st      %r1,0(%r5,%r6)\n"
 +
 +      "       iske    %r1,%r6\n"
 +      "       ahi     %r0,1\n"
 +      "       diag    0,0,0x44\n"
 +
 +      "       sske    %r1,%r6\n"
 +      "       xgr     %r1,%r1\n"
 +      "       iske    %r1,%r6\n"
 +      "       ahi     %r0,1\n"
 +      "       diag    0,0,0x44\n"
 +
 +      "       rrbe    %r1,%r6\n"
 +      "       iske    %r1,%r6\n"
 +      "       ahi     %r0,1\n"
 +      "       diag    0,0,0x44\n"
 +
 +      "       j       0b\n"
 +);
 +
 +FIXTURE(uc_kvm)
 +{
 +      struct kvm_s390_sie_block *sie_block;
 +      struct kvm_run *run;
 +      uintptr_t base_gpa;
 +      uintptr_t code_gpa;
 +      uintptr_t base_hva;
 +      uintptr_t code_hva;
 +      int kvm_run_size;
 +      vm_paddr_t pgd;
 +      void *vm_mem;
 +      int vcpu_fd;
 +      int kvm_fd;
 +      int vm_fd;
 +};
 +
 +/**
 + * create VM with single vcpu, map kvm_run and SIE control block for easy access
 + */
 +FIXTURE_SETUP(uc_kvm)
 +{
 +      struct kvm_s390_vm_cpu_processor info;
 +      int rc;
 +
 +      require_ucontrol_admin();
 +
 +      self->kvm_fd = open_kvm_dev_path_or_exit();
 +      self->vm_fd = ioctl(self->kvm_fd, KVM_CREATE_VM, KVM_VM_S390_UCONTROL);
 +      ASSERT_GE(self->vm_fd, 0);
 +
 +      kvm_device_attr_get(self->vm_fd, KVM_S390_VM_CPU_MODEL,
 +                          KVM_S390_VM_CPU_PROCESSOR, &info);
 +      TH_LOG("create VM 0x%llx", info.cpuid);
 +
 +      self->vcpu_fd = ioctl(self->vm_fd, KVM_CREATE_VCPU, 0);
 +      ASSERT_GE(self->vcpu_fd, 0);
 +
 +      self->kvm_run_size = ioctl(self->kvm_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
 +      ASSERT_GE(self->kvm_run_size, sizeof(struct kvm_run))
 +                TH_LOG(KVM_IOCTL_ERROR(KVM_GET_VCPU_MMAP_SIZE, self->kvm_run_size));
 +      self->run = (struct kvm_run *)mmap(NULL, self->kvm_run_size,
 +                  PROT_READ | PROT_WRITE, MAP_SHARED, self->vcpu_fd, 0);
 +      ASSERT_NE(self->run, MAP_FAILED);
 +      /**
 +       * For virtual cpus that have been created with S390 user controlled
 +       * virtual machines, the resulting vcpu fd can be memory mapped at page
 +       * offset KVM_S390_SIE_PAGE_OFFSET in order to obtain a memory map of
 +       * the virtual cpu's hardware control block.
 +       */
 +      self->sie_block = (struct kvm_s390_sie_block *)mmap(NULL, PAGE_SIZE,
 +                        PROT_READ | PROT_WRITE, MAP_SHARED,
 +                        self->vcpu_fd, KVM_S390_SIE_PAGE_OFFSET << PAGE_SHIFT);
 +      ASSERT_NE(self->sie_block, MAP_FAILED);
 +
 +      TH_LOG("VM created %p %p", self->run, self->sie_block);
 +
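 +      /*
 +       * Guest memory layout: VM_MEM_SIZE (4 MiB) is ucas-mapped at guest
 +       * physical address 0 and the test code is copied in at the 3 MiB
 +       * mark; the remaining VM_MEM_EXT_SIZE of the allocation stays
 +       * unmapped for the map/unmap test cases.
 +       */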
 +      self->base_gpa = 0;
 +      self->code_gpa = self->base_gpa + (3 * SZ_1M);
 +
 +      self->vm_mem = aligned_alloc(SZ_1M, VM_MEM_MAX_M * SZ_1M);
 +      ASSERT_NE(NULL, self->vm_mem) TH_LOG("malloc failed %u", errno);
 +      self->base_hva = (uintptr_t)self->vm_mem;
 +      self->code_hva = self->base_hva - self->base_gpa + self->code_gpa;
 +      struct kvm_s390_ucas_mapping map = {
 +              .user_addr = self->base_hva,
 +              .vcpu_addr = self->base_gpa,
 +              .length = VM_MEM_SIZE,
 +      };
 +      TH_LOG("ucas map %p %p 0x%llx",
 +             (void *)map.user_addr, (void *)map.vcpu_addr, map.length);
 +      rc = ioctl(self->vcpu_fd, KVM_S390_UCAS_MAP, &map);
 +      ASSERT_EQ(0, rc) TH_LOG("ucas map result %d not expected, %s",
 +                              rc, strerror(errno));
 +
 +      TH_LOG("page in %p", (void *)self->base_gpa);
 +      rc = ioctl(self->vcpu_fd, KVM_S390_VCPU_FAULT, self->base_gpa);
 +      ASSERT_EQ(0, rc) TH_LOG("vcpu fault (%p) result %d not expected, %s",
 +                              (void *)self->base_hva, rc, strerror(errno));
 +
 +      self->sie_block->cpuflags &= ~CPUSTAT_STOPPED;
 +}
 +
 +FIXTURE_TEARDOWN(uc_kvm)
 +{
 +      munmap(self->sie_block, PAGE_SIZE);
 +      munmap(self->run, self->kvm_run_size);
 +      close(self->vcpu_fd);
 +      close(self->vm_fd);
 +      close(self->kvm_fd);
 +      free(self->vm_mem);
 +}
 +
 +TEST_F(uc_kvm, uc_sie_assertions)
 +{
 +      /* assert interception of Code 08 (Program Interruption) is set */
 +      EXPECT_EQ(0, self->sie_block->ecb & ECB_SPECI);
 +}
 +
 +TEST_F(uc_kvm, uc_attr_mem_limit)
 +{
 +      u64 limit;
 +      struct kvm_device_attr attr = {
 +              .group = KVM_S390_VM_MEM_CTRL,
 +              .attr = KVM_S390_VM_MEM_LIMIT_SIZE,
++              .addr = (u64)&limit,
 +      };
 +      int rc;
 +
++      rc = ioctl(self->vm_fd, KVM_HAS_DEVICE_ATTR, &attr);
++      EXPECT_EQ(0, rc);
++
 +      rc = ioctl(self->vm_fd, KVM_GET_DEVICE_ATTR, &attr);
 +      EXPECT_EQ(0, rc);
 +      EXPECT_EQ(~0UL, limit);
 +
 +      /* assert set not supported */
 +      rc = ioctl(self->vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 +      EXPECT_EQ(-1, rc);
 +      EXPECT_EQ(EINVAL, errno);
 +}
 +
 +TEST_F(uc_kvm, uc_no_dirty_log)
 +{
 +      struct kvm_dirty_log dlog;
 +      int rc;
 +
 +      rc = ioctl(self->vm_fd, KVM_GET_DIRTY_LOG, &dlog);
 +      EXPECT_EQ(-1, rc);
 +      EXPECT_EQ(EINVAL, errno);
 +}
 +
 +/**
 + * Assert HPAGE CAP cannot be enabled on UCONTROL VM
 + */
 +TEST(uc_cap_hpage)
 +{
 +      int rc, kvm_fd, vm_fd, vcpu_fd;
 +      struct kvm_enable_cap cap = {
 +              .cap = KVM_CAP_S390_HPAGE_1M,
 +      };
 +
 +      require_ucontrol_admin();
 +
 +      kvm_fd = open_kvm_dev_path_or_exit();
 +      vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_S390_UCONTROL);
 +      ASSERT_GE(vm_fd, 0);
 +
 +      /* assert hpages are not supported on ucontrol vm */
 +      rc = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_HPAGE_1M);
 +      EXPECT_EQ(0, rc);
 +
 +      /* Test that KVM_CAP_S390_HPAGE_1M can't be enabled for a ucontrol vm */
 +      rc = ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 +      EXPECT_EQ(-1, rc);
 +      EXPECT_EQ(EINVAL, errno);
 +
 +      /* assert HPAGE CAP is rejected after vCPU creation */
 +      vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
 +      ASSERT_GE(vcpu_fd, 0);
 +      rc = ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 +      EXPECT_EQ(-1, rc);
 +      EXPECT_EQ(EBUSY, errno);
 +
 +      close(vcpu_fd);
 +      close(vm_fd);
 +      close(kvm_fd);
 +}
 +
 +/* calculate host virtual addr from guest physical addr */
 +static void *gpa2hva(FIXTURE_DATA(uc_kvm) *self, u64 gpa)
 +{
 +      return (void *)(self->base_hva - self->base_gpa + gpa);
 +}
 +
 +/* map / make additional memory available */
 +static int uc_map_ext(FIXTURE_DATA(uc_kvm) *self, u64 vcpu_addr, u64 length)
 +{
 +      struct kvm_s390_ucas_mapping map = {
 +              .user_addr = (u64)gpa2hva(self, vcpu_addr),
 +              .vcpu_addr = vcpu_addr,
 +              .length = length,
 +      };
 +      pr_info("ucas map %p %p 0x%llx",
 +              (void *)map.user_addr, (void *)map.vcpu_addr, map.length);
 +      return ioctl(self->vcpu_fd, KVM_S390_UCAS_MAP, &map);
 +}
 +
 +/* unmap previously mapped memory */
 +static int uc_unmap_ext(FIXTURE_DATA(uc_kvm) *self, u64 vcpu_addr, u64 length)
 +{
 +      struct kvm_s390_ucas_mapping map = {
 +              .user_addr = (u64)gpa2hva(self, vcpu_addr),
 +              .vcpu_addr = vcpu_addr,
 +              .length = length,
 +      };
 +      pr_info("ucas unmap %p %p 0x%llx",
 +              (void *)map.user_addr, (void *)map.vcpu_addr, map.length);
 +      return ioctl(self->vcpu_fd, KVM_S390_UCAS_UNMAP, &map);
 +}
 +
 +/* handle ucontrol exit by mapping the accessed segment */
 +static void uc_handle_exit_ucontrol(FIXTURE_DATA(uc_kvm) *self)
 +{
 +      struct kvm_run *run = self->run;
 +      u64 seg_addr;
 +      int rc;
 +
 +      TEST_ASSERT_EQ(KVM_EXIT_S390_UCONTROL, run->exit_reason);
 +      switch (run->s390_ucontrol.pgm_code) {
 +      case PGM_SEGMENT_TRANSLATION:
 +              seg_addr = run->s390_ucontrol.trans_exc_code & ~(SZ_1M - 1);
 +              pr_info("ucontrol pic segment translation 0x%llx, mapping segment 0x%lx\n",
 +                      run->s390_ucontrol.trans_exc_code, seg_addr);
 +              /* map / make additional memory available */
 +              rc = uc_map_ext(self, seg_addr, SZ_1M);
 +              TEST_ASSERT_EQ(0, rc);
 +              break;
 +      default:
 +              TEST_FAIL("UNEXPECTED PGM CODE %d", run->s390_ucontrol.pgm_code);
 +      }
 +}
 +
 +/*
 + * Enable storage key handling for the guest by clearing the KSS cpuflag
 + * and dropping interception of the storage key instructions
 + * (ISKE, SSKE and RRBE)
 + */
 +static void uc_skey_enable(FIXTURE_DATA(uc_kvm) *self)
 +{
 +      struct kvm_s390_sie_block *sie_block = self->sie_block;
 +
 +      /* disable KSS */
 +      sie_block->cpuflags &= ~CPUSTAT_KSS;
 +      /* disable skey inst interception */
 +      sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
 +}
 +
 +/*
 + * Handle the instruction intercept
 + * Returns if interception is handled / execution can be continued
 + */
 +static bool uc_handle_insn_ic(FIXTURE_DATA(uc_kvm) *self)
 +{
 +      struct kvm_s390_sie_block *sie_block = self->sie_block;
 +      int ilen = insn_length(sie_block->ipa >> 8);
 +      struct kvm_run *run = self->run;
 +
 +      switch (run->s390_sieic.ipa) {
 +      case 0xB229: /* ISKE */
 +      case 0xB22b: /* SSKE */
 +      case 0xB22a: /* RRBE */
 +              uc_skey_enable(self);
 +
 +              /* rewind to reexecute intercepted instruction */
 +              run->psw_addr = run->psw_addr - ilen;
 +              pr_info("rewind guest addr to 0x%.16llx\n", run->psw_addr);
 +              return true;
 +      default:
 +              return false;
 +      }
 +}
 +
 +/*
 + * Handle the SIEIC exit
 + * * fail on codes not expected in the test cases
 + * Returns if interception is handled / execution can be continued
 + */
 +static bool uc_handle_sieic(FIXTURE_DATA(uc_kvm) *self)
 +{
 +      struct kvm_s390_sie_block *sie_block = self->sie_block;
 +      struct kvm_run *run = self->run;
 +
 +      /* check SIE interception code */
 +      pr_info("sieic: 0x%.2x 0x%.4x 0x%.8x\n",
 +              run->s390_sieic.icptcode,
 +              run->s390_sieic.ipa,
 +              run->s390_sieic.ipb);
 +      switch (run->s390_sieic.icptcode) {
 +      case ICPT_INST:
 +              /* end execution in caller on intercepted instruction */
 +              pr_info("sie instruction interception\n");
 +              return uc_handle_insn_ic(self);
 +      case ICPT_KSS:
 +              uc_skey_enable(self);
 +              return true;
 +      case ICPT_OPEREXC:
 +              /* operation exception */
 +              TEST_FAIL("sie exception on %.4x%.8x", sie_block->ipa, sie_block->ipb);
 +      default:
 +              TEST_FAIL("UNEXPECTED SIEIC CODE %d", run->s390_sieic.icptcode);
 +      }
 +      return true;
 +}
 +
 +/* verify VM state on exit */
 +static bool uc_handle_exit(FIXTURE_DATA(uc_kvm) *self)
 +{
 +      struct kvm_run *run = self->run;
 +
 +      switch (run->exit_reason) {
 +      case KVM_EXIT_S390_UCONTROL:
 +              /** check program interruption code
 +               * handle page fault --> ucas map
 +               */
 +              uc_handle_exit_ucontrol(self);
 +              break;
 +      case KVM_EXIT_S390_SIEIC:
 +              return uc_handle_sieic(self);
 +      default:
 +              pr_info("exit_reason %2d not handled\n", run->exit_reason);
 +      }
 +      return true;
 +}
 +
 +/* run the VM until interrupted */
 +static int uc_run_once(FIXTURE_DATA(uc_kvm) *self)
 +{
 +      int rc;
 +
 +      rc = ioctl(self->vcpu_fd, KVM_RUN, NULL);
 +      print_run(self->run, self->sie_block);
 +      print_regs(self->run);
 +      pr_debug("run %d / %d %s\n", rc, errno, strerror(errno));
 +      return rc;
 +}
 +
 +static void uc_assert_diag44(FIXTURE_DATA(uc_kvm) *self)
 +{
 +      struct kvm_s390_sie_block *sie_block = self->sie_block;
 +
 +      /* assert vm was interrupted by diag 0x0044 */
 +      TEST_ASSERT_EQ(KVM_EXIT_S390_SIEIC, self->run->exit_reason);
 +      TEST_ASSERT_EQ(ICPT_INST, sie_block->icptcode);
 +      TEST_ASSERT_EQ(0x8300, sie_block->ipa);
 +      TEST_ASSERT_EQ(0x440000, sie_block->ipb);
 +}
 +
 +TEST_F(uc_kvm, uc_no_user_region)
 +{
 +      struct kvm_userspace_memory_region region = {
 +              .slot = 1,
 +              .guest_phys_addr = self->code_gpa,
 +              .memory_size = VM_MEM_EXT_SIZE,
 +              .userspace_addr = (uintptr_t)self->code_hva,
 +      };
 +      struct kvm_userspace_memory_region2 region2 = {
 +              .slot = 1,
 +              .guest_phys_addr = self->code_gpa,
 +              .memory_size = VM_MEM_EXT_SIZE,
 +              .userspace_addr = (uintptr_t)self->code_hva,
 +      };
 +
 +      ASSERT_EQ(-1, ioctl(self->vm_fd, KVM_SET_USER_MEMORY_REGION, &region));
 +      ASSERT_EQ(EINVAL, errno);
 +
 +      ASSERT_EQ(-1, ioctl(self->vm_fd, KVM_SET_USER_MEMORY_REGION2, &region2));
 +      ASSERT_EQ(EINVAL, errno);
 +}
 +
 +TEST_F(uc_kvm, uc_map_unmap)
 +{
 +      struct kvm_sync_regs *sync_regs = &self->run->s.regs;
 +      struct kvm_run *run = self->run;
 +      const u64 disp = 1;
 +      int rc;
 +
 +      /* copy test_mem_asm to code_hva / code_gpa */
 +      TH_LOG("copy code %p to vm mapped memory %p / %p",
 +             &test_mem_asm, (void *)self->code_hva, (void *)self->code_gpa);
 +      memcpy((void *)self->code_hva, &test_mem_asm, PAGE_SIZE);
 +
 +      /* DAT disabled + 64 bit mode */
 +      run->psw_mask = 0x0000000180000000ULL;
 +      run->psw_addr = self->code_gpa;
 +
 +      /* set register content for test_mem_asm to access unmapped memory */
 +      sync_regs->gprs[1] = 0x55;
 +      sync_regs->gprs[5] = self->base_gpa;
 +      sync_regs->gprs[6] = VM_MEM_SIZE + disp;
 +      run->kvm_dirty_regs |= KVM_SYNC_GPRS;
 +
 +      /* run and expect to fail with ucontrol pic segment translation */
 +      ASSERT_EQ(0, uc_run_once(self));
 +      ASSERT_EQ(1, sync_regs->gprs[0]);
 +      ASSERT_EQ(KVM_EXIT_S390_UCONTROL, run->exit_reason);
 +
 +      ASSERT_EQ(PGM_SEGMENT_TRANSLATION, run->s390_ucontrol.pgm_code);
 +      ASSERT_EQ(self->base_gpa + VM_MEM_SIZE, run->s390_ucontrol.trans_exc_code);
 +
 +      /* fail to map memory with not segment aligned address */
 +      rc = uc_map_ext(self, self->base_gpa + VM_MEM_SIZE + disp, VM_MEM_EXT_SIZE);
 +      ASSERT_GT(0, rc)
 +              TH_LOG("ucas map for non segment address should fail but didn't; "
 +                     "result %d not expected, %s", rc, strerror(errno));
 +
 +      /* map / make additional memory available */
 +      rc = uc_map_ext(self, self->base_gpa + VM_MEM_SIZE, VM_MEM_EXT_SIZE);
 +      ASSERT_EQ(0, rc)
 +              TH_LOG("ucas map result %d not expected, %s", rc, strerror(errno));
 +      ASSERT_EQ(0, uc_run_once(self));
 +      ASSERT_EQ(false, uc_handle_exit(self));
 +      uc_assert_diag44(self);
 +
 +      /* assert registers and memory are in expected state */
 +      ASSERT_EQ(2, sync_regs->gprs[0]);
 +      ASSERT_EQ(0x55, sync_regs->gprs[1]);
 +      ASSERT_EQ(0x55, *(u32 *)gpa2hva(self, self->base_gpa + VM_MEM_SIZE + disp));
 +
 +      /* unmap and run loop again */
 +      rc = uc_unmap_ext(self, self->base_gpa + VM_MEM_SIZE, VM_MEM_EXT_SIZE);
 +      ASSERT_EQ(0, rc)
 +              TH_LOG("ucas unmap result %d not expected, %s", rc, strerror(errno));
 +      ASSERT_EQ(0, uc_run_once(self));
 +      ASSERT_EQ(3, sync_regs->gprs[0]);
 +      ASSERT_EQ(KVM_EXIT_S390_UCONTROL, run->exit_reason);
 +      ASSERT_EQ(PGM_SEGMENT_TRANSLATION, run->s390_ucontrol.pgm_code);
 +      /* handle ucontrol exit and remap memory after previous map and unmap */
 +      ASSERT_EQ(true, uc_handle_exit(self));
 +}
 +
 +TEST_F(uc_kvm, uc_gprs)
 +{
 +      struct kvm_sync_regs *sync_regs = &self->run->s.regs;
 +      struct kvm_run *run = self->run;
 +      struct kvm_regs regs = {};
 +
 +      /* Set registers to values that are different from the ones that we expect below */
 +      for (int i = 0; i < 8; i++)
 +              sync_regs->gprs[i] = 8;
 +      run->kvm_dirty_regs |= KVM_SYNC_GPRS;
 +
 +      /* copy test_gprs_asm to code_hva / code_gpa */
 +      TH_LOG("copy code %p to vm mapped memory %p / %p",
 +             &test_gprs_asm, (void *)self->code_hva, (void *)self->code_gpa);
 +      memcpy((void *)self->code_hva, &test_gprs_asm, PAGE_SIZE);
 +
 +      /* DAT disabled + 64 bit mode */
 +      run->psw_mask = 0x0000000180000000ULL;
 +      run->psw_addr = self->code_gpa;
 +
 +      /* run and expect interception of diag 44 */
 +      ASSERT_EQ(0, uc_run_once(self));
 +      ASSERT_EQ(false, uc_handle_exit(self));
 +      uc_assert_diag44(self);
 +
 +      /* Retrieve and check guest register values */
 +      ASSERT_EQ(0, ioctl(self->vcpu_fd, KVM_GET_REGS, &regs));
 +      for (int i = 0; i < 8; i++) {
 +              ASSERT_EQ(i, regs.gprs[i]);
 +              ASSERT_EQ(i, sync_regs->gprs[i]);
 +      }
 +
 +      /* run and expect interception of diag 44 again */
 +      ASSERT_EQ(0, uc_run_once(self));
 +      ASSERT_EQ(false, uc_handle_exit(self));
 +      uc_assert_diag44(self);
 +
 +      /* check continued increment of register 0 value */
 +      ASSERT_EQ(0, ioctl(self->vcpu_fd, KVM_GET_REGS, &regs));
 +      ASSERT_EQ(1, regs.gprs[0]);
 +      ASSERT_EQ(1, sync_regs->gprs[0]);
 +}
 +
 +TEST_F(uc_kvm, uc_skey)
 +{
 +      struct kvm_s390_sie_block *sie_block = self->sie_block;
 +      struct kvm_sync_regs *sync_regs = &self->run->s.regs;
 +      u64 test_vaddr = VM_MEM_SIZE - (SZ_1M / 2);
 +      struct kvm_run *run = self->run;
 +      const u8 skeyvalue = 0x34;
 +
 +      /* copy test_skey_asm to code_hva / code_gpa */
 +      TH_LOG("copy code %p to vm mapped memory %p / %p",
 +             &test_skey_asm, (void *)self->code_hva, (void *)self->code_gpa);
 +      memcpy((void *)self->code_hva, &test_skey_asm, PAGE_SIZE);
 +
 +      /* set register content for test_skey_asm to access not mapped memory */
 +      sync_regs->gprs[1] = skeyvalue;
 +      sync_regs->gprs[5] = self->base_gpa;
 +      sync_regs->gprs[6] = test_vaddr;
 +      run->kvm_dirty_regs |= KVM_SYNC_GPRS;
 +
 +      /* DAT disabled + 64 bit mode */
 +      run->psw_mask = 0x0000000180000000ULL;
 +      run->psw_addr = self->code_gpa;
 +
 +      ASSERT_EQ(0, uc_run_once(self));
 +      ASSERT_EQ(true, uc_handle_exit(self));
 +      ASSERT_EQ(1, sync_regs->gprs[0]);
 +
 +      /* ISKE */
 +      ASSERT_EQ(0, uc_run_once(self));
 +
 +      /*
 +       * Bail out and skip the test if iske is still intercepted after
 +       * uc_skey_enable was executed. In that case the instructions are not
 +       * handled by the kernel, so there is nothing to test here.
 +       */
 +      TEST_ASSERT_EQ(0, sie_block->cpuflags & CPUSTAT_KSS);
 +      TEST_ASSERT_EQ(0, sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE));
 +      TEST_ASSERT_EQ(KVM_EXIT_S390_SIEIC, self->run->exit_reason);
 +      TEST_ASSERT_EQ(ICPT_INST, sie_block->icptcode);
 +      TEST_REQUIRE(sie_block->ipa != 0xb229);
 +
 +      /* ISKE contd. */
 +      ASSERT_EQ(false, uc_handle_exit(self));
 +      ASSERT_EQ(2, sync_regs->gprs[0]);
 +      /* assert initial skey (ACC = 0, R & C = 1) */
 +      ASSERT_EQ(0x06, sync_regs->gprs[1]);
 +      uc_assert_diag44(self);
 +
 +      /* SSKE + ISKE */
 +      sync_regs->gprs[1] = skeyvalue;
 +      run->kvm_dirty_regs |= KVM_SYNC_GPRS;
 +      ASSERT_EQ(0, uc_run_once(self));
 +      ASSERT_EQ(false, uc_handle_exit(self));
 +      ASSERT_EQ(3, sync_regs->gprs[0]);
 +      ASSERT_EQ(skeyvalue, sync_regs->gprs[1]);
 +      uc_assert_diag44(self);
 +
 +      /* RRBE + ISKE */
 +      sync_regs->gprs[1] = skeyvalue;
 +      run->kvm_dirty_regs |= KVM_SYNC_GPRS;
 +      ASSERT_EQ(0, uc_run_once(self));
 +      ASSERT_EQ(false, uc_handle_exit(self));
 +      ASSERT_EQ(4, sync_regs->gprs[0]);
 +      /* assert R reset but rest of skey unchanged */
 +      ASSERT_EQ(skeyvalue & 0xfa, sync_regs->gprs[1]);
 +      ASSERT_EQ(0, sync_regs->gprs[1] & 0x04);
 +      uc_assert_diag44(self);
 +}
 +
++static char uc_flic_b[PAGE_SIZE];
++static struct kvm_s390_io_adapter uc_flic_ioa = { .id = 0 };
++static struct kvm_s390_io_adapter_req uc_flic_ioam = { .id = 0 };
++static struct kvm_s390_ais_req uc_flic_asim = { .isc = 0 };
++static struct kvm_s390_ais_all uc_flic_asima = { .simm = 0 };
++static struct uc_flic_attr_test {
++      char *name;
++      struct kvm_device_attr a;
++      int hasrc;
++      int geterrno;
++      int seterrno;
++} uc_flic_attr_tests[] = {
++      {
++              .name = "KVM_DEV_FLIC_GET_ALL_IRQS",
++              .seterrno = EINVAL,
++              .a = {
++                      .group = KVM_DEV_FLIC_GET_ALL_IRQS,
++                      .addr = (u64)&uc_flic_b,
++                      .attr = PAGE_SIZE,
++              },
++      },
++      {
++              .name = "KVM_DEV_FLIC_ENQUEUE",
++              .geterrno = EINVAL,
++              .a = { .group = KVM_DEV_FLIC_ENQUEUE, },
++      },
++      {
++              .name = "KVM_DEV_FLIC_CLEAR_IRQS",
++              .geterrno = EINVAL,
++              .a = { .group = KVM_DEV_FLIC_CLEAR_IRQS, },
++      },
++      {
++              .name = "KVM_DEV_FLIC_ADAPTER_REGISTER",
++              .geterrno = EINVAL,
++              .a = {
++                      .group = KVM_DEV_FLIC_ADAPTER_REGISTER,
++                      .addr = (u64)&uc_flic_ioa,
++              },
++      },
++      {
++              .name = "KVM_DEV_FLIC_ADAPTER_MODIFY",
++              .geterrno = EINVAL,
++              .seterrno = EINVAL,
++              .a = {
++                      .group = KVM_DEV_FLIC_ADAPTER_MODIFY,
++                      .addr = (u64)&uc_flic_ioam,
++                      .attr = sizeof(uc_flic_ioam),
++              },
++      },
++      {
++              .name = "KVM_DEV_FLIC_CLEAR_IO_IRQ",
++              .geterrno = EINVAL,
++              .seterrno = EINVAL,
++              .a = {
++                      .group = KVM_DEV_FLIC_CLEAR_IO_IRQ,
++                      .attr = 32,
++              },
++      },
++      {
++              .name = "KVM_DEV_FLIC_AISM",
++              .geterrno = EINVAL,
++              .seterrno = ENOTSUP,
++              .a = {
++                      .group = KVM_DEV_FLIC_AISM,
++                      .addr = (u64)&uc_flic_asim,
++              },
++      },
++      {
++              .name = "KVM_DEV_FLIC_AIRQ_INJECT",
++              .geterrno = EINVAL,
++              .a = { .group = KVM_DEV_FLIC_AIRQ_INJECT, },
++      },
++      {
++              .name = "KVM_DEV_FLIC_AISM_ALL",
++              .geterrno = ENOTSUP,
++              .seterrno = ENOTSUP,
++              .a = {
++                      .group = KVM_DEV_FLIC_AISM_ALL,
++                      .addr = (u64)&uc_flic_asima,
++                      .attr = sizeof(uc_flic_asima),
++              },
++      },
++      {
++              .name = "KVM_DEV_FLIC_APF_ENABLE",
++              .geterrno = EINVAL,
++              .seterrno = EINVAL,
++              .a = { .group = KVM_DEV_FLIC_APF_ENABLE, },
++      },
++      {
++              .name = "KVM_DEV_FLIC_APF_DISABLE_WAIT",
++              .geterrno = EINVAL,
++              .seterrno = EINVAL,
++              .a = { .group = KVM_DEV_FLIC_APF_DISABLE_WAIT, },
++      },
++};
++
++TEST_F(uc_kvm, uc_flic_attrs)
++{
++      struct kvm_create_device cd = { .type = KVM_DEV_TYPE_FLIC };
++      struct kvm_device_attr attr;
++      u64 value;
++      int rc, i;
++
++      rc = ioctl(self->vm_fd, KVM_CREATE_DEVICE, &cd);
++      ASSERT_EQ(0, rc) TH_LOG("create device failed with err %s (%i)",
++                              strerror(errno), errno);
++
++      for (i = 0; i < ARRAY_SIZE(uc_flic_attr_tests); i++) {
++              TH_LOG("test %s", uc_flic_attr_tests[i].name);
++              attr = (struct kvm_device_attr) {
++                      .group = uc_flic_attr_tests[i].a.group,
++                      .attr = uc_flic_attr_tests[i].a.attr,
++                      .addr = uc_flic_attr_tests[i].a.addr,
++              };
++              if (attr.addr == 0)
++                      attr.addr = (u64)&value;
++
++              rc = ioctl(cd.fd, KVM_HAS_DEVICE_ATTR, &attr);
++              EXPECT_EQ(uc_flic_attr_tests[i].hasrc, !!rc)
++                      TH_LOG("expected dev attr missing %s",
++                             uc_flic_attr_tests[i].name);
++
++              rc = ioctl(cd.fd, KVM_GET_DEVICE_ATTR, &attr);
++              EXPECT_EQ(!!uc_flic_attr_tests[i].geterrno, !!rc)
++                      TH_LOG("get dev attr rc not expected on %s %s (%i)",
++                             uc_flic_attr_tests[i].name,
++                             strerror(errno), errno);
++              if (uc_flic_attr_tests[i].geterrno)
++                      EXPECT_EQ(uc_flic_attr_tests[i].geterrno, errno)
++                              TH_LOG("get dev attr errno not expected on %s %s (%i)",
++                                     uc_flic_attr_tests[i].name,
++                                     strerror(errno), errno);
++
++              rc = ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);
++              EXPECT_EQ(!!uc_flic_attr_tests[i].seterrno, !!rc)
++                      TH_LOG("set dev attr rc not expected on %s %s (%i)",
++                             uc_flic_attr_tests[i].name,
++                             strerror(errno), errno);
++              if (uc_flic_attr_tests[i].seterrno)
++                      EXPECT_EQ(uc_flic_attr_tests[i].seterrno, errno)
++                              TH_LOG("set dev attr errno not expected on %s %s (%i)",
++                                     uc_flic_attr_tests[i].name,
++                                     strerror(errno), errno);
++      }
++
++      close(cd.fd);
++}
++
++TEST_F(uc_kvm, uc_set_gsi_routing)
++{
++      struct kvm_irq_routing *routing = kvm_gsi_routing_create();
++      struct kvm_irq_routing_entry ue = {
++              .type = KVM_IRQ_ROUTING_S390_ADAPTER,
++              .gsi = 1,
++              .u.adapter = (struct kvm_irq_routing_s390_adapter) {
++                      .ind_addr = 0,
++              },
++      };
++      int rc;
++
++      routing->entries[0] = ue;
++      routing->nr = 1;
++      rc = ioctl(self->vm_fd, KVM_SET_GSI_ROUTING, routing);
++      ASSERT_EQ(-1, rc) TH_LOG("err %s (%i)", strerror(errno), errno);
++      ASSERT_EQ(EINVAL, errno) TH_LOG("err %s (%i)", strerror(errno), errno);
++}
++
 +TEST_HARNESS_MAIN