Merge tag 'kvm-x86-pvunhalt-6.9' of https://github.com/kvm-x86/linux into HEAD
diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index 8b5c804562f99dfc2ebacbbd6ae130804b291f89..81ce37ec407dd18b04abab3ff0a40c8deca9cd65 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
 extern bool host_cpu_is_intel;
 extern bool host_cpu_is_amd;
 
+enum vm_guest_x86_subtype {
+       VM_SUBTYPE_NONE = 0,
+       VM_SUBTYPE_SEV,
+       VM_SUBTYPE_SEV_ES,
+};
+
+/* Forced emulation prefix, used to invoke the emulator unconditionally. */
+#define KVM_FEP "ud2; .byte 'k', 'v', 'm';"
+
 #define NMI_VECTOR             0x02
 
 #define X86_EFLAGS_FIXED        (1u << 1)
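To make the forced emulation hook concrete, a minimal guest-side sketch (assumes KVM was loaded with force_emulation_prefix enabled; the helper name is hypothetical):

	/* Hypothetical guest helper: run CPUID through KVM's emulator. */
	static void guest_emulated_cpuid(void)
	{
		uint32_t eax = 1, ebx, ecx = 0, edx;

		/* KVM_FEP ahead of the insn tells KVM to emulate it. */
		asm volatile(KVM_FEP "cpuid"
			     : "+a"(eax), "=b"(ebx), "+c"(ecx), "=d"(edx));
	}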
@@ -273,6 +282,7 @@ struct kvm_x86_cpu_property {
 #define X86_PROPERTY_MAX_EXT_LEAF              KVM_X86_CPU_PROPERTY(0x80000000, 0, EAX, 0, 31)
 #define X86_PROPERTY_MAX_PHY_ADDR              KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 0, 7)
 #define X86_PROPERTY_MAX_VIRT_ADDR             KVM_X86_CPU_PROPERTY(0x80000008, 0, EAX, 8, 15)
+#define X86_PROPERTY_SEV_C_BIT                 KVM_X86_CPU_PROPERTY(0x8000001F, 0, EBX, 0, 5)
 #define X86_PROPERTY_PHYS_ADDR_REDUCTION       KVM_X86_CPU_PROPERTY(0x8000001F, 0, EBX, 6, 11)
 
 #define X86_PROPERTY_MAX_CENTAUR_LEAF          KVM_X86_CPU_PROPERTY(0xC0000000, 0, EAX, 0, 31)
@@ -282,24 +292,41 @@ struct kvm_x86_cpu_property {
  * that indicates the feature is _not_ supported, and a property that states
  * the length of the bit mask of unsupported features.  A feature is supported
  * if the size of the bit mask is larger than the "unavailable" bit, and said
- * bit is not set.
+ * bit is not set.  Fixed counters also have bizarre enumeration, but inverted
+ * from arch events for general purpose counters.  Fixed counters are supported
+ * if a feature flag is set **OR** the total number of fixed counters is
+ * greater than the index of the counter.
  *
- * Wrap the "unavailable" feature to simplify checking whether or not a given
- * architectural event is supported.
+ * Wrap the events for general purpose and fixed counters to simplify checking
+ * whether or not a given architectural event is supported.
  */
 struct kvm_x86_pmu_feature {
-       struct kvm_x86_cpu_feature anti_feature;
+       struct kvm_x86_cpu_feature f;
 };
-#define        KVM_X86_PMU_FEATURE(name, __bit)                                        \
-({                                                                             \
-       struct kvm_x86_pmu_feature feature = {                                  \
-               .anti_feature = KVM_X86_CPU_FEATURE(0xa, 0, EBX, __bit),        \
-       };                                                                      \
-                                                                               \
-       feature;                                                                \
+#define        KVM_X86_PMU_FEATURE(__reg, __bit)                               \
+({                                                                     \
+       struct kvm_x86_pmu_feature feature = {                          \
+               .f = KVM_X86_CPU_FEATURE(0xa, 0, __reg, __bit),         \
+       };                                                              \
+                                                                       \
+       kvm_static_assert(KVM_CPUID_##__reg == KVM_CPUID_EBX ||         \
+                         KVM_CPUID_##__reg == KVM_CPUID_ECX);          \
+       feature;                                                        \
 })
 
-#define X86_PMU_FEATURE_BRANCH_INSNS_RETIRED   KVM_X86_PMU_FEATURE(BRANCH_INSNS_RETIRED, 5)
+#define X86_PMU_FEATURE_CPU_CYCLES                     KVM_X86_PMU_FEATURE(EBX, 0)
+#define X86_PMU_FEATURE_INSNS_RETIRED                  KVM_X86_PMU_FEATURE(EBX, 1)
+#define X86_PMU_FEATURE_REFERENCE_CYCLES               KVM_X86_PMU_FEATURE(EBX, 2)
+#define X86_PMU_FEATURE_LLC_REFERENCES                 KVM_X86_PMU_FEATURE(EBX, 3)
+#define X86_PMU_FEATURE_LLC_MISSES                     KVM_X86_PMU_FEATURE(EBX, 4)
+#define X86_PMU_FEATURE_BRANCH_INSNS_RETIRED           KVM_X86_PMU_FEATURE(EBX, 5)
+#define X86_PMU_FEATURE_BRANCHES_MISPREDICTED          KVM_X86_PMU_FEATURE(EBX, 6)
+#define X86_PMU_FEATURE_TOPDOWN_SLOTS                  KVM_X86_PMU_FEATURE(EBX, 7)
+
+#define X86_PMU_FEATURE_INSNS_RETIRED_FIXED            KVM_X86_PMU_FEATURE(ECX, 0)
+#define X86_PMU_FEATURE_CPU_CYCLES_FIXED               KVM_X86_PMU_FEATURE(ECX, 1)
+#define X86_PMU_FEATURE_REFERENCE_TSC_CYCLES_FIXED     KVM_X86_PMU_FEATURE(ECX, 2)
+#define X86_PMU_FEATURE_TOPDOWN_SLOTS_FIXED            KVM_X86_PMU_FEATURE(ECX, 3)
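A worked example of the two (inverted) checks, with purely illustrative CPUID values:

	/*
	 * Illustration (assumed values): with an EBX bit-vector length of 7
	 * and EBX = 0x20, TOPDOWN_SLOTS (bit 7) is unsupported because
	 * 7 > 7 is false, and BRANCH_INSNS_RETIRED (bit 5) is unsupported
	 * because its "unavailable" bit is set.  With 3 fixed counters and
	 * ECX = 0x8, fixed counters 0-2 are supported via the count, and
	 * TOPDOWN_SLOTS_FIXED (bit 3) is supported via its feature flag.
	 */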
 
 static inline unsigned int x86_family(unsigned int eax)
 {
@@ -698,10 +725,16 @@ static __always_inline bool this_cpu_has_p(struct kvm_x86_cpu_property property)
 
 static inline bool this_pmu_has(struct kvm_x86_pmu_feature feature)
 {
-       uint32_t nr_bits = this_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
+       uint32_t nr_bits;
+
+       if (feature.f.reg == KVM_CPUID_EBX) {
+               nr_bits = this_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
+               return nr_bits > feature.f.bit && !this_cpu_has(feature.f);
+       }
 
-       return nr_bits > feature.anti_feature.bit &&
-              !this_cpu_has(feature.anti_feature);
+       GUEST_ASSERT(feature.f.reg == KVM_CPUID_ECX);
+       nr_bits = this_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
+       return nr_bits > feature.f.bit || this_cpu_has(feature.f);
 }
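Guest-side usage then looks the same for both encodings, e.g. (sketch):

	/* Sketch: one helper covers arch events and fixed counters alike. */
	if (this_pmu_has(X86_PMU_FEATURE_BRANCH_INSNS_RETIRED))
		; /* program a general purpose counter */
	if (this_pmu_has(X86_PMU_FEATURE_INSNS_RETIRED_FIXED))
		; /* enable fixed counter 0 */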
 
 static __always_inline uint64_t this_cpu_supported_xcr0(void)
@@ -917,10 +950,16 @@ static __always_inline bool kvm_cpu_has_p(struct kvm_x86_cpu_property property)
 
 static inline bool kvm_pmu_has(struct kvm_x86_pmu_feature feature)
 {
-       uint32_t nr_bits = kvm_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
+       uint32_t nr_bits;
 
-       return nr_bits > feature.anti_feature.bit &&
-              !kvm_cpu_has(feature.anti_feature);
+       if (feature.f.reg == KVM_CPUID_EBX) {
+               nr_bits = kvm_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
+               return nr_bits > feature.f.bit && !kvm_cpu_has(feature.f);
+       }
+
+       TEST_ASSERT_EQ(feature.f.reg, KVM_CPUID_ECX);
+       nr_bits = kvm_cpu_property(X86_PROPERTY_PMU_NR_FIXED_COUNTERS);
+       return nr_bits > feature.f.bit || kvm_cpu_has(feature.f);
 }
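The host-side twin is typically used to gate a test, e.g. (sketch; TEST_REQUIRE is the selftests' skip-on-failure macro):

	TEST_REQUIRE(kvm_pmu_has(X86_PMU_FEATURE_BRANCH_INSNS_RETIRED));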
 
 static __always_inline uint64_t kvm_cpu_supported_xcr0(void)
@@ -995,6 +1034,13 @@ static inline void vcpu_set_cpuid(struct kvm_vcpu *vcpu)
        vcpu_ioctl(vcpu, KVM_GET_CPUID2, vcpu->cpuid);
 }
 
+void vcpu_set_cpuid_property(struct kvm_vcpu *vcpu,
+                            struct kvm_x86_cpu_property property,
+                            uint32_t value);
+void vcpu_set_cpuid_maxphyaddr(struct kvm_vcpu *vcpu, uint8_t maxphyaddr);
+
+void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, uint32_t function);
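A sketch of how these setters might be combined (values are illustrative, not from this series):

	/* Sketch: cap guest MAXPHYADDR, then hide an entire leaf. */
	vcpu_set_cpuid_maxphyaddr(vcpu, 36);
	vcpu_set_cpuid_property(vcpu, X86_PROPERTY_MAX_VIRT_ADDR, 48);
	vcpu_clear_cpuid_entry(vcpu, 0x8000001f);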
+
 static inline bool vcpu_cpuid_has(struct kvm_vcpu *vcpu,
                                  struct kvm_x86_cpu_feature feature)
 {
@@ -1004,9 +1050,6 @@ static inline bool vcpu_cpuid_has(struct kvm_vcpu *vcpu,
        return *((&entry->eax) + feature.reg) & BIT(feature.bit);
 }
 
-void vcpu_set_cpuid_maxphyaddr(struct kvm_vcpu *vcpu, uint8_t maxphyaddr);
-
-void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, uint32_t function);
 void vcpu_set_or_clear_cpuid_feature(struct kvm_vcpu *vcpu,
                                     struct kvm_x86_cpu_feature feature,
                                     bool set);
@@ -1068,6 +1111,7 @@ do {                                                                                      \
 } while (0)
 
 void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits);
+void kvm_init_vm_address_properties(struct kvm_vm *vm);
 bool vm_is_unrestricted_guest(struct kvm_vm *vm);
 
 struct ex_regs {
@@ -1129,16 +1173,19 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector,
  * r9  = exception vector (non-zero)
  * r10 = error code
  */
-#define KVM_ASM_SAFE(insn)                                     \
+#define __KVM_ASM_SAFE(insn, fep)                              \
        "mov $" __stringify(KVM_EXCEPTION_MAGIC) ", %%r9\n\t"   \
        "lea 1f(%%rip), %%r10\n\t"                              \
        "lea 2f(%%rip), %%r11\n\t"                              \
-       "1: " insn "\n\t"                                       \
+       fep "1: " insn "\n\t"                                   \
        "xor %%r9, %%r9\n\t"                                    \
        "2:\n\t"                                                \
        "mov  %%r9b, %[vector]\n\t"                             \
        "mov  %%r10, %[error_code]\n\t"
 
+#define KVM_ASM_SAFE(insn) __KVM_ASM_SAFE(insn, "")
+#define KVM_ASM_SAFE_FEP(insn) __KVM_ASM_SAFE(insn, KVM_FEP)
+
 #define KVM_ASM_SAFE_OUTPUTS(v, ec)    [vector] "=qm"(v), [error_code] "=rm"(ec)
 #define KVM_ASM_SAFE_CLOBBERS  "r9", "r10", "r11"
 
@@ -1165,21 +1212,58 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector,
        vector;                                                         \
 })
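For reference, a guest-side sketch of consuming these wrappers (GP_VECTOR and GUEST_ASSERT_EQ already exist in the selftests headers; the MSR index is deliberately bogus):

	/* Sketch: a WRMSR to a bogus MSR should #GP, not kill the guest. */
	uint8_t vector = kvm_asm_safe("wrmsr", "a"(0u), "d"(0u), "c"(0xdeadbeefu));

	GUEST_ASSERT_EQ(vector, GP_VECTOR);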
 
-static inline uint8_t rdmsr_safe(uint32_t msr, uint64_t *val)
-{
-       uint64_t error_code;
-       uint8_t vector;
-       uint32_t a, d;
+#define kvm_asm_safe_fep(insn, inputs...)                              \
+({                                                                     \
+       uint64_t ign_error_code;                                        \
+       uint8_t vector;                                                 \
+                                                                       \
+       asm volatile(KVM_ASM_SAFE_FEP(insn)                             \
+                    : KVM_ASM_SAFE_OUTPUTS(vector, ign_error_code)     \
+                    : inputs                                           \
+                    : KVM_ASM_SAFE_CLOBBERS);                          \
+       vector;                                                         \
+})
 
-       asm volatile(KVM_ASM_SAFE("rdmsr")
-                    : "=a"(a), "=d"(d), KVM_ASM_SAFE_OUTPUTS(vector, error_code)
-                    : "c"(msr)
-                    : KVM_ASM_SAFE_CLOBBERS);
+#define kvm_asm_safe_ec_fep(insn, error_code, inputs...)               \
+({                                                                     \
+       uint8_t vector;                                                 \
+                                                                       \
+       asm volatile(KVM_ASM_SAFE_FEP(insn)                             \
+                    : KVM_ASM_SAFE_OUTPUTS(vector, error_code)         \
+                    : inputs                                           \
+                    : KVM_ASM_SAFE_CLOBBERS);                          \
+       vector;                                                         \
+})
 
-       *val = (uint64_t)a | ((uint64_t)d << 32);
-       return vector;
+#define BUILD_READ_U64_SAFE_HELPER(insn, _fep, _FEP)                   \
+static inline uint8_t insn##_safe ##_fep(uint32_t idx, uint64_t *val)  \
+{                                                                      \
+       uint64_t error_code;                                            \
+       uint8_t vector;                                                 \
+       uint32_t a, d;                                                  \
+                                                                       \
+       asm volatile(KVM_ASM_SAFE##_FEP(#insn)                          \
+                    : "=a"(a), "=d"(d),                                \
+                      KVM_ASM_SAFE_OUTPUTS(vector, error_code)         \
+                    : "c"(idx)                                         \
+                    : KVM_ASM_SAFE_CLOBBERS);                          \
+                                                                       \
+       *val = (uint64_t)a | ((uint64_t)d << 32);                       \
+       return vector;                                                  \
 }
 
+/*
+ * Generate {insn}_safe() and {insn}_safe_fep() helpers for instructions that
+ * use ECX as an input index, and EDX:EAX as a 64-bit output.
+ */
+#define BUILD_READ_U64_SAFE_HELPERS(insn)                              \
+       BUILD_READ_U64_SAFE_HELPER(insn, , )                            \
+       BUILD_READ_U64_SAFE_HELPER(insn, _fep, _FEP)                    \
+
+BUILD_READ_U64_SAFE_HELPERS(rdmsr)
+BUILD_READ_U64_SAFE_HELPERS(rdpmc)
+BUILD_READ_U64_SAFE_HELPERS(xgetbv)
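Each invocation above expands to an {insn}_safe() and {insn}_safe_fep() pair, used like so (sketch; msr_index is a placeholder):

	/* Sketch: probe an MSR via the normal path and via forced emulation. */
	uint64_t val;
	uint8_t vector = rdmsr_safe(msr_index, &val);
	uint8_t fep_vector = rdmsr_safe_fep(msr_index, &val);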
+
 static inline uint8_t wrmsr_safe(uint32_t msr, uint64_t val)
 {
        return kvm_asm_safe("wrmsr", "a"(val & -1u), "d"(val >> 32), "c"(msr));
@@ -1195,6 +1279,16 @@ static inline uint8_t xsetbv_safe(uint32_t index, uint64_t value)
 
 bool kvm_is_tdp_enabled(void);
 
+static inline bool kvm_is_pmu_enabled(void)
+{
+       return get_kvm_param_bool("enable_pmu");
+}
+
+static inline bool kvm_is_forced_emulation_enabled(void)
+{
+       return !!get_kvm_param_integer("force_emulation_prefix");
+}
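Typical host-side usage (sketch; TEST_REQUIRE skips the test when the check fails):

	TEST_REQUIRE(kvm_is_pmu_enabled());

	bool fep_available = kvm_is_forced_emulation_enabled();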
+
 uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr,
                                    int *level);
 uint64_t *vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr);
@@ -1280,4 +1374,6 @@ void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
 #define PFERR_GUEST_PAGE_MASK  BIT_ULL(PFERR_GUEST_PAGE_BIT)
 #define PFERR_IMPLICIT_ACCESS  BIT_ULL(PFERR_IMPLICIT_ACCESS_BIT)
 
+bool sys_clocksource_is_based_on_tsc(void);
+
 #endif /* SELFTEST_KVM_PROCESSOR_H */