// SPDX-License-Identifier: GPL-2.0
 * Copyright (C) 2018, Red Hat, Inc.
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <sys/ioctl.h>
#include "test_util.h"
#define PAGE_SIZE 4096
#define SMRAM_SIZE 65536
#define SMRAM_MEMSLOT ((1 << 16) | 1)
#define SMRAM_PAGES (SMRAM_SIZE / PAGE_SIZE)
#define SMRAM_GPA 0x1000000
#define SMRAM_STAGE 0xfe
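/*
 * SMRAM is a dedicated 64KiB region backed by its own memslot at SMRAM_GPA;
 * SMRAM_STAGE is the fixed stage value the SMI handler reports to the host.
 */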
/* Expand the argument (e.g. SYNC_PORT) before stringifying it. */
#define STR(x) #x
#define XSTR(s) STR(s)
 * This is compiled as normal 64-bit code, but the SMI handler is executed
 * in real-address mode. To keep things simple we limit ourselves to a
 * mode-independent subset of asm here.
 * The SMI handler always reports back the fixed stage SMRAM_STAGE.
uint8_t smi_handler[] = {
	0xb0, SMRAM_STAGE,    /* mov $SMRAM_STAGE, %al */
	0xe4, SYNC_PORT,      /* in $SYNC_PORT, %al */
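	/*
	 * An SMI handler has to finish with an rsm instruction (0x0f 0xaa)
	 * so the vCPU resumes the interrupted context after reporting.
	 */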
static inline void sync_with_host(uint64_t phase)
	asm volatile("in $" XSTR(SYNC_PORT)", %%al \n"
	/*
	 * Send an SMI to the vCPU itself: a self-IPI with SMI delivery
	 * mode, written through the x2APIC ICR MSR.
	 */
	wrmsr(APIC_BASE_MSR + (APIC_ICR >> 4),
	      APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_SMI);
void guest_code(void *arg)
	uint64_t apicbase = rdmsr(MSR_IA32_APICBASE);
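	/* Switch to x2APIC mode so the SMI self-IPI can go through the ICR MSR. */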
	wrmsr(MSR_IA32_APICBASE, apicbase | X2APIC_ENABLE);
	generic_svm_setup(arg, NULL, NULL);		/* nested SVM path */
	GUEST_ASSERT(prepare_for_vmx_operation(arg));	/* nested VMX path */
int main(int argc, char *argv[])
	vm_vaddr_t nested_gva = 0;
	struct kvm_x86_state *state;
	int stage, stage_reported;
	vm = vm_create_default(VCPU_ID, 0, guest_code);
	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
	run = vcpu_state(vm, VCPU_ID);
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, SMRAM_GPA,
				    SMRAM_MEMSLOT, SMRAM_PAGES, 0);
	TEST_ASSERT(vm_phy_pages_alloc(vm, SMRAM_PAGES, SMRAM_GPA, SMRAM_MEMSLOT)
		    == SMRAM_GPA, "could not allocate guest physical addresses?");
	memset(addr_gpa2hva(vm, SMRAM_GPA), 0x0, SMRAM_SIZE);
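	/*
	 * The SMI entry point is SMBASE + 0x8000, so place the handler at
	 * that offset within the SMRAM region.
	 */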
	memcpy(addr_gpa2hva(vm, SMRAM_GPA) + 0x8000, smi_handler,
	       sizeof(smi_handler));
	vcpu_set_msr(vm, VCPU_ID, MSR_IA32_SMBASE, SMRAM_GPA);
	if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
		if (nested_svm_supported())
			vcpu_alloc_svm(vm, &nested_gva);
		else if (nested_vmx_supported())
			vcpu_alloc_vmx(vm, &nested_gva);
	}

	if (!nested_gva)
		pr_info("will skip SMM test with VMX enabled\n");
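	/* Pass the nested guest pointer (or 0) as guest_code()'s only argument. */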
	vcpu_args_set(vm, VCPU_ID, 1, nested_gva);
	for (stage = 1;; stage++) {
		_vcpu_run(vm, VCPU_ID);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "Stage %d: unexpected exit reason: %u (%s),\n",
			    stage, run->exit_reason,
			    exit_reason_str(run->exit_reason));
		memset(&regs, 0, sizeof(regs));
		vcpu_regs_get(vm, VCPU_ID, &regs);
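		/* The guest (or the SMI handler) reports its stage in AL on each SYNC_PORT exit. */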
		stage_reported = regs.rax & 0xff;
		if (stage_reported == DONE)
		TEST_ASSERT(stage_reported == stage ||
			    stage_reported == SMRAM_STAGE,
			    "Unexpected stage: #%x, got %x",
			    stage, stage_reported);
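		/*
		 * Save the vCPU state, recreate the VM and vCPU, and load the
		 * state back, emulating a save/restore (migration) cycle at
		 * every stage, including stages where the vCPU is in SMM.
		 */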
		state = vcpu_save_state(vm, VCPU_ID);
		kvm_vm_restart(vm, O_RDWR);
		vm_vcpu_add(vm, VCPU_ID);
		vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
		vcpu_load_state(vm, VCPU_ID, state);
		run = vcpu_state(vm, VCPU_ID);