KVM: selftests: Move setting a vCPU's entry point to a dedicated API
authorSean Christopherson <seanjc@google.com>
Thu, 8 Feb 2024 20:48:39 +0000 (21:48 +0100)
committerSean Christopherson <seanjc@google.com>
Wed, 28 Feb 2024 20:58:05 +0000 (20:58 +0000)
Extract the code to set a vCPU's entry point out of vm_arch_vcpu_add() and
into a new API, vcpu_arch_set_entry_point().  Providing a separate API
will allow creating a KVM selftests harness that can handle tests which use
different entry points for sub-tests, whereas *requiring* the entry
point to be specified at vCPU creation makes it difficult to create a
generic harness, e.g. the boilerplate setup/teardown can't easily create
and destroy the VM and vCPUs.
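
For illustration, a harness built on the split APIs might look roughly like
the sketch below; the sub-test entry points guest_test_a/guest_test_b and the
direct call to vm_arch_vcpu_add() are purely illustrative, not part of this
patch:

	struct kvm_vm *vm = vm_create(1);
	struct kvm_vcpu *vcpu = vm_arch_vcpu_add(vm, 0);

	/* guest_test_a/guest_test_b are hypothetical sub-test entry points. */
	vcpu_arch_set_entry_point(vcpu, guest_test_a);
	vcpu_run(vcpu);

	/* Reuse the same vCPU for the next sub-test. */
	vcpu_arch_set_entry_point(vcpu, guest_test_b);
	vcpu_run(vcpu);

	kvm_vm_free(vm);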

Signed-off-by: Thomas Huth <thuth@redhat.com>
Link: https://lore.kernel.org/r/20240208204844.119326-4-thuth@redhat.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
tools/testing/selftests/kvm/include/kvm_util_base.h
tools/testing/selftests/kvm/lib/aarch64/processor.c
tools/testing/selftests/kvm/lib/riscv/processor.c
tools/testing/selftests/kvm/lib/s390x/processor.c
tools/testing/selftests/kvm/lib/x86_64/processor.c

index 9e5afc472c14268bbe629cb9c1baf6049b702457..a6e7738a8db7386f0a9a64a9904fdcf11951faea 100644 (file)
@@ -969,15 +969,18 @@ static inline void vcpu_dump(FILE *stream, struct kvm_vcpu *vcpu,
  * Input Args:
  *   vm - Virtual Machine
  *   vcpu_id - The id of the VCPU to add to the VM.
- *   guest_code - The vCPU's entry point
  */
-struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
-                                 void *guest_code);
+struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
+void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code);
 
 static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
                                           void *guest_code)
 {
-       return vm_arch_vcpu_add(vm, vcpu_id, guest_code);
+       struct kvm_vcpu *vcpu = vm_arch_vcpu_add(vm, vcpu_id);
+
+       vcpu_arch_set_entry_point(vcpu, guest_code);
+
+       return vcpu;
 }
 
 /* Re-create a vCPU after restarting a VM, e.g. for state save/restore tests. */
index 41c776b642c0cd0be722e4bad1e0e9cc1f0cff80..c83616e19bad49a8b29e43832017ff86e0b71dac 100644 (file)
@@ -365,8 +365,13 @@ void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
                indent, "", pstate, pc);
 }
 
-struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
-                                 struct kvm_vcpu_init *init, void *guest_code)
+void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
+{
+       vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);
+}
+
+static struct kvm_vcpu *__aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
+                                          struct kvm_vcpu_init *init)
 {
        size_t stack_size;
        uint64_t stack_vaddr;
@@ -381,15 +386,22 @@ struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
        aarch64_vcpu_setup(vcpu, init);
 
        vcpu_set_reg(vcpu, ARM64_CORE_REG(sp_el1), stack_vaddr + stack_size);
-       vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);
+       return vcpu;
+}
+
+struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
+                                 struct kvm_vcpu_init *init, void *guest_code)
+{
+       struct kvm_vcpu *vcpu = __aarch64_vcpu_add(vm, vcpu_id, init);
+
+       vcpu_arch_set_entry_point(vcpu, guest_code);
 
        return vcpu;
 }
 
-struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
-                                 void *guest_code)
+struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
 {
-       return aarch64_vcpu_add(vm, vcpu_id, NULL, guest_code);
+       return __aarch64_vcpu_add(vm, vcpu_id, NULL);
 }
 
 void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
index 7ca736fb4194046072bf69b3210f0fefd8ce0834..c993947f07823f09fb151578b2dad2c43e0eff32 100644 (file)
@@ -277,8 +277,12 @@ static void __aligned(16) guest_unexp_trap(void)
                  0, 0, 0, 0, 0, 0);
 }
 
-struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
-                                 void *guest_code)
+void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
+{
+       vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.pc), (unsigned long)guest_code);
+}
+
+struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
 {
        int r;
        size_t stack_size;
@@ -312,7 +316,6 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
 
        /* Setup stack pointer and program counter of guest */
        vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.sp), stack_vaddr + stack_size);
-       vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.pc), (unsigned long)guest_code);
 
        /* Setup default exception vector of guest */
        vcpu_set_reg(vcpu, RISCV_GENERAL_CSR_REG(stvec), (unsigned long)guest_unexp_trap);
index 15945121daf17dc46bf38cf8f2a24d23b8f073f3..cd5301cc9788adc3e636c131b039d0e7e79080aa 100644 (file)
@@ -155,15 +155,18 @@ void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
        virt_dump_region(stream, vm, indent, vm->pgd);
 }
 
-struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
-                                 void *guest_code)
+void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
+{
+       vcpu->run->psw_addr = (uintptr_t)guest_code;
+}
+
+struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
 {
        size_t stack_size =  DEFAULT_STACK_PGS * getpagesize();
        uint64_t stack_vaddr;
        struct kvm_regs regs;
        struct kvm_sregs sregs;
        struct kvm_vcpu *vcpu;
-       struct kvm_run *run;
 
        TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
                    vm->page_size);
@@ -184,9 +187,7 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
        sregs.crs[1] = vm->pgd | 0xf;           /* Primary region table */
        vcpu_sregs_set(vcpu, &sregs);
 
-       run = vcpu->run;
-       run->psw_mask = 0x0400000180000000ULL;  /* DAT enabled + 64 bit mode */
-       run->psw_addr = (uintptr_t)guest_code;
+       vcpu->run->psw_mask = 0x0400000180000000ULL;  /* DAT enabled + 64 bit mode */
 
        return vcpu;
 }
index d8288374078e4b3ce888bed569c7e59192d43e7c..b9b6cb730a0889ac8f2dc4c47080b2112aafaf66 100644 (file)
@@ -562,8 +562,16 @@ void kvm_arch_vm_post_create(struct kvm_vm *vm)
        sync_global_to_guest(vm, host_cpu_is_amd);
 }
 
-struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
-                                 void *guest_code)
+void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
+{
+       struct kvm_regs regs;
+
+       vcpu_regs_get(vcpu, &regs);
+       regs.rip = (unsigned long) guest_code;
+       vcpu_regs_set(vcpu, &regs);
+}
+
+struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
 {
        struct kvm_mp_state mp_state;
        struct kvm_regs regs;
@@ -597,7 +605,6 @@ struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
        vcpu_regs_get(vcpu, &regs);
        regs.rflags = regs.rflags | 0x2;
        regs.rsp = stack_vaddr;
-       regs.rip = (unsigned long) guest_code;
        vcpu_regs_set(vcpu, &regs);
 
        /* Setup the MP state */