/*
 * tools/testing/selftests/kvm/include/kvm_util.h
 *
 * Copyright (C) 2018, Google LLC.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 */
#ifndef SELFTEST_KVM_UTIL_H
#define SELFTEST_KVM_UTIL_H 1

#include "test_util.h"

#include "asm/kvm.h"
#include "linux/kvm.h"
#include <sys/ioctl.h>

#include "sparsebit.h"

/*
 * Memslots can't cover the gfn starting at this gpa otherwise vCPUs can't be
 * created. Only applies to VMs using EPT.
 */
#define KVM_DEFAULT_IDENTITY_MAP_ADDRESS 0xfffbc000ul

/*
 * Callers of kvm_util only have an incomplete/opaque description of the
 * structure kvm_util is using to maintain the state of a VM.
 */
struct kvm_vm;

typedef uint64_t vm_paddr_t; /* Virtual Machine (Guest) physical address */
typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */

/* Minimum allocated guest virtual and physical addresses */
#define KVM_UTIL_MIN_VADDR 0x2000

#define DEFAULT_GUEST_PHY_PAGES 512
#define DEFAULT_GUEST_STACK_VADDR_MIN 0xab6000
#define DEFAULT_STACK_PGS 5

enum vm_guest_mode {
	VM_MODE_FLAT48PG,
};

enum vm_mem_backing_src_type {
	VM_MEM_SRC_ANONYMOUS,
	VM_MEM_SRC_ANONYMOUS_THP,
	VM_MEM_SRC_ANONYMOUS_HUGETLB,
};

int kvm_check_cap(long cap);
int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap);

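/*
 * VM creation and teardown.  vm_create() returns an opaque handle to a new VM
 * with @phy_pages of guest physical memory; kvm_vm_free() releases everything
 * associated with it.  kvm_vm_release() only drops the kernel side (the file
 * descriptors), allowing kvm_vm_restart() to recreate the VM from the state
 * kvm_util still tracks.
 */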
struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm);
void kvm_vm_free(struct kvm_vm *vmp);
void kvm_vm_restart(struct kvm_vm *vmp, int perm);
void kvm_vm_release(struct kvm_vm *vmp);
void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log);

int kvm_memcmp_hva_gva(void *hva,
	struct kvm_vm *vm, const vm_vaddr_t gva, size_t len);

void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename,
	uint32_t data_memslot, uint32_t pgd_memslot);

void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
void vcpu_dump(FILE *stream, struct kvm_vm *vm,
	uint32_t vcpuid, uint8_t indent);

void vm_create_irqchip(struct kvm_vm *vm);

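/*
 * Add a memory region to the VM: memslot @slot, @npages pages backed by
 * @src_type, starting at guest physical address @guest_paddr.  @flags is
 * passed through to KVM_SET_USER_MEMORY_REGION (e.g. KVM_MEM_LOG_DIRTY_PAGES).
 */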
void vm_userspace_mem_region_add(struct kvm_vm *vm,
	enum vm_mem_backing_src_type src_type,
	uint64_t guest_paddr, uint32_t slot, uint64_t npages,
	uint32_t flags);

void vcpu_ioctl(struct kvm_vm *vm,
	uint32_t vcpuid, unsigned long ioctl, void *arg);
void vm_ioctl(struct kvm_vm *vm, unsigned long ioctl, void *arg);
void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid, int pgd_memslot, int gdt_memslot);
vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
	uint32_t data_memslot, uint32_t pgd_memslot);
void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
	size_t size, uint32_t pgd_memslot);

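/*
 * Address translation helpers between guest physical (gpa), guest virtual
 * (gva) and host virtual (hva) addresses.  addr_gva2gpa() walks the guest
 * page tables set up by kvm_util.
 */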
void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);

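/*
 * vCPU execution and register access.  vcpu_state() returns the vCPU's
 * mmap()ed kvm_run structure.  vcpu_run() enters the guest and asserts that
 * KVM_RUN succeeded; by convention, the underscore-prefixed variants
 * (_vcpu_run(), _vcpu_sregs_set()) return the result to the caller instead
 * of asserting on failure.
 */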
struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid);
void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid);
int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid);
void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid,
	struct kvm_mp_state *mp_state);
void vcpu_regs_get(struct kvm_vm *vm,
	uint32_t vcpuid, struct kvm_regs *regs);
void vcpu_regs_set(struct kvm_vm *vm,
	uint32_t vcpuid, struct kvm_regs *regs);
void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...);
void vcpu_sregs_get(struct kvm_vm *vm,
	uint32_t vcpuid, struct kvm_sregs *sregs);
void vcpu_sregs_set(struct kvm_vm *vm,
	uint32_t vcpuid, struct kvm_sregs *sregs);
int _vcpu_sregs_set(struct kvm_vm *vm,
	uint32_t vcpuid, struct kvm_sregs *sregs);
void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid,
	struct kvm_vcpu_events *events);
void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid,
	struct kvm_vcpu_events *events);
uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index);
void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
	uint64_t msr_value);

const char *exit_reason_str(unsigned int exit_reason);

void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot);
void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
	uint32_t pgd_memslot);
vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm,
	vm_paddr_t paddr_min, uint32_t memslot);

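/*
 * CPUID helpers: kvm_get_supported_cpuid() returns the result of
 * KVM_GET_SUPPORTED_CPUID, kvm_get_supported_cpuid_index() looks up a single
 * (function, index) entry in it, and vcpu_set_cpuid() applies a cpuid table
 * to a vCPU (KVM_SET_CPUID2).
 */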
struct kvm_cpuid2 *kvm_get_supported_cpuid(void);
void vcpu_set_cpuid(
	struct kvm_vm *vm, uint32_t vcpuid, struct kvm_cpuid2 *cpuid);

struct kvm_cpuid_entry2 *
kvm_get_supported_cpuid_index(uint32_t function, uint32_t index);

static inline struct kvm_cpuid_entry2 *
kvm_get_supported_cpuid_entry(uint32_t function)
{
	return kvm_get_supported_cpuid_index(function, 0);
}

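/*
 * Convenience constructor: creates a VM with the default amount of guest
 * memory plus @extra_mem_size, loads the test program into it, and adds
 * vCPU @vcpuid set up to begin executing at @guest_code.
 */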
struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_size,
	void *guest_code);
void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code);

typedef void (*vmx_guest_code_t)(vm_vaddr_t vmxon_vaddr,
	vm_paddr_t vmxon_paddr,
	vm_vaddr_t vmcs_vaddr,
	vm_paddr_t vmcs_paddr);

struct kvm_userspace_memory_region *
kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
	uint64_t end);

struct kvm_dirty_log *
allocate_kvm_dirty_log(struct kvm_userspace_memory_region *region);

int vm_create_device(struct kvm_vm *vm, struct kvm_create_device *cd);

#define GUEST_PORT_SYNC 0x1000
#define GUEST_PORT_ABORT 0x1001
#define GUEST_PORT_DONE 0x1002

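/*
 * Exit from the guest to the host (L0) via a port I/O instruction.  KVM
 * forwards the I/O to userspace as a KVM_EXIT_IO, at which point the host
 * side of the test can inspect @port plus @arg0/@arg1, which are passed in
 * RDI and RSI respectively (see guest_args_read()).
 */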
static inline void __exit_to_l0(uint16_t port, uint64_t arg0, uint64_t arg1)
{
	__asm__ __volatile__("in %[port], %%al"
		:
		: [port]"d"(port), "D"(arg0), "S"(arg1)
		: "rax");
}

/*
 * Allows the guest to pass three arguments to the host: port is 16 bits wide,
 * arg0 and arg1 are 64 bits wide.
 */
#define GUEST_SYNC_ARGS(_port, _arg0, _arg1) \
	__exit_to_l0(_port, (uint64_t) (_arg0), (uint64_t) (_arg1))

#define GUEST_ASSERT(_condition) do { \
	if (!(_condition)) \
		GUEST_SYNC_ARGS(GUEST_PORT_ABORT, \
			"Failed guest assert: " \
			#_condition, __LINE__); \
	} while (0)

#define GUEST_SYNC(stage) GUEST_SYNC_ARGS(GUEST_PORT_SYNC, "hello", stage)

#define GUEST_DONE() GUEST_SYNC_ARGS(GUEST_PORT_DONE, 0, 0)

struct guest_args {
	uint64_t arg0;
	uint64_t arg1;
	uint16_t port;
} __attribute__ ((packed));

void guest_args_read(struct kvm_vm *vm, uint32_t vcpu_id,
	struct guest_args *args);

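/*
 * Illustrative sketch (not part of the API) of how a test might combine the
 * synchronization macros above with guest_args_read(); names such as
 * guest_code, some_condition and the stage numbering are hypothetical:
 *
 *	static void guest_code(void)
 *	{
 *		GUEST_SYNC(1);				// checkpoint reached
 *		GUEST_ASSERT(some_condition);		// abort on failure
 *		GUEST_DONE();
 *	}
 *
 *	int main(void)
 *	{
 *		struct kvm_vm *vm = vm_create_default(0, 0, guest_code);
 *		struct guest_args args;
 *
 *		for (;;) {
 *			vcpu_run(vm, 0);
 *			guest_args_read(vm, 0, &args);
 *
 *			if (args.port == GUEST_PORT_DONE)
 *				break;
 *			if (args.port == GUEST_PORT_ABORT)
 *				TEST_ASSERT(false,
 *					"guest abort, arg0 = 0x%lx at line %lu",
 *					args.arg0, args.arg1);
 *			// GUEST_PORT_SYNC: args.arg1 holds the stage number
 *		}
 *
 *		kvm_vm_free(vm);
 *		return 0;
 *	}
 */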
#endif /* SELFTEST_KVM_UTIL_H */