/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * tools/testing/selftests/kvm/include/kvm_util_base.h
 *
 * Copyright (C) 2018, Google LLC.
 */
#ifndef SELFTEST_KVM_UTIL_BASE_H
#define SELFTEST_KVM_UTIL_BASE_H

#include "test_util.h"

#include <linux/compiler.h>
#include "linux/hashtable.h"
#include "linux/list.h"
#include <linux/kernel.h>
#include <linux/kvm.h>
#include "linux/rbtree.h"
#include <linux/types.h>

#include <asm/atomic.h>
#include <asm/kvm.h>

#include <sys/ioctl.h>

#include "kvm_util_arch.h"
#include "sparsebit.h"

/*
 * Provide a version of static_assert() that is guaranteed to have an optional
 * message param. If _ISOC11_SOURCE is defined, glibc (/usr/include/assert.h)
 * #undefs and #defines static_assert() as a direct alias to _Static_assert(),
 * i.e. effectively makes the message mandatory. Many KVM selftests #define
 * _GNU_SOURCE for various reasons, and _GNU_SOURCE implies _ISOC11_SOURCE. As
 * a result, static_assert() behavior is non-deterministic and may or may not
 * require a message depending on #include order.
 */
#define __kvm_static_assert(expr, msg, ...) _Static_assert(expr, msg)
#define kvm_static_assert(expr, ...) __kvm_static_assert(expr, ##__VA_ARGS__, #expr)
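
/*
 * Example usage (illustrative only): the message argument is optional with
 * either form below, regardless of whether _ISOC11_SOURCE snuck in first.
 *
 *	kvm_static_assert(sizeof(uint64_t) == 8);
 *	kvm_static_assert(sizeof(uint64_t) == 8, "uint64_t must be 8 bytes");
 */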

#define KVM_DEV_PATH		"/dev/kvm"
#define KVM_MAX_VCPUS		512

#define NSEC_PER_SEC		1000000000L

typedef uint64_t vm_paddr_t; /* Virtual Machine (Guest) physical address */
typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */

struct userspace_mem_region {
	struct kvm_userspace_memory_region2 region;
	struct sparsebit *unused_phy_pages;
	struct sparsebit *protected_phy_pages;
	int fd;
	off_t offset;
	enum vm_mem_backing_src_type backing_src_type;
	void *host_mem;
	void *host_alias;
	void *mmap_start;
	void *mmap_alias;
	size_t mmap_size;
	struct rb_node gpa_node;
	struct rb_node hva_node;
	struct hlist_node slot_node;
};

struct kvm_vcpu {
	struct list_head list;
	uint32_t id;
	int fd;
	struct kvm_vm *vm;
	struct kvm_run *run;
#ifdef __x86_64__
	struct kvm_cpuid2 *cpuid;
#endif
	struct kvm_dirty_gfn *dirty_gfns;
	uint32_t fetch_index;
	uint32_t dirty_gfns_count;
};

struct userspace_mem_regions {
	struct rb_root gpa_tree;
	struct rb_root hva_tree;
	DECLARE_HASHTABLE(slot_hash, 9);
};

enum kvm_mem_region_type {
	MEM_REGION_CODE,
	MEM_REGION_DATA,
	MEM_REGION_PT,
	MEM_REGION_TEST_DATA,
	NR_MEM_REGIONS,
};

struct kvm_vm {
	int mode;
	unsigned long type;
	uint8_t subtype;
	int kvm_fd;
	int fd;
	unsigned int pgtable_levels;
	unsigned int page_size;
	unsigned int page_shift;
	unsigned int pa_bits;
	unsigned int va_bits;
	uint64_t max_gfn;
	struct list_head vcpus;
	struct userspace_mem_regions regions;
	struct sparsebit *vpages_valid;
	struct sparsebit *vpages_mapped;
	bool has_irqchip;
	bool pgd_created;
	vm_paddr_t ucall_mmio_addr;
	vm_paddr_t pgd;
	vm_vaddr_t gdt;
	vm_vaddr_t tss;
	vm_vaddr_t idt;
	vm_vaddr_t handlers;
	uint32_t dirty_ring_size;
	uint64_t gpa_tag_mask;

	struct kvm_vm_arch arch;

	/* Cache of information for binary stats interface */
	int stats_fd;
	struct kvm_stats_header stats_header;
	struct kvm_stats_desc *stats_desc;

	/*
	 * KVM region slots. These are the default memslots used by page
	 * allocators, e.g., lib/elf uses the memslots[MEM_REGION_CODE]
	 * memslot.
	 */
	uint32_t memslots[NR_MEM_REGIONS];
};

struct vcpu_reg_sublist {
	const char *name;
	long capability;
	int feature;
	int feature_type;
	bool finalize;
	__u64 *regs;
	__u64 regs_n;
	__u64 *rejects_set;
	__u64 rejects_set_n;
	__u64 *skips_set;
	__u64 skips_set_n;
};

struct vcpu_reg_list {
	char *name;
	struct vcpu_reg_sublist sublists[];
};

#define for_each_sublist(c, s)						\
	for ((s) = &(c)->sublists[0]; (s)->regs; ++(s))

#define kvm_for_each_vcpu(vm, i, vcpu)					\
	for ((i) = 0; (i) <= (vm)->last_vcpu_id; (i)++)			\
		if (!((vcpu) = vm->vcpus[i]))				\
			continue;					\
		else

struct userspace_mem_region *
memslot2region(struct kvm_vm *vm, uint32_t memslot);

static inline struct userspace_mem_region *vm_get_mem_region(struct kvm_vm *vm,
							      enum kvm_mem_region_type type)
{
	assert(type < NR_MEM_REGIONS);
	return memslot2region(vm, vm->memslots[type]);
}

/* Minimum allocated guest virtual and physical addresses */
#define KVM_UTIL_MIN_VADDR		0x2000
#define KVM_GUEST_PAGE_TABLE_MIN_PADDR	0x180000

#define DEFAULT_GUEST_STACK_VADDR_MIN	0xab6000
#define DEFAULT_STACK_PGS		5

enum vm_guest_mode {
	VM_MODE_P52V48_4K,
	VM_MODE_P52V48_16K,
	VM_MODE_P52V48_64K,
	VM_MODE_P48V48_4K,
	VM_MODE_P48V48_16K,
	VM_MODE_P48V48_64K,
	VM_MODE_P40V48_4K,
	VM_MODE_P40V48_16K,
	VM_MODE_P40V48_64K,
	VM_MODE_PXXV48_4K,	/* For 48bits VA but ANY bits PA */
	VM_MODE_P47V64_4K,
	VM_MODE_P44V64_4K,
	VM_MODE_P36V48_4K,
	VM_MODE_P36V48_16K,
	VM_MODE_P36V48_64K,
	VM_MODE_P36V47_16K,
	NUM_VM_MODES,
};

struct vm_shape {
	uint32_t type;
	uint8_t  mode;
	uint8_t  subtype;
	uint16_t padding;
};

kvm_static_assert(sizeof(struct vm_shape) == sizeof(uint64_t));

#define VM_TYPE_DEFAULT			0

#define VM_SHAPE(__mode)			\
({						\
	struct vm_shape shape = {		\
		.mode = (__mode),		\
		.type = VM_TYPE_DEFAULT		\
	};					\
						\
	shape;					\
})
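
/*
 * Usage sketch (illustrative): build a shape for a specific guest mode; tests
 * that don't care can use VM_SHAPE_DEFAULT, defined below.
 *
 *	struct vm_shape shape = VM_SHAPE(VM_MODE_P48V48_4K);
 */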

#if defined(__aarch64__)

extern enum vm_guest_mode vm_mode_default;

#define VM_MODE_DEFAULT			vm_mode_default
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#elif defined(__x86_64__)

#define VM_MODE_DEFAULT			VM_MODE_PXXV48_4K
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#elif defined(__s390x__)

#define VM_MODE_DEFAULT			VM_MODE_P44V64_4K
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 16)

#elif defined(__riscv)

#if __riscv_xlen == 32
#error "RISC-V 32-bit kvm selftests not supported"
#endif

#define VM_MODE_DEFAULT			VM_MODE_P40V48_4K
#define MIN_PAGE_SHIFT			12U
#define ptes_per_page(page_size)	((page_size) / 8)

#endif

#define VM_SHAPE_DEFAULT	VM_SHAPE(VM_MODE_DEFAULT)

#define MIN_PAGE_SIZE		(1U << MIN_PAGE_SHIFT)
#define PTES_PER_MIN_PAGE	ptes_per_page(MIN_PAGE_SIZE)

struct vm_guest_mode_params {
	unsigned int pa_bits;
	unsigned int va_bits;
	unsigned int page_size;
	unsigned int page_shift;
};
extern const struct vm_guest_mode_params vm_guest_mode_params[];

int open_path_or_exit(const char *path, int flags);
int open_kvm_dev_path_or_exit(void);

bool get_kvm_param_bool(const char *param);
bool get_kvm_intel_param_bool(const char *param);
bool get_kvm_amd_param_bool(const char *param);

unsigned int kvm_check_cap(long cap);

static inline bool kvm_has_cap(long cap)
{
	return kvm_check_cap(cap);
}

#define __KVM_SYSCALL_ERROR(_name, _ret) \
	"%s failed, rc: %i errno: %i (%s)", (_name), (_ret), errno, strerror(errno)

/*
 * Use the "inner", double-underscore macro when reporting errors from within
 * other macros so that the name of ioctl() and not its literal numeric value
 * is printed on error. The "outer" macro is strongly preferred when reporting
 * errors "directly", i.e. without an additional layer of macros, as it reduces
 * the probability of passing in the wrong string.
 */
#define __KVM_IOCTL_ERROR(_name, _ret)	__KVM_SYSCALL_ERROR(_name, _ret)
#define KVM_IOCTL_ERROR(_ioctl, _ret)	__KVM_IOCTL_ERROR(#_ioctl, _ret)
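
/*
 * Illustrative sketch of the intended split: spell the ioctl name directly
 * with the outer macro, and use the inner macro when the name arrives as a
 * stringified macro argument (as the kvm_ioctl()/vm_ioctl() wrappers below
 * do with #cmd).
 *
 *	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_USER_MEMORY_REGION, ret));
 *	TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(#cmd, ret));
 */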

#define kvm_do_ioctl(fd, cmd, arg)						\
({										\
	kvm_static_assert(!_IOC_SIZE(cmd) || sizeof(*arg) == _IOC_SIZE(cmd));	\
	ioctl(fd, cmd, arg);							\
})

#define __kvm_ioctl(kvm_fd, cmd, arg)				\
	kvm_do_ioctl(kvm_fd, cmd, arg)

#define kvm_ioctl(kvm_fd, cmd, arg)				\
({								\
	int ret = __kvm_ioctl(kvm_fd, cmd, arg);		\
								\
	TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(#cmd, ret));	\
})

static __always_inline void static_assert_is_vm(struct kvm_vm *vm) { }

#define __vm_ioctl(vm, cmd, arg)				\
({								\
	static_assert_is_vm(vm);				\
	kvm_do_ioctl((vm)->fd, cmd, arg);			\
})

/*
 * Assert that a VM or vCPU ioctl() succeeded, with extra magic to detect if
 * the ioctl() failed because KVM killed/bugged the VM. To detect a dead VM,
 * probe KVM_CAP_USER_MEMORY, which (a) has been supported by KVM since before
 * selftests existed and (b) should never outright fail, i.e. is supposed to
 * return 0 or 1. If KVM kills a VM, KVM returns -EIO for all ioctl()s for the
 * VM and its vCPUs, including KVM_CHECK_EXTENSION.
 */
#define __TEST_ASSERT_VM_VCPU_IOCTL(cond, name, ret, vm)				\
do {											\
	int __errno = errno;								\
											\
	static_assert_is_vm(vm);							\
											\
	if (cond)									\
		break;									\
											\
	if (errno == EIO &&								\
	    __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)KVM_CAP_USER_MEMORY) < 0) {	\
		TEST_ASSERT(errno == EIO, "KVM killed the VM, should return -EIO");	\
		TEST_FAIL("KVM killed/bugged the VM, check the kernel log for clues");	\
	}										\
	errno = __errno;								\
	TEST_ASSERT(cond, __KVM_IOCTL_ERROR(name, ret));				\
} while (0)

#define TEST_ASSERT_VM_VCPU_IOCTL(cond, cmd, ret, vm)		\
	__TEST_ASSERT_VM_VCPU_IOCTL(cond, #cmd, ret, vm)
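
/*
 * Typical usage sketch, mirroring vm_check_cap() and vm_get_stats_fd() below:
 * pass the raw return value and the ioctl name so a failure reports either
 * the errno or a dead/bugged VM.
 *
 *	int fd = __vm_ioctl(vm, KVM_GET_STATS_FD, NULL);
 *
 *	TEST_ASSERT_VM_VCPU_IOCTL(fd >= 0, KVM_GET_STATS_FD, fd, vm);
 */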

#define vm_ioctl(vm, cmd, arg)					\
({								\
	int ret = __vm_ioctl(vm, cmd, arg);			\
								\
	__TEST_ASSERT_VM_VCPU_IOCTL(!ret, #cmd, ret, vm);	\
})

static __always_inline void static_assert_is_vcpu(struct kvm_vcpu *vcpu) { }

#define __vcpu_ioctl(vcpu, cmd, arg)				\
({								\
	static_assert_is_vcpu(vcpu);				\
	kvm_do_ioctl((vcpu)->fd, cmd, arg);			\
})

#define vcpu_ioctl(vcpu, cmd, arg)					\
({									\
	int ret = __vcpu_ioctl(vcpu, cmd, arg);				\
									\
	__TEST_ASSERT_VM_VCPU_IOCTL(!ret, #cmd, ret, (vcpu)->vm);	\
})

/*
 * Looks up and returns the value corresponding to the capability
 * (KVM_CAP_*) given by cap.
 */
static inline int vm_check_cap(struct kvm_vm *vm, long cap)
{
	int ret = __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)cap);

	TEST_ASSERT_VM_VCPU_IOCTL(ret >= 0, KVM_CHECK_EXTENSION, ret, vm);
	return ret;
}

static inline int __vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
{
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

	return __vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
}
static inline void vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
{
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

	vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
}

static inline void vm_set_memory_attributes(struct kvm_vm *vm, uint64_t gpa,
					    uint64_t size, uint64_t attributes)
{
	struct kvm_memory_attributes attr = {
		.attributes = attributes,
		.address = gpa,
		.size = size,
		.flags = 0,
	};

	/*
	 * KVM_SET_MEMORY_ATTRIBUTES overwrites _all_ attributes. These flows
	 * need significant enhancements to support multiple attributes.
	 */
	TEST_ASSERT(!attributes || attributes == KVM_MEMORY_ATTRIBUTE_PRIVATE,
		    "Update me to support multiple attributes!");

	vm_ioctl(vm, KVM_SET_MEMORY_ATTRIBUTES, &attr);
}

static inline void vm_mem_set_private(struct kvm_vm *vm, uint64_t gpa,
				      uint64_t size)
{
	vm_set_memory_attributes(vm, gpa, size, KVM_MEMORY_ATTRIBUTE_PRIVATE);
}

static inline void vm_mem_set_shared(struct kvm_vm *vm, uint64_t gpa,
				     uint64_t size)
{
	vm_set_memory_attributes(vm, gpa, size, 0);
}
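
/*
 * Conversion sketch for a guest_memfd-backed range (gpa/size are whatever
 * range the test is exercising): flip the range to private, let the guest
 * touch it, then flip it back to shared.
 *
 *	vm_mem_set_private(vm, gpa, size);
 *	... guest accesses the range as private memory ...
 *	vm_mem_set_shared(vm, gpa, size);
 */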

void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t gpa, uint64_t size,
			    bool punch_hole);

static inline void vm_guest_mem_punch_hole(struct kvm_vm *vm, uint64_t gpa,
					   uint64_t size)
{
	vm_guest_mem_fallocate(vm, gpa, size, true);
}

static inline void vm_guest_mem_allocate(struct kvm_vm *vm, uint64_t gpa,
					 uint64_t size)
{
	vm_guest_mem_fallocate(vm, gpa, size, false);
}

void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size);
const char *vm_guest_mode_string(uint32_t i);

void kvm_vm_free(struct kvm_vm *vmp);
void kvm_vm_restart(struct kvm_vm *vmp);
void kvm_vm_release(struct kvm_vm *vmp);
int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, const vm_vaddr_t gva,
		       size_t len);
void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename);
int kvm_memfd_alloc(size_t size, bool hugepages);

void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);

static inline void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
{
	struct kvm_dirty_log args = { .dirty_bitmap = log, .slot = slot };

	vm_ioctl(vm, KVM_GET_DIRTY_LOG, &args);
}

static inline void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
					  uint64_t first_page, uint32_t num_pages)
{
	struct kvm_clear_dirty_log args = {
		.dirty_bitmap = log,
		.slot = slot,
		.first_page = first_page,
		.num_pages = num_pages
	};

	vm_ioctl(vm, KVM_CLEAR_DIRTY_LOG, &args);
}

static inline uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm)
{
	return __vm_ioctl(vm, KVM_RESET_DIRTY_RINGS, NULL);
}

static inline int vm_get_stats_fd(struct kvm_vm *vm)
{
	int fd = __vm_ioctl(vm, KVM_GET_STATS_FD, NULL);

	TEST_ASSERT_VM_VCPU_IOCTL(fd >= 0, KVM_GET_STATS_FD, fd, vm);
	return fd;
}

static inline void read_stats_header(int stats_fd, struct kvm_stats_header *header)
{
	ssize_t ret;

	ret = pread(stats_fd, header, sizeof(*header), 0);
	TEST_ASSERT(ret == sizeof(*header),
		    "Failed to read '%lu' header bytes, ret = '%ld'",
		    sizeof(*header), ret);
}

struct kvm_stats_desc *read_stats_descriptors(int stats_fd,
					      struct kvm_stats_header *header);

static inline ssize_t get_stats_descriptor_size(struct kvm_stats_header *header)
{
	/*
	 * The base size of the descriptor is defined by KVM's ABI, but the
	 * size of the name field is variable, as far as KVM's ABI is
	 * concerned. For a given instance of KVM, the name field is the same
	 * size for all stats and is provided in the overall stats header.
	 */
	return sizeof(struct kvm_stats_desc) + header->name_size;
}

static inline struct kvm_stats_desc *get_stats_descriptor(struct kvm_stats_desc *stats,
							   int index,
							   struct kvm_stats_header *header)
{
	/*
	 * Note, size_desc includes the size of the name field, which is
	 * variable. i.e. this is NOT equivalent to &stats_desc[i].
	 */
	return (void *)stats + index * get_stats_descriptor_size(header);
}
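
/*
 * Sketch of walking the binary stats descriptors with the helpers above;
 * indexing must go through get_stats_descriptor() because each entry's size
 * depends on header.name_size.
 *
 *	struct kvm_stats_header header;
 *	struct kvm_stats_desc *descs, *desc;
 *	int i, stats_fd = vm_get_stats_fd(vm);
 *
 *	read_stats_header(stats_fd, &header);
 *	descs = read_stats_descriptors(stats_fd, &header);
 *	for (i = 0; i < header.num_desc; i++)
 *		desc = get_stats_descriptor(descs, i, &header);
 */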

void read_stat_data(int stats_fd, struct kvm_stats_header *header,
		    struct kvm_stats_desc *desc, uint64_t *data,
		    size_t max_elements);

void __vm_get_stat(struct kvm_vm *vm, const char *stat_name, uint64_t *data,
		   size_t max_elements);

static inline uint64_t vm_get_stat(struct kvm_vm *vm, const char *stat_name)
{
	uint64_t data;

	__vm_get_stat(vm, stat_name, &data, 1);
	return data;
}

void vm_create_irqchip(struct kvm_vm *vm);

static inline int __vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size,
					  uint64_t flags)
{
	struct kvm_create_guest_memfd guest_memfd = {
		.size = size,
		.flags = flags,
	};

	return __vm_ioctl(vm, KVM_CREATE_GUEST_MEMFD, &guest_memfd);
}

static inline int vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size,
					uint64_t flags)
{
	int fd = __vm_create_guest_memfd(vm, size, flags);

	TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_GUEST_MEMFD, fd));
	return fd;
}

void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
			       uint64_t gpa, uint64_t size, void *hva);
int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
				uint64_t gpa, uint64_t size, void *hva);
void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
				uint64_t gpa, uint64_t size, void *hva,
				uint32_t guest_memfd, uint64_t guest_memfd_offset);
int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
				 uint64_t gpa, uint64_t size, void *hva,
				 uint32_t guest_memfd, uint64_t guest_memfd_offset);

void vm_userspace_mem_region_add(struct kvm_vm *vm,
	enum vm_mem_backing_src_type src_type,
	uint64_t guest_paddr, uint32_t slot, uint64_t npages,
	uint32_t flags);
void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
		uint64_t guest_paddr, uint32_t slot, uint64_t npages,
		uint32_t flags, int guest_memfd_fd, uint64_t guest_memfd_offset);

#ifndef vm_arch_has_protected_memory
static inline bool vm_arch_has_protected_memory(struct kvm_vm *vm)
{
	return false;
}
#endif

void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
void vm_populate_vaddr_bitmap(struct kvm_vm *vm);
vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
			    enum kvm_mem_region_type type);
vm_vaddr_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz,
				 vm_vaddr_t vaddr_min,
				 enum kvm_mem_region_type type);
vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm,
				 enum kvm_mem_region_type type);
vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm);

void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
	      unsigned int npages);
void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa);

static inline vm_paddr_t vm_untag_gpa(struct kvm_vm *vm, vm_paddr_t gpa)
{
	return gpa & ~vm->gpa_tag_mask;
}

void vcpu_run(struct kvm_vcpu *vcpu);
int _vcpu_run(struct kvm_vcpu *vcpu);

static inline int __vcpu_run(struct kvm_vcpu *vcpu)
{
	return __vcpu_ioctl(vcpu, KVM_RUN, NULL);
}

void vcpu_run_complete_io(struct kvm_vcpu *vcpu);
struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu);

static inline void vcpu_enable_cap(struct kvm_vcpu *vcpu, uint32_t cap,
				   uint64_t arg0)
{
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

	vcpu_ioctl(vcpu, KVM_ENABLE_CAP, &enable_cap);
}

static inline void vcpu_guest_debug_set(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *debug)
{
	vcpu_ioctl(vcpu, KVM_SET_GUEST_DEBUG, debug);
}

static inline void vcpu_mp_state_get(struct kvm_vcpu *vcpu,
				     struct kvm_mp_state *mp_state)
{
	vcpu_ioctl(vcpu, KVM_GET_MP_STATE, mp_state);
}
static inline void vcpu_mp_state_set(struct kvm_vcpu *vcpu,
				     struct kvm_mp_state *mp_state)
{
	vcpu_ioctl(vcpu, KVM_SET_MP_STATE, mp_state);
}

static inline void vcpu_regs_get(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_ioctl(vcpu, KVM_GET_REGS, regs);
}

static inline void vcpu_regs_set(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_ioctl(vcpu, KVM_SET_REGS, regs);
}
static inline void vcpu_sregs_get(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	vcpu_ioctl(vcpu, KVM_GET_SREGS, sregs);
}
static inline void vcpu_sregs_set(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	vcpu_ioctl(vcpu, KVM_SET_SREGS, sregs);
}
static inline int _vcpu_sregs_set(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	return __vcpu_ioctl(vcpu, KVM_SET_SREGS, sregs);
}
static inline void vcpu_fpu_get(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_ioctl(vcpu, KVM_GET_FPU, fpu);
}
static inline void vcpu_fpu_set(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_ioctl(vcpu, KVM_SET_FPU, fpu);
}

static inline int __vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id, void *addr)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)addr };

	return __vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
}
static inline int __vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };

	return __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
}
static inline void vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id, void *addr)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)addr };

	vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
}
static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };

	vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
}

#ifdef __KVM_HAVE_VCPU_EVENTS
static inline void vcpu_events_get(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	vcpu_ioctl(vcpu, KVM_GET_VCPU_EVENTS, events);
}
static inline void vcpu_events_set(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	vcpu_ioctl(vcpu, KVM_SET_VCPU_EVENTS, events);
}
#endif
#ifdef __x86_64__
static inline void vcpu_nested_state_get(struct kvm_vcpu *vcpu,
					 struct kvm_nested_state *state)
{
	vcpu_ioctl(vcpu, KVM_GET_NESTED_STATE, state);
}
static inline int __vcpu_nested_state_set(struct kvm_vcpu *vcpu,
					  struct kvm_nested_state *state)
{
	return __vcpu_ioctl(vcpu, KVM_SET_NESTED_STATE, state);
}

static inline void vcpu_nested_state_set(struct kvm_vcpu *vcpu,
					 struct kvm_nested_state *state)
{
	vcpu_ioctl(vcpu, KVM_SET_NESTED_STATE, state);
}
#endif
static inline int vcpu_get_stats_fd(struct kvm_vcpu *vcpu)
{
	int fd = __vcpu_ioctl(vcpu, KVM_GET_STATS_FD, NULL);

	TEST_ASSERT_VM_VCPU_IOCTL(fd >= 0, KVM_GET_STATS_FD, fd, vcpu->vm);
	return fd;
}

int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr);

static inline void kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr)
{
	int ret = __kvm_has_device_attr(dev_fd, group, attr);

	TEST_ASSERT(!ret, "KVM_HAS_DEVICE_ATTR failed, rc: %i errno: %i", ret, errno);
}

int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val);

static inline void kvm_device_attr_get(int dev_fd, uint32_t group,
				       uint64_t attr, void *val)
{
	int ret = __kvm_device_attr_get(dev_fd, group, attr, val);

	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_GET_DEVICE_ATTR, ret));
}

int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val);

static inline void kvm_device_attr_set(int dev_fd, uint32_t group,
				       uint64_t attr, void *val)
{
	int ret = __kvm_device_attr_set(dev_fd, group, attr, val);

	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_DEVICE_ATTR, ret));
}

static inline int __vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
					  uint64_t attr)
{
	return __kvm_has_device_attr(vcpu->fd, group, attr);
}

static inline void vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
					uint64_t attr)
{
	kvm_has_device_attr(vcpu->fd, group, attr);
}

static inline int __vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
					 uint64_t attr, void *val)
{
	return __kvm_device_attr_get(vcpu->fd, group, attr, val);
}

static inline void vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
					uint64_t attr, void *val)
{
	kvm_device_attr_get(vcpu->fd, group, attr, val);
}

static inline int __vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
					 uint64_t attr, void *val)
{
	return __kvm_device_attr_set(vcpu->fd, group, attr, val);
}

static inline void vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
					uint64_t attr, void *val)
{
	kvm_device_attr_set(vcpu->fd, group, attr, val);
}

int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type);
int __kvm_create_device(struct kvm_vm *vm, uint64_t type);

static inline int kvm_create_device(struct kvm_vm *vm, uint64_t type)
{
	int fd = __kvm_create_device(vm, type);

	TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_DEVICE, fd));
	return fd;
}

void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu);

/*
 * VM VCPU Args Set
 *
 * Input Args:
 *   vcpu - vCPU whose entry point arguments are to be set
 *   num - number of arguments
 *   ... - arguments, each of type uint64_t
 *
 * Output Args: None
 *
 * Return: None
 *
 * Sets the first @num input parameters for the function at @vcpu's entry point,
 * per the C calling convention of the architecture, to the values given as
 * variable args. Each of the variable args is expected to be of type uint64_t.
 * The maximum @num can be is specific to the architecture.
 */
void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...);
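
/*
 * Usage sketch: pass two uint64_t values to a guest function whose prototype
 * in the guest is, e.g., void guest_code(uint64_t arg0, uint64_t arg1).
 *
 *	vcpu_args_set(vcpu, 2, arg0, arg1);
 */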

void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);

#define KVM_MAX_IRQ_ROUTES		4096

struct kvm_irq_routing *kvm_gsi_routing_create(void);
void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing,
				 uint32_t gsi, uint32_t pin);
int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);

const char *exit_reason_str(unsigned int exit_reason);

vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
			     uint32_t memslot);
vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
				vm_paddr_t paddr_min, uint32_t memslot,
				bool protected);
vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);

static inline vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
					    vm_paddr_t paddr_min, uint32_t memslot)
{
	/*
	 * By default, allocate memory as protected for VMs that support
	 * protected memory, as the majority of memory for such VMs is
	 * protected, i.e. using shared memory is effectively opt-in.
	 */
	return __vm_phy_pages_alloc(vm, num, paddr_min, memslot,
				    vm_arch_has_protected_memory(vm));
}

/*
 * ____vm_create() does KVM_CREATE_VM and little else. __vm_create() also
 * loads the test binary into guest memory and creates an IRQ chip (x86 only).
 * __vm_create() does NOT create vCPUs, @nr_runnable_vcpus is used purely to
 * calculate the amount of memory needed for per-vCPU data, e.g. stacks.
 */
struct kvm_vm *____vm_create(struct vm_shape shape);
struct kvm_vm *__vm_create(struct vm_shape shape, uint32_t nr_runnable_vcpus,
			   uint64_t nr_extra_pages);

static inline struct kvm_vm *vm_create_barebones(void)
{
	return ____vm_create(VM_SHAPE_DEFAULT);
}

#ifdef __x86_64__
static inline struct kvm_vm *vm_create_barebones_protected_vm(void)
{
	const struct vm_shape shape = {
		.mode = VM_MODE_DEFAULT,
		.type = KVM_X86_SW_PROTECTED_VM,
	};

	return ____vm_create(shape);
}
#endif

static inline struct kvm_vm *vm_create(uint32_t nr_runnable_vcpus)
{
	return __vm_create(VM_SHAPE_DEFAULT, nr_runnable_vcpus, 0);
}

struct kvm_vm *__vm_create_with_vcpus(struct vm_shape shape, uint32_t nr_vcpus,
				      uint64_t extra_mem_pages,
				      void *guest_code, struct kvm_vcpu *vcpus[]);

static inline struct kvm_vm *vm_create_with_vcpus(uint32_t nr_vcpus,
						  void *guest_code,
						  struct kvm_vcpu *vcpus[])
{
	return __vm_create_with_vcpus(VM_SHAPE_DEFAULT, nr_vcpus, 0,
				      guest_code, vcpus);
}

struct kvm_vm *__vm_create_shape_with_one_vcpu(struct vm_shape shape,
					       struct kvm_vcpu **vcpu,
					       uint64_t extra_mem_pages,
					       void *guest_code);

/*
 * Create a VM with a single vCPU with reasonable defaults and @extra_mem_pages
 * additional pages of guest memory. Returns the VM and vCPU (via out param).
 */
static inline struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
						       uint64_t extra_mem_pages,
						       void *guest_code)
{
	return __vm_create_shape_with_one_vcpu(VM_SHAPE_DEFAULT, vcpu,
					       extra_mem_pages, guest_code);
}

static inline struct kvm_vm *vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
						     void *guest_code)
{
	return __vm_create_with_one_vcpu(vcpu, 0, guest_code);
}
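
/*
 * Minimal sketch of the common single-vCPU test setup built on the helpers
 * above (guest_code being whatever function the test runs in the guest):
 *
 *	struct kvm_vcpu *vcpu;
 *	struct kvm_vm *vm;
 *
 *	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
 *	vcpu_run(vcpu);
 *	kvm_vm_free(vm);
 */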

static inline struct kvm_vm *vm_create_shape_with_one_vcpu(struct vm_shape shape,
							    struct kvm_vcpu **vcpu,
							    void *guest_code)
{
	return __vm_create_shape_with_one_vcpu(shape, vcpu, 0, guest_code);
}

struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);

void kvm_pin_this_task_to_pcpu(uint32_t pcpu);
void kvm_print_vcpu_pinning_help(void);
void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[],
			    int nr_vcpus);

unsigned long vm_compute_max_gfn(struct kvm_vm *vm);
unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size);
unsigned int vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages);
unsigned int vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages);
static inline unsigned int
vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
{
	unsigned int n;
	n = vm_num_guest_pages(mode, vm_num_host_pages(mode, num_guest_pages));
#ifdef __s390x__
	/* s390 requires 1M aligned guest sizes */
	n = (n + 255) & ~255;
#endif
	return n;
}
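
/*
 * Worked example of the s390 rounding above, assuming 4KiB pages: the mask
 * rounds up to a multiple of 256 pages (1MiB), so a request for 300 guest
 * pages is adjusted to (300 + 255) & ~255 = 512 pages.
 */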

#define sync_global_to_guest(vm, g) ({				\
	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
	memcpy(_p, &(g), sizeof(g));				\
})

#define sync_global_from_guest(vm, g) ({			\
	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
	memcpy(&(g), _p, sizeof(g));				\
})

/*
 * Write a global value, but only in the VM's (guest's) domain. Primarily used
 * for "globals" that hold per-VM values (VMs always duplicate code and global
 * data into their own region of physical memory), but can be used anytime it's
 * undesirable to change the host's copy of the global.
 */
#define write_guest_global(vm, g, val) ({			\
	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
	typeof(g) _val = val;					\
								\
	memcpy(_p, &(_val), sizeof(g));				\
})
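
/*
 * Illustrative flow for a per-VM global (test_flag is a hypothetical test
 * variable): set the host's copy, push it into the guest's copy, and later
 * pull back whatever the guest wrote.
 *
 *	static bool test_flag;
 *
 *	test_flag = true;
 *	sync_global_to_guest(vm, test_flag);
 *	... run the vCPU ...
 *	sync_global_from_guest(vm, test_flag);
 */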

void assert_on_unhandled_exception(struct kvm_vcpu *vcpu);

void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu,
		    uint8_t indent);

static inline void vcpu_dump(FILE *stream, struct kvm_vcpu *vcpu,
			     uint8_t indent)
{
	vcpu_arch_dump(stream, vcpu, indent);
}

/*
 * Adds a vCPU with reasonable defaults (e.g. a stack)
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpu_id - The id of the VCPU to add to the VM.
 */
struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code);

static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
					   void *guest_code)
{
	struct kvm_vcpu *vcpu = vm_arch_vcpu_add(vm, vcpu_id);

	vcpu_arch_set_entry_point(vcpu, guest_code);

	return vcpu;
}

/* Re-create a vCPU after restarting a VM, e.g. for state save/restore tests. */
struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id);

static inline struct kvm_vcpu *vm_vcpu_recreate(struct kvm_vm *vm,
						uint32_t vcpu_id)
{
	return vm_arch_vcpu_recreate(vm, vcpu_id);
}

void vcpu_arch_free(struct kvm_vcpu *vcpu);

void virt_arch_pgd_alloc(struct kvm_vm *vm);

static inline void virt_pgd_alloc(struct kvm_vm *vm)
{
	virt_arch_pgd_alloc(vm);
}

/*
 * VM Virtual Page Map
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vaddr - VM Virtual Address
 *   paddr - VM Physical Address
 *
 * Output Args: None
 *
 * Return: None
 *
 * Within @vm, creates a virtual translation for the page starting
 * at @vaddr to the page starting at @paddr.
 */
void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr);

static inline void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
{
	virt_arch_pg_map(vm, vaddr, paddr);
}

/*
 * Address Guest Virtual to Guest Physical
 *
 * Input Args:
 *   vm - Virtual Machine
 *   gva - VM virtual address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent VM physical address
 *
 * Returns the VM physical address of the translated VM virtual
 * address given by @gva.
 */
vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);

static inline vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
	return addr_arch_gva2gpa(vm, gva);
}

/*
 * Virtual Translation Tables Dump
 *
 * Input Args:
 *   stream - Output FILE stream
 *   vm - Virtual Machine
 *   indent - Left margin indent amount
 *
 * Output Args: None
 *
 * Return: None
 *
 * Dumps to the FILE stream given by @stream, the contents of all the
 * virtual translation tables for the VM given by @vm.
 */
void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);

static inline void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	virt_arch_dump(stream, vm, indent);
}

static inline int __vm_disable_nx_huge_pages(struct kvm_vm *vm)
{
	return __vm_enable_cap(vm, KVM_CAP_VM_DISABLE_NX_HUGE_PAGES, 0);
}

/*
 * Arch hook that is invoked via a constructor, i.e. before executing main(),
 * to allow for arch-specific setup that is common to all tests, e.g. computing
 * the default guest "mode".
 */
void kvm_selftest_arch_init(void);

void kvm_arch_vm_post_create(struct kvm_vm *vm);

bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr);

uint32_t guest_get_vcpuid(void);

#endif /* SELFTEST_KVM_UTIL_BASE_H */