// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <semaphore.h>
#include <sys/types.h>
#include <signal.h>
#include <errno.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/atomic.h>

#include "kvm_util.h"
#include "test_util.h"
#include "guest_modes.h"
#include "processor.h"

static void guest_code(uint64_t start_gpa, uint64_t end_gpa, uint64_t stride)
{
        uint64_t gpa;

        for (gpa = start_gpa; gpa < end_gpa; gpa += stride)
                *((volatile uint64_t *)gpa) = gpa;

        GUEST_DONE();
}
29 | ||
30 | struct vcpu_info { | |
3468fd7d | 31 | struct kvm_vcpu *vcpu; |
b58c55d5 SC |
32 | uint64_t start_gpa; |
33 | uint64_t end_gpa; | |
34 | }; | |
35 | ||
36 | static int nr_vcpus; | |
37 | static atomic_t rendezvous; | |
38 | ||
static void rendezvous_with_boss(void)
{
        int orig = atomic_read(&rendezvous);

        if (orig > 0) {
                atomic_dec_and_test(&rendezvous);
                while (atomic_read(&rendezvous) > 0)
                        cpu_relax();
        } else {
                atomic_inc(&rendezvous);
                while (atomic_read(&rendezvous) < 0)
                        cpu_relax();
        }
}
53 | ||
768e9a61 | 54 | static void run_vcpu(struct kvm_vcpu *vcpu) |
b58c55d5 | 55 | { |
768e9a61 SC |
56 | vcpu_run(vcpu); |
57 | ASSERT_EQ(get_ucall(vcpu, NULL), UCALL_DONE); | |
b58c55d5 SC |
58 | } |
59 | ||
static void *vcpu_worker(void *data)
{
        struct vcpu_info *info = data;
        struct kvm_vcpu *vcpu = info->vcpu;
        struct kvm_vm *vm = vcpu->vm;
        struct kvm_sregs sregs;
        struct kvm_regs regs;

        vcpu_args_set(vcpu, 3, info->start_gpa, info->end_gpa,
                      vm_get_page_size(vm));

        /* Snapshot regs before the first run. */
        vcpu_regs_get(vcpu, &regs);
        rendezvous_with_boss();

        run_vcpu(vcpu);
        rendezvous_with_boss();
        vcpu_regs_set(vcpu, &regs);
        vcpu_sregs_get(vcpu, &sregs);
#ifdef __x86_64__
        /* Toggle CR0.WP to trigger a MMU context reset. */
        sregs.cr0 ^= X86_CR0_WP;
#endif
        vcpu_sregs_set(vcpu, &sregs);
        rendezvous_with_boss();

        run_vcpu(vcpu);
        rendezvous_with_boss();

        return NULL;
}

static pthread_t *spawn_workers(struct kvm_vm *vm, struct kvm_vcpu **vcpus,
                                uint64_t start_gpa, uint64_t end_gpa)
{
        struct vcpu_info *info;
        uint64_t gpa, nr_bytes;
        pthread_t *threads;
        int i;

        threads = malloc(nr_vcpus * sizeof(*threads));
        TEST_ASSERT(threads, "Failed to allocate vCPU threads");

        info = malloc(nr_vcpus * sizeof(*info));
        TEST_ASSERT(info, "Failed to allocate vCPU gpa ranges");

        nr_bytes = ((end_gpa - start_gpa) / nr_vcpus) &
                   ~((uint64_t)vm_get_page_size(vm) - 1);
        TEST_ASSERT(nr_bytes, "C'mon, no way you have %d CPUs", nr_vcpus);

        for (i = 0, gpa = start_gpa; i < nr_vcpus; i++, gpa += nr_bytes) {
                info[i].vcpu = vcpus[i];
                info[i].start_gpa = gpa;
                info[i].end_gpa = gpa + nr_bytes;
                pthread_create(&threads[i], NULL, vcpu_worker, &info[i]);
        }
        return threads;
}

static void rendezvous_with_vcpus(struct timespec *time, const char *name)
{
        int i, rendezvoused;

        pr_info("Waiting for vCPUs to finish %s...\n", name);

        rendezvoused = atomic_read(&rendezvous);
        for (i = 0; abs(rendezvoused) != 1; i++) {
                usleep(100);
                if (!(i & 0x3f))
                        pr_info("\r%d vCPUs haven't rendezvoused...",
                                abs(rendezvoused) - 1);
                rendezvoused = atomic_read(&rendezvous);
        }

        clock_gettime(CLOCK_MONOTONIC, time);

        /* Release the vCPUs after getting the time of the previous action. */
        pr_info("\rAll vCPUs finished %s, releasing...\n", name);
        if (rendezvoused > 0)
                atomic_set(&rendezvous, -nr_vcpus - 1);
        else
                atomic_set(&rendezvous, nr_vcpus + 1);
}

static void calc_default_nr_vcpus(void)
{
        cpu_set_t possible_mask;
        int r;

        r = sched_getaffinity(0, sizeof(possible_mask), &possible_mask);
        TEST_ASSERT(!r, "sched_getaffinity failed, errno = %d (%s)",
                    errno, strerror(errno));

        nr_vcpus = CPU_COUNT(&possible_mask) * 3/4;
        TEST_ASSERT(nr_vcpus > 0, "Uh, no CPUs?");
}

int main(int argc, char *argv[])
{
        /*
         * Skip the first 4gb and slot0.  slot0 maps <1gb and is used to back
         * the guest's code, stack, and page tables.  Because selftests creates
         * an IRQCHIP, a.k.a. a local APIC, KVM creates an internal memslot
         * just below the 4gb boundary.  This test could create memory at
         * 1gb-3gb, but it's simpler to skip straight to 4gb.
         */
        const uint64_t size_1gb = (1 << 30);
        const uint64_t start_gpa = (4ull * size_1gb);
        const int first_slot = 1;

        struct timespec time_start, time_run1, time_reset, time_run2;
        uint64_t max_gpa, gpa, slot_size, max_mem, i;
        int max_slots, slot, opt, fd;
        bool hugepages = false;
        struct kvm_vcpu **vcpus;
        pthread_t *threads;
        struct kvm_vm *vm;
        void *mem;

        /*
         * Default to 2gb so that maxing out systems with MAXPHYADDR=46, which
         * are quite common for x86, requires changing only max_mem (KVM allows
         * 32k memslots, 32k * 2gb == ~64tb of guest memory).
         */
        slot_size = 2 * size_1gb;

        max_slots = kvm_check_cap(KVM_CAP_NR_MEMSLOTS);
        TEST_ASSERT(max_slots > first_slot, "KVM is broken");

        /* All KVM MMUs should be able to survive a 128gb guest. */
        max_mem = 128 * size_1gb;

        calc_default_nr_vcpus();

        while ((opt = getopt(argc, argv, "c:h:m:s:H")) != -1) {
                switch (opt) {
                case 'c':
                        nr_vcpus = atoi(optarg);
                        TEST_ASSERT(nr_vcpus > 0, "number of vcpus must be >0");
                        break;
                case 'm':
                        max_mem = atoi(optarg) * size_1gb;
                        TEST_ASSERT(max_mem > 0, "memory size must be >0");
                        break;
                case 's':
                        slot_size = atoi(optarg) * size_1gb;
                        TEST_ASSERT(slot_size > 0, "slot size must be >0");
                        break;
                case 'H':
                        hugepages = true;
                        break;
                case 'h':
                default:
                        printf("usage: %s [-c nr_vcpus] [-m max_mem_in_gb] [-s slot_size_in_gb] [-H]\n", argv[0]);
                        exit(1);
                }
        }
217 | ||
3468fd7d SC |
218 | vcpus = malloc(nr_vcpus * sizeof(*vcpus)); |
219 | TEST_ASSERT(vcpus, "Failed to allocate vCPU array"); | |
220 | ||
221 | vm = vm_create_with_vcpus(nr_vcpus, guest_code, vcpus); | |
b58c55d5 SC |
222 | |
223 | max_gpa = vm_get_max_gfn(vm) << vm_get_page_shift(vm); | |
224 | TEST_ASSERT(max_gpa > (4 * slot_size), "MAXPHYADDR <4gb "); | |
225 | ||
226 | fd = kvm_memfd_alloc(slot_size, hugepages); | |
227 | mem = mmap(NULL, slot_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); | |
228 | TEST_ASSERT(mem != MAP_FAILED, "mmap() failed"); | |
229 | ||
230 | TEST_ASSERT(!madvise(mem, slot_size, MADV_NOHUGEPAGE), "madvise() failed"); | |
231 | ||
232 | /* Pre-fault the memory to avoid taking mmap_sem on guest page faults. */ | |
233 | for (i = 0; i < slot_size; i += vm_get_page_size(vm)) | |
234 | ((uint8_t *)mem)[i] = 0xaa; | |
235 | ||
        gpa = 0;
        for (slot = first_slot; slot < max_slots; slot++) {
                gpa = start_gpa + ((slot - first_slot) * slot_size);
                if (gpa + slot_size > max_gpa)
                        break;

                if ((gpa - start_gpa) >= max_mem)
                        break;

                vm_set_user_memory_region(vm, slot, 0, gpa, slot_size, mem);

#ifdef __x86_64__
                /* Identity map memory in the guest using 1gb pages. */
                for (i = 0; i < slot_size; i += size_1gb)
                        __virt_pg_map(vm, gpa + i, gpa + i, PG_LEVEL_1G);
#else
                for (i = 0; i < slot_size; i += vm_get_page_size(vm))
                        virt_pg_map(vm, gpa + i, gpa + i);
#endif
        }
256 | ||
257 | atomic_set(&rendezvous, nr_vcpus + 1); | |
3468fd7d SC |
258 | threads = spawn_workers(vm, vcpus, start_gpa, gpa); |
259 | ||
260 | free(vcpus); | |
261 | vcpus = NULL; | |
b58c55d5 SC |
262 | |
263 | pr_info("Running with %lugb of guest memory and %u vCPUs\n", | |
264 | (gpa - start_gpa) / size_1gb, nr_vcpus); | |
265 | ||
266 | rendezvous_with_vcpus(&time_start, "spawning"); | |
267 | rendezvous_with_vcpus(&time_run1, "run 1"); | |
268 | rendezvous_with_vcpus(&time_reset, "reset"); | |
269 | rendezvous_with_vcpus(&time_run2, "run 2"); | |
270 | ||
271 | time_run2 = timespec_sub(time_run2, time_reset); | |
272 | time_reset = timespec_sub(time_reset, time_run1); | |
273 | time_run1 = timespec_sub(time_run1, time_start); | |
274 | ||
275 | pr_info("run1 = %ld.%.9lds, reset = %ld.%.9lds, run2 = %ld.%.9lds\n", | |
276 | time_run1.tv_sec, time_run1.tv_nsec, | |
277 | time_reset.tv_sec, time_reset.tv_nsec, | |
278 | time_run2.tv_sec, time_run2.tv_nsec); | |
279 | ||
280 | /* | |
281 | * Delete even numbered slots (arbitrary) and unmap the first half of | |
282 | * the backing (also arbitrary) to verify KVM correctly drops all | |
283 | * references to the removed regions. | |
284 | */ | |
285 | for (slot = (slot - 1) & ~1ull; slot >= first_slot; slot -= 2) | |
286 | vm_set_user_memory_region(vm, slot, 0, 0, 0, NULL); | |
287 | ||
288 | munmap(mem, slot_size / 2); | |
289 | ||
290 | /* Sanity check that the vCPUs actually ran. */ | |
291 | for (i = 0; i < nr_vcpus; i++) | |
292 | pthread_join(threads[i], NULL); | |
293 | ||
294 | /* | |
295 | * Deliberately exit without deleting the remaining memslots or closing | |
296 | * kvm_fd to test cleanup via mmu_notifier.release. | |
297 | */ | |
298 | } |