1 // SPDX-License-Identifier: GPL-2.0
3 * Hyper-V HvCallSendSyntheticClusterIpi{,Ex} tests
5 * Copyright (C) 2022, Red Hat, Inc.
9 #define _GNU_SOURCE /* for program_invocation_short_name */
15 #include "test_util.h"
/*
 * Two receiver vCPU ids, deliberately chosen so one lands in sparse-set
 * bank 0 (VP index < 64) and the other in bank 1 (VP index >= 64).
 */
#define RECEIVER_VCPU_ID_1 2
#define RECEIVER_VCPU_ID_2 65

/* Vector the synthetic cluster IPIs are sent on (handled by guest_ipi_handler) */
#define IPI_VECTOR 0xfe

/*
 * Per-vCPU IPI delivery counters, indexed by VP index (HV_X64_MSR_VP_INDEX).
 * Shared between the sender and receiver vCPUs, hence volatile; a value of
 * (u64)-1 is used by receivers to signal "I'm up" before being reset to 0.
 */
static volatile uint64_t ipis_rcvd[RECEIVER_VCPU_ID_2 + 1];
/*
 * Hyper-V "generic set" formats for struct hv_vpset.format: a sparse set of
 * 64-bit 4K banks selected by valid_bank_mask (HV_GENERIC_SET_SPARSE_4K).
 * HV_GENERIC_SET_ALL ("all vCPUs", no banks) is also used below.
 */
enum HV_GENERIC_SET_FORMAT {
	HV_GENERIC_SET_SPARSE_4K,
36 /* HvCallSendSyntheticClusterIpi hypercall */
/* HvCallSendSyntheticClusterIpiEx hypercall input layout */
struct hv_send_ipi_ex {
	/* vp_set selects the target vCPUs; sender fills format/banks below */
	struct hv_vpset vp_set;
/*
 * Guest-side Hyper-V setup: advertise a Linux guest OS id, then point the
 * hypercall page MSR at @pgs_gpa so hyperv_hypercall() can be used afterwards.
 */
static inline void hv_init(vm_vaddr_t pgs_gpa)
	wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
	wrmsr(HV_X64_MSR_HYPERCALL, pgs_gpa);
/*
 * Body run by both receiver vCPUs: read our VP index, publish readiness by
 * writing (u64)-1 into our ipis_rcvd[] slot, then wait halted for IPIs
 * (delivered through guest_ipi_handler on IPI_VECTOR).
 */
static void receiver_code(void *hcall_page, vm_vaddr_t pgs_gpa)
	vcpu_id = rdmsr(HV_X64_MSR_VP_INDEX);

	/* Signal sender vCPU we're ready */
	ipis_rcvd[vcpu_id] = (u64)-1;

	/* Halt with interrupts enabled; mask again once an IPI wakes us */
	asm volatile("sti; hlt; cli");
/*
 * IDT handler for IPI_VECTOR: identify the receiving vCPU via the VP index
 * MSR (its ipis_rcvd[] counter is what the sender asserts on) and ack the
 * interrupt through the Hyper-V EOI MSR.
 */
static void guest_ipi_handler(struct ex_regs *regs)
	u32 vcpu_id = rdmsr(HV_X64_MSR_VP_INDEX);

	wrmsr(HV_X64_MSR_EOI, 1);
/*
 * Crude delay loop: give previously sent IPIs time to be delivered and
 * handled before the sender inspects ipis_rcvd[]. The bound is arbitrary.
 */
static inline void nop_loop(void)
	for (i = 0; i < 100000000; i++)
/*
 * Sender vCPU: exercises HvCallSendSyntheticClusterIpi and
 * HvCallSendSyntheticClusterIpiEx in their 'slow' (memory-based input at
 * pgs_gpa), 'fast' (input in GPRs) and 'XMM fast' (input in XMM registers)
 * variants, targeting RECEIVER_VCPU_ID_1 (sparse-set bank 0),
 * RECEIVER_VCPU_ID_2 (bank 1, VP index >= 64), both, and HV_GENERIC_SET_ALL.
 *
 * ipis_expected[0]/[1] track how many IPIs each receiver should have seen;
 * after every hypercall both counters are checked so the test catches both
 * missing and spurious deliveries. 'stage' is the sync counter reported to
 * the host (matched against uc.args[1] in main()).
 */
static void sender_guest_code(void *hcall_page, vm_vaddr_t pgs_gpa)
	struct hv_send_ipi *ipi = (struct hv_send_ipi *)hcall_page;
	struct hv_send_ipi_ex *ipi_ex = (struct hv_send_ipi_ex *)hcall_page;
	int stage = 1, ipis_expected[2] = {0};

	/* Wait for receiver vCPUs to come up */
	while (!ipis_rcvd[RECEIVER_VCPU_ID_1] || !ipis_rcvd[RECEIVER_VCPU_ID_2])
	/* Reset the "I'm up" markers so real IPI counts start from zero */
	ipis_rcvd[RECEIVER_VCPU_ID_1] = ipis_rcvd[RECEIVER_VCPU_ID_2] = 0;

	/* 'Slow' HvCallSendSyntheticClusterIpi to RECEIVER_VCPU_ID_1 */
	ipi->vector = IPI_VECTOR;
	ipi->cpu_mask = 1 << RECEIVER_VCPU_ID_1;
	hyperv_hypercall(HVCALL_SEND_IPI, pgs_gpa, pgs_gpa + 4096);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ipis_expected[1]);

	/* 'Fast' HvCallSendSyntheticClusterIpi to RECEIVER_VCPU_ID_1 */
	hyperv_hypercall(HVCALL_SEND_IPI | HV_HYPERCALL_FAST_BIT,
			 IPI_VECTOR, 1 << RECEIVER_VCPU_ID_1);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ipis_expected[1]);

	/* 'Slow' HvCallSendSyntheticClusterIpiEx to RECEIVER_VCPU_ID_1 */
	memset(hcall_page, 0, 4096);
	ipi_ex->vector = IPI_VECTOR;
	ipi_ex->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
	/* Bank 0 only (VP indexes 0..63); one varhead qword for the one bank */
	ipi_ex->vp_set.valid_bank_mask = 1 << 0;
	ipi_ex->vp_set.bank_contents[0] = BIT(RECEIVER_VCPU_ID_1);
	hyperv_hypercall(HVCALL_SEND_IPI_EX | (1 << HV_HYPERCALL_VARHEAD_OFFSET),
			 pgs_gpa, pgs_gpa + 4096);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ipis_expected[1]);

	/* 'XMM Fast' HvCallSendSyntheticClusterIpiEx to RECEIVER_VCPU_ID_1 */
	/* Reuses the vp_set just built above: valid_bank_mask + 1 bank via XMM */
	hyperv_write_xmm_input(&ipi_ex->vp_set.valid_bank_mask, 1);
	hyperv_hypercall(HVCALL_SEND_IPI_EX | HV_HYPERCALL_FAST_BIT |
			 (1 << HV_HYPERCALL_VARHEAD_OFFSET),
			 IPI_VECTOR, HV_GENERIC_SET_SPARSE_4K);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ipis_expected[1]);

	/* 'Slow' HvCallSendSyntheticClusterIpiEx to RECEIVER_VCPU_ID_2 */
	memset(hcall_page, 0, 4096);
	ipi_ex->vector = IPI_VECTOR;
	ipi_ex->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
	/* Bank 1 only: RECEIVER_VCPU_ID_2 (65) is bit (65 - 64) of bank 1 */
	ipi_ex->vp_set.valid_bank_mask = 1 << 1;
	ipi_ex->vp_set.bank_contents[0] = BIT(RECEIVER_VCPU_ID_2 - 64);
	hyperv_hypercall(HVCALL_SEND_IPI_EX | (1 << HV_HYPERCALL_VARHEAD_OFFSET),
			 pgs_gpa, pgs_gpa + 4096);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ipis_expected[0]);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ++ipis_expected[1]);

	/* 'XMM Fast' HvCallSendSyntheticClusterIpiEx to RECEIVER_VCPU_ID_2 */
	hyperv_write_xmm_input(&ipi_ex->vp_set.valid_bank_mask, 1);
	hyperv_hypercall(HVCALL_SEND_IPI_EX | HV_HYPERCALL_FAST_BIT |
			 (1 << HV_HYPERCALL_VARHEAD_OFFSET),
			 IPI_VECTOR, HV_GENERIC_SET_SPARSE_4K);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ipis_expected[0]);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ++ipis_expected[1]);

	/* 'Slow' HvCallSendSyntheticClusterIpiEx to both RECEIVER_VCPU_ID_{1,2} */
	memset(hcall_page, 0, 4096);
	ipi_ex->vector = IPI_VECTOR;
	ipi_ex->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
	/* Banks 0 and 1 valid -> two varhead qwords in the hypercall below */
	ipi_ex->vp_set.valid_bank_mask = 1 << 1 | 1;
	ipi_ex->vp_set.bank_contents[0] = BIT(RECEIVER_VCPU_ID_1);
	ipi_ex->vp_set.bank_contents[1] = BIT(RECEIVER_VCPU_ID_2 - 64);
	hyperv_hypercall(HVCALL_SEND_IPI_EX | (2 << HV_HYPERCALL_VARHEAD_OFFSET),
			 pgs_gpa, pgs_gpa + 4096);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ++ipis_expected[1]);

	/* 'XMM Fast' HvCallSendSyntheticClusterIpiEx to both RECEIVER_VCPU_ID_{1, 2} */
	hyperv_write_xmm_input(&ipi_ex->vp_set.valid_bank_mask, 2);
	hyperv_hypercall(HVCALL_SEND_IPI_EX | HV_HYPERCALL_FAST_BIT |
			 (2 << HV_HYPERCALL_VARHEAD_OFFSET),
			 IPI_VECTOR, HV_GENERIC_SET_SPARSE_4K);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ++ipis_expected[1]);

	/* 'Slow' HvCallSendSyntheticClusterIpiEx to HV_GENERIC_SET_ALL */
	memset(hcall_page, 0, 4096);
	ipi_ex->vector = IPI_VECTOR;
	ipi_ex->vp_set.format = HV_GENERIC_SET_ALL;
	hyperv_hypercall(HVCALL_SEND_IPI_EX, pgs_gpa, pgs_gpa + 4096);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ++ipis_expected[1]);

	/*
	 * 'XMM Fast' HvCallSendSyntheticClusterIpiEx to HV_GENERIC_SET_ALL.
	 * valid_bank_mask is zeroed first; with SET_ALL no banks are consumed,
	 * and no varhead count is encoded in the control word.
	 */
	ipi_ex->vp_set.valid_bank_mask = 0;
	hyperv_write_xmm_input(&ipi_ex->vp_set.valid_bank_mask, 2);
	hyperv_hypercall(HVCALL_SEND_IPI_EX | HV_HYPERCALL_FAST_BIT,
			 IPI_VECTOR, HV_GENERIC_SET_ALL);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_1] == ++ipis_expected[0]);
	GUEST_ASSERT(ipis_rcvd[RECEIVER_VCPU_ID_2] == ++ipis_expected[1]);
/*
 * Host thread running one receiver vCPU. The thread is made asynchronously
 * cancellable because it spends its life inside KVM_RUN and is only ever
 * stopped via pthread_cancel(); returning from the vCPU run is a test
 * failure, since receivers never exit on their own.
 */
static void *vcpu_thread(void *arg)
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)arg;

	r = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old);
	TEST_ASSERT(!r, "pthread_setcanceltype failed on vcpu_id=%u with errno=%d",

	TEST_FAIL("vCPU %u exited unexpectedly", vcpu->id);
/*
 * Tear down a receiver vCPU thread: cancel it (it never exits by itself),
 * join it, and verify it was actually cancelled rather than having returned,
 * which would indicate an unexpected vCPU exit.
 */
static void cancel_join_vcpu_thread(pthread_t thread, struct kvm_vcpu *vcpu)
	r = pthread_cancel(thread);
	TEST_ASSERT(!r, "pthread_cancel on vcpu_id=%d failed with errno=%d",

	r = pthread_join(thread, &retval);
	TEST_ASSERT(!r, "pthread_join on vcpu_id=%d failed with errno=%d",
	/* A cancelled thread joins with the sentinel PTHREAD_CANCELED */
	TEST_ASSERT(retval == PTHREAD_CANCELED,
		    "expected retval=%p, got %p", PTHREAD_CANCELED,
/*
 * Host side: build a VM with one sender vCPU (vcpu[0]) and two receiver
 * vCPUs whose ids match RECEIVER_VCPU_ID_{1,2}, run the receivers on
 * dedicated threads, and drive the sender's ucall sync/abort protocol.
 */
int main(int argc, char *argv[])
	struct kvm_vcpu *vcpu[3];
	vm_vaddr_t hcall_page;
	pthread_t threads[2];

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_SEND_IPI));

	vm = vm_create_with_one_vcpu(&vcpu[0], sender_guest_code);

	/* Hypercall input/output: two pages, zeroed before use */
	hcall_page = vm_vaddr_alloc_pages(vm, 2);
	memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize());

	vm_init_descriptor_tables(vm);

	/* Receiver 1: VP index pinned to its vCPU id via HV_X64_MSR_VP_INDEX */
	vcpu[1] = vm_vcpu_add(vm, RECEIVER_VCPU_ID_1, receiver_code);
	vcpu_init_descriptor_tables(vcpu[1]);
	vcpu_args_set(vcpu[1], 2, hcall_page, addr_gva2gpa(vm, hcall_page));
	vcpu_set_msr(vcpu[1], HV_X64_MSR_VP_INDEX, RECEIVER_VCPU_ID_1);
	vcpu_set_hv_cpuid(vcpu[1]);

	/* Receiver 2: same setup, VP index >= 64 (second sparse-set bank) */
	vcpu[2] = vm_vcpu_add(vm, RECEIVER_VCPU_ID_2, receiver_code);
	vcpu_init_descriptor_tables(vcpu[2]);
	vcpu_args_set(vcpu[2], 2, hcall_page, addr_gva2gpa(vm, hcall_page));
	vcpu_set_msr(vcpu[2], HV_X64_MSR_VP_INDEX, RECEIVER_VCPU_ID_2);
	vcpu_set_hv_cpuid(vcpu[2]);

	vm_install_exception_handler(vm, IPI_VECTOR, guest_ipi_handler);

	/* Sender gets the same hypercall page GVA/GPA pair as the receivers */
	vcpu_args_set(vcpu[0], 2, hcall_page, addr_gva2gpa(vm, hcall_page));
	vcpu_set_hv_cpuid(vcpu[0]);

	r = pthread_create(&threads[0], NULL, vcpu_thread, vcpu[1]);
	TEST_ASSERT(!r, "pthread_create failed errno=%d", r);

	r = pthread_create(&threads[1], NULL, vcpu_thread, vcpu[2]);
	/*
	 * NOTE(review): this reports errno, but pthread_create() returns its
	 * error code in r and does not set errno — the message above uses r;
	 * these two should be consistent.
	 */
	TEST_ASSERT(!r, "pthread_create failed errno=%d", errno);

	TEST_ASSERT_KVM_EXIT_REASON(vcpu[0], KVM_EXIT_IO);

	/* Dispatch the sender's ucalls: sync stages, guest asserts, done */
	switch (get_ucall(vcpu[0], &uc)) {
		/* Guest reports its stage counter; must match ours in lockstep */
		TEST_ASSERT(uc.args[1] == stage,
			    "Unexpected stage: %ld (%d expected)",
		REPORT_GUEST_ASSERT(uc);
		TEST_FAIL("Unknown ucall %lu", uc.cmd);

	cancel_join_vcpu_thread(threads[0], vcpu[1]);
	cancel_join_vcpu_thread(threads[1], vcpu[2]);