Commit | Line | Data |
---|---|---|
20c8ccb1 | 1 | // SPDX-License-Identifier: GPL-2.0-only |
e83d5887 AS |
2 | /* |
3 | * KVM Microsoft Hyper-V emulation | |
4 | * | |
5 | * derived from arch/x86/kvm/x86.c | |
6 | * | |
7 | * Copyright (C) 2006 Qumranet, Inc. | |
8 | * Copyright (C) 2008 Qumranet, Inc. | |
9 | * Copyright IBM Corporation, 2008 | |
10 | * Copyright 2010 Red Hat, Inc. and/or its affiliates. | |
11 | * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com> | |
12 | * | |
13 | * Authors: | |
14 | * Avi Kivity <avi@qumranet.com> | |
15 | * Yaniv Kamay <yaniv@qumranet.com> | |
16 | * Amit Shah <amit.shah@qumranet.com> | |
17 | * Ben-Ami Yassour <benami@il.ibm.com> | |
18 | * Andrey Smetanin <asmetanin@virtuozzo.com> | |
e83d5887 | 19 | */ |
8d20bd63 | 20 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
e83d5887 AS |
21 | |
22 | #include "x86.h" | |
23 | #include "lapic.h" | |
5c919412 | 24 | #include "ioapic.h" |
f97f5a56 | 25 | #include "cpuid.h" |
e83d5887 | 26 | #include "hyperv.h" |
aee73823 | 27 | #include "mmu.h" |
79033beb | 28 | #include "xen.h" |
e83d5887 | 29 | |
b2d8b167 | 30 | #include <linux/cpu.h> |
e83d5887 | 31 | #include <linux/kvm_host.h> |
765eaa0f | 32 | #include <linux/highmem.h> |
32ef5517 | 33 | #include <linux/sched/cputime.h> |
0823570f | 34 | #include <linux/spinlock.h> |
faeb7833 | 35 | #include <linux/eventfd.h> |
32ef5517 | 36 | |
5c919412 | 37 | #include <asm/apicdef.h> |
c58a318f | 38 | #include <asm/mshyperv.h> |
e83d5887 AS |
39 | #include <trace/events/kvm.h> |
40 | ||
41 | #include "trace.h" | |
59508b30 | 42 | #include "irq.h" |
5974565b | 43 | #include "fpu.h" |
e83d5887 | 44 | |
ca7372ac | 45 | #define KVM_HV_MAX_SPARSE_VCPU_SET_BITS DIV_ROUND_UP(KVM_MAX_VCPUS, HV_VCPUS_PER_SPARSE_BANK) |
f21dd494 | 46 | |
db9cf24c VS |
47 | /* |
48 | * As per Hyper-V TLFS, extended hypercalls start from 0x8001 | |
49 | * (HvExtCallQueryCapabilities). The response to this hypercall is a 64-bit value |
50 | * where each bit tells which extended hypercall is available besides | |
51 | * HvExtCallQueryCapabilities. | |
52 | * | |
53 | * 0x8001 - First extended hypercall, HvExtCallQueryCapabilities, no bit | |
54 | * assigned. | |
55 | * | |
56 | * 0x8002 - Bit 0 | |
57 | * 0x8003 - Bit 1 | |
58 | * .. | |
59 | * 0x8041 - Bit 63 | |
60 | * | |
61 | * Therefore, HV_EXT_CALL_MAX = 0x8001 + 64 | |
62 | */ | |
63 | #define HV_EXT_CALL_MAX (HV_EXT_CALL_QUERY_CAPABILITIES + 64) | |
64 | ||
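/*
 * Illustrative sketch (not part of the upstream file): given the numbering
 * described above, the capability bit for an extended hypercall other than
 * HvExtCallQueryCapabilities follows by simple subtraction, e.g.
 * 0x8002 -> bit 0 and 0x8041 -> bit 63. The helper name below is
 * hypothetical and only demonstrates the arithmetic.
 */
#if 0
static inline int hv_ext_call_to_cap_bit(u16 code)
{
	/* HvExtCallQueryCapabilities itself has no capability bit assigned */
	if (code <= HV_EXT_CALL_QUERY_CAPABILITIES || code > HV_EXT_CALL_MAX)
		return -1;
	return code - (HV_EXT_CALL_QUERY_CAPABILITIES + 1);
}
#endif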
8644f771 VK |
65 | static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer, |
66 | bool vcpu_kick); | |
67 | ||
5c919412 AS |
68 | static inline u64 synic_read_sint(struct kvm_vcpu_hv_synic *synic, int sint) |
69 | { | |
70 | return atomic64_read(&synic->sint[sint]); | |
71 | } | |
72 | ||
73 | static inline int synic_get_sint_vector(u64 sint_value) | |
74 | { | |
75 | if (sint_value & HV_SYNIC_SINT_MASKED) | |
76 | return -1; | |
77 | return sint_value & HV_SYNIC_SINT_VECTOR_MASK; | |
78 | } | |
79 | ||
80 | static bool synic_has_vector_connected(struct kvm_vcpu_hv_synic *synic, | |
81 | int vector) | |
82 | { | |
83 | int i; | |
84 | ||
85 | for (i = 0; i < ARRAY_SIZE(synic->sint); i++) { | |
86 | if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector) | |
87 | return true; | |
88 | } | |
89 | return false; | |
90 | } | |
91 | ||
92 | static bool synic_has_vector_auto_eoi(struct kvm_vcpu_hv_synic *synic, | |
93 | int vector) | |
94 | { | |
95 | int i; | |
96 | u64 sint_value; | |
97 | ||
98 | for (i = 0; i < ARRAY_SIZE(synic->sint); i++) { | |
99 | sint_value = synic_read_sint(synic, i); | |
100 | if (synic_get_sint_vector(sint_value) == vector && | |
101 | sint_value & HV_SYNIC_SINT_AUTO_EOI) | |
102 | return true; | |
103 | } | |
104 | return false; | |
105 | } | |
106 | ||
98f65ad4 VK |
107 | static void synic_update_vector(struct kvm_vcpu_hv_synic *synic, |
108 | int vector) | |
109 | { | |
0f250a64 VK |
110 | struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic); |
111 | struct kvm_hv *hv = to_kvm_hv(vcpu->kvm); | |
fe06a0c0 | 112 | bool auto_eoi_old, auto_eoi_new; |
0f250a64 | 113 | |
87a8d795 VK |
114 | if (vector < HV_SYNIC_FIRST_VALID_VECTOR) |
115 | return; | |
116 | ||
98f65ad4 VK |
117 | if (synic_has_vector_connected(synic, vector)) |
118 | __set_bit(vector, synic->vec_bitmap); | |
119 | else | |
120 | __clear_bit(vector, synic->vec_bitmap); | |
121 | ||
fe06a0c0 | 122 | auto_eoi_old = !bitmap_empty(synic->auto_eoi_bitmap, 256); |
0f250a64 | 123 | |
98f65ad4 VK |
124 | if (synic_has_vector_auto_eoi(synic, vector)) |
125 | __set_bit(vector, synic->auto_eoi_bitmap); | |
126 | else | |
127 | __clear_bit(vector, synic->auto_eoi_bitmap); | |
0f250a64 | 128 | |
fe06a0c0 | 129 | auto_eoi_new = !bitmap_empty(synic->auto_eoi_bitmap, 256); |
0f250a64 | 130 | |
fe06a0c0 | 131 | if (auto_eoi_old == auto_eoi_new) |
0f250a64 VK |
132 | return; |
133 | ||
f1575642 SC |
134 | if (!enable_apicv) |
135 | return; | |
136 | ||
187c8833 | 137 | down_write(&vcpu->kvm->arch.apicv_update_lock); |
0f250a64 VK |
138 | |
139 | if (auto_eoi_new) | |
140 | hv->synic_auto_eoi_used++; | |
141 | else | |
142 | hv->synic_auto_eoi_used--; | |
143 | ||
320af55a SC |
144 | /* |
145 | * Inhibit APICv if any vCPU is using SynIC's AutoEOI, which relies on | |
146 | * the hypervisor to manually inject IRQs. | |
147 | */ | |
148 | __kvm_set_or_clear_apicv_inhibit(vcpu->kvm, | |
149 | APICV_INHIBIT_REASON_HYPERV, | |
150 | !!hv->synic_auto_eoi_used); | |
0f250a64 | 151 | |
187c8833 | 152 | up_write(&vcpu->kvm->arch.apicv_update_lock); |
98f65ad4 VK |
153 | } |
154 | ||
7be58a64 AS |
155 | static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint, |
156 | u64 data, bool host) | |
5c919412 | 157 | { |
98f65ad4 | 158 | int vector, old_vector; |
915e6f78 | 159 | bool masked; |
5c919412 AS |
160 | |
161 | vector = data & HV_SYNIC_SINT_VECTOR_MASK; | |
915e6f78 VK |
162 | masked = data & HV_SYNIC_SINT_MASKED; |
163 | ||
164 | /* | |
165 | * Valid vectors are 16-255; however, nested Hyper-V attempts to write |
166 | * the default '0x10000' value on boot, and this should not #GP. We also |
167 | * need to allow zero-initing the register from the host. |
168 | */ | |
169 | if (vector < HV_SYNIC_FIRST_VALID_VECTOR && !host && !masked) | |
5c919412 AS |
170 | return 1; |
171 | /* | |
172 | * Guest may configure multiple SINTs to use the same vector, so | |
173 | * we maintain a bitmap of vectors handled by synic, and a | |
174 | * bitmap of vectors with auto-eoi behavior. The bitmaps are | |
175 | * updated here, and atomically queried on fast paths. | |
176 | */ | |
98f65ad4 | 177 | old_vector = synic_read_sint(synic, sint) & HV_SYNIC_SINT_VECTOR_MASK; |
5c919412 AS |
178 | |
179 | atomic64_set(&synic->sint[sint], data); | |
180 | ||
98f65ad4 | 181 | synic_update_vector(synic, old_vector); |
5c919412 | 182 | |
98f65ad4 | 183 | synic_update_vector(synic, vector); |
5c919412 AS |
184 | |
185 | /* Load SynIC vectors into EOI exit bitmap */ | |
e0121fa2 | 186 | kvm_make_request(KVM_REQ_SCAN_IOAPIC, hv_synic_to_vcpu(synic)); |
5c919412 AS |
187 | return 0; |
188 | } | |
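/*
 * Illustrative note (an assumption, not upstream code): vec_bitmap and
 * auto_eoi_bitmap, kept up to date via synic_update_vector() above, are what
 * hot paths are meant to query without extra locking, roughly:
 *
 *	if (test_bit(vector, to_hv_synic(vcpu)->vec_bitmap))
 *		kvm_hv_synic_send_eoi(vcpu, vector);
 *
 * kvm_hv_synic_send_eoi() further down in this file is the EOI-side
 * consumer of this state.
 */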
189 | ||
d3457c87 RK |
190 | static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx) |
191 | { | |
192 | struct kvm_vcpu *vcpu = NULL; | |
46808a4c | 193 | unsigned long i; |
d3457c87 | 194 | |
9170200e VK |
195 | if (vpidx >= KVM_MAX_VCPUS) |
196 | return NULL; | |
197 | ||
198 | vcpu = kvm_get_vcpu(kvm, vpidx); | |
f2bc14b6 | 199 | if (vcpu && kvm_hv_get_vpindex(vcpu) == vpidx) |
d3457c87 RK |
200 | return vcpu; |
201 | kvm_for_each_vcpu(i, vcpu, kvm) | |
f2bc14b6 | 202 | if (kvm_hv_get_vpindex(vcpu) == vpidx) |
d3457c87 RK |
203 | return vcpu; |
204 | return NULL; | |
205 | } | |
206 | ||
207 | static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vpidx) | |
5c919412 AS |
208 | { |
209 | struct kvm_vcpu *vcpu; | |
210 | struct kvm_vcpu_hv_synic *synic; | |
211 | ||
d3457c87 | 212 | vcpu = get_vcpu_by_vpidx(kvm, vpidx); |
919f4ebc | 213 | if (!vcpu || !to_hv_vcpu(vcpu)) |
5c919412 | 214 | return NULL; |
e0121fa2 | 215 | synic = to_hv_synic(vcpu); |
5c919412 AS |
216 | return (synic->active) ? synic : NULL; |
217 | } | |
218 | ||
219 | static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint) | |
220 | { | |
221 | struct kvm *kvm = vcpu->kvm; | |
e0121fa2 | 222 | struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu); |
ef3f3980 | 223 | struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); |
1f4b34f8 | 224 | struct kvm_vcpu_hv_stimer *stimer; |
08a800ac | 225 | int gsi, idx; |
5c919412 | 226 | |
18659a9c | 227 | trace_kvm_hv_notify_acked_sint(vcpu->vcpu_id, sint); |
5c919412 | 228 | |
1f4b34f8 | 229 | /* Try to deliver pending Hyper-V SynIC timer messages */ |
1f4b34f8 AS |
230 | for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) { |
231 | stimer = &hv_vcpu->stimer[idx]; | |
6a058a1e | 232 | if (stimer->msg_pending && stimer->config.enable && |
8644f771 | 233 | !stimer->config.direct_mode && |
08a800ac VK |
234 | stimer->config.sintx == sint) |
235 | stimer_mark_pending(stimer, false); | |
1f4b34f8 | 236 | } |
1f4b34f8 | 237 | |
5c919412 | 238 | idx = srcu_read_lock(&kvm->irq_srcu); |
1f4b34f8 | 239 | gsi = atomic_read(&synic->sint_to_gsi[sint]); |
5c919412 AS |
240 | if (gsi != -1) |
241 | kvm_notify_acked_gsi(kvm, gsi); | |
242 | srcu_read_unlock(&kvm->irq_srcu, idx); | |
243 | } | |
244 | ||
db397571 AS |
245 | static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr) |
246 | { | |
e0121fa2 | 247 | struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic); |
9ff5e030 | 248 | struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); |
db397571 AS |
249 | |
250 | hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC; | |
251 | hv_vcpu->exit.u.synic.msr = msr; | |
252 | hv_vcpu->exit.u.synic.control = synic->control; | |
253 | hv_vcpu->exit.u.synic.evt_page = synic->evt_page; | |
254 | hv_vcpu->exit.u.synic.msg_page = synic->msg_page; | |
255 | ||
256 | kvm_make_request(KVM_REQ_HV_EXIT, vcpu); | |
257 | } | |
258 | ||
5c919412 AS |
259 | static int synic_set_msr(struct kvm_vcpu_hv_synic *synic, |
260 | u32 msr, u64 data, bool host) | |
261 | { | |
e0121fa2 | 262 | struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic); |
5c919412 AS |
263 | int ret; |
264 | ||
b1e34d32 | 265 | if (!synic->active && (!host || data)) |
5c919412 AS |
266 | return 1; |
267 | ||
18659a9c AS |
268 | trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host); |
269 | ||
5c919412 AS |
270 | ret = 0; |
271 | switch (msr) { | |
272 | case HV_X64_MSR_SCONTROL: | |
273 | synic->control = data; | |
db397571 AS |
274 | if (!host) |
275 | synic_exit(synic, msr); | |
5c919412 AS |
276 | break; |
277 | case HV_X64_MSR_SVERSION: | |
278 | if (!host) { | |
279 | ret = 1; | |
280 | break; | |
281 | } | |
282 | synic->version = data; | |
283 | break; | |
284 | case HV_X64_MSR_SIEFP: | |
efc479e6 RK |
285 | if ((data & HV_SYNIC_SIEFP_ENABLE) && !host && |
286 | !synic->dont_zero_synic_pages) | |
5c919412 AS |
287 | if (kvm_clear_guest(vcpu->kvm, |
288 | data & PAGE_MASK, PAGE_SIZE)) { | |
289 | ret = 1; | |
290 | break; | |
291 | } | |
292 | synic->evt_page = data; | |
db397571 AS |
293 | if (!host) |
294 | synic_exit(synic, msr); | |
5c919412 AS |
295 | break; |
296 | case HV_X64_MSR_SIMP: | |
efc479e6 RK |
297 | if ((data & HV_SYNIC_SIMP_ENABLE) && !host && |
298 | !synic->dont_zero_synic_pages) | |
5c919412 AS |
299 | if (kvm_clear_guest(vcpu->kvm, |
300 | data & PAGE_MASK, PAGE_SIZE)) { | |
301 | ret = 1; | |
302 | break; | |
303 | } | |
304 | synic->msg_page = data; | |
db397571 AS |
305 | if (!host) |
306 | synic_exit(synic, msr); | |
5c919412 AS |
307 | break; |
308 | case HV_X64_MSR_EOM: { | |
309 | int i; | |
310 | ||
b1e34d32 VK |
311 | if (!synic->active) |
312 | break; | |
313 | ||
5c919412 AS |
314 | for (i = 0; i < ARRAY_SIZE(synic->sint); i++) |
315 | kvm_hv_notify_acked_sint(vcpu, i); | |
316 | break; | |
317 | } | |
318 | case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15: | |
7be58a64 | 319 | ret = synic_set_sint(synic, msr - HV_X64_MSR_SINT0, data, host); |
5c919412 AS |
320 | break; |
321 | default: | |
322 | ret = 1; | |
323 | break; | |
324 | } | |
325 | return ret; | |
326 | } | |
327 | ||
f97f5a56 JD |
328 | static bool kvm_hv_is_syndbg_enabled(struct kvm_vcpu *vcpu) |
329 | { | |
10d7bf1e | 330 | struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); |
f97f5a56 | 331 | |
10d7bf1e VK |
332 | return hv_vcpu->cpuid_cache.syndbg_cap_eax & |
333 | HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING; | |
f97f5a56 JD |
334 | } |
335 | ||
336 | static int kvm_hv_syndbg_complete_userspace(struct kvm_vcpu *vcpu) | |
337 | { | |
05f04ae4 | 338 | struct kvm_hv *hv = to_kvm_hv(vcpu->kvm); |
f97f5a56 JD |
339 | |
340 | if (vcpu->run->hyperv.u.syndbg.msr == HV_X64_MSR_SYNDBG_CONTROL) | |
341 | hv->hv_syndbg.control.status = | |
342 | vcpu->run->hyperv.u.syndbg.status; | |
343 | return 1; | |
344 | } | |
345 | ||
346 | static void syndbg_exit(struct kvm_vcpu *vcpu, u32 msr) | |
347 | { | |
f69b55ef | 348 | struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu); |
9ff5e030 | 349 | struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); |
f97f5a56 JD |
350 | |
351 | hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNDBG; | |
352 | hv_vcpu->exit.u.syndbg.msr = msr; | |
353 | hv_vcpu->exit.u.syndbg.control = syndbg->control.control; | |
354 | hv_vcpu->exit.u.syndbg.send_page = syndbg->control.send_page; | |
355 | hv_vcpu->exit.u.syndbg.recv_page = syndbg->control.recv_page; | |
356 | hv_vcpu->exit.u.syndbg.pending_page = syndbg->control.pending_page; | |
357 | vcpu->arch.complete_userspace_io = | |
358 | kvm_hv_syndbg_complete_userspace; | |
359 | ||
360 | kvm_make_request(KVM_REQ_HV_EXIT, vcpu); | |
361 | } | |
362 | ||
363 | static int syndbg_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) | |
364 | { | |
f69b55ef | 365 | struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu); |
f97f5a56 JD |
366 | |
367 | if (!kvm_hv_is_syndbg_enabled(vcpu) && !host) | |
368 | return 1; | |
369 | ||
370 | trace_kvm_hv_syndbg_set_msr(vcpu->vcpu_id, | |
ef3f3980 | 371 | to_hv_vcpu(vcpu)->vp_index, msr, data); |
f97f5a56 JD |
372 | switch (msr) { |
373 | case HV_X64_MSR_SYNDBG_CONTROL: | |
374 | syndbg->control.control = data; | |
375 | if (!host) | |
376 | syndbg_exit(vcpu, msr); | |
377 | break; | |
378 | case HV_X64_MSR_SYNDBG_STATUS: | |
379 | syndbg->control.status = data; | |
380 | break; | |
381 | case HV_X64_MSR_SYNDBG_SEND_BUFFER: | |
382 | syndbg->control.send_page = data; | |
383 | break; | |
384 | case HV_X64_MSR_SYNDBG_RECV_BUFFER: | |
385 | syndbg->control.recv_page = data; | |
386 | break; | |
387 | case HV_X64_MSR_SYNDBG_PENDING_BUFFER: | |
388 | syndbg->control.pending_page = data; | |
389 | if (!host) | |
390 | syndbg_exit(vcpu, msr); | |
391 | break; | |
392 | case HV_X64_MSR_SYNDBG_OPTIONS: | |
393 | syndbg->options = data; | |
394 | break; | |
395 | default: | |
396 | break; | |
397 | } | |
398 | ||
399 | return 0; | |
400 | } | |
401 | ||
402 | static int syndbg_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host) | |
403 | { | |
f69b55ef | 404 | struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu); |
f97f5a56 JD |
405 | |
406 | if (!kvm_hv_is_syndbg_enabled(vcpu) && !host) | |
407 | return 1; | |
408 | ||
409 | switch (msr) { | |
410 | case HV_X64_MSR_SYNDBG_CONTROL: | |
411 | *pdata = syndbg->control.control; | |
412 | break; | |
413 | case HV_X64_MSR_SYNDBG_STATUS: | |
414 | *pdata = syndbg->control.status; | |
415 | break; | |
416 | case HV_X64_MSR_SYNDBG_SEND_BUFFER: | |
417 | *pdata = syndbg->control.send_page; | |
418 | break; | |
419 | case HV_X64_MSR_SYNDBG_RECV_BUFFER: | |
420 | *pdata = syndbg->control.recv_page; | |
421 | break; | |
422 | case HV_X64_MSR_SYNDBG_PENDING_BUFFER: | |
423 | *pdata = syndbg->control.pending_page; | |
424 | break; | |
425 | case HV_X64_MSR_SYNDBG_OPTIONS: | |
426 | *pdata = syndbg->options; | |
427 | break; | |
428 | default: | |
429 | break; | |
430 | } | |
431 | ||
f2bc14b6 | 432 | trace_kvm_hv_syndbg_get_msr(vcpu->vcpu_id, kvm_hv_get_vpindex(vcpu), msr, *pdata); |
f97f5a56 JD |
433 | |
434 | return 0; | |
435 | } | |
436 | ||
44883f01 PB |
437 | static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata, |
438 | bool host) | |
5c919412 AS |
439 | { |
440 | int ret; | |
441 | ||
44883f01 | 442 | if (!synic->active && !host) |
5c919412 AS |
443 | return 1; |
444 | ||
445 | ret = 0; | |
446 | switch (msr) { | |
447 | case HV_X64_MSR_SCONTROL: | |
448 | *pdata = synic->control; | |
449 | break; | |
450 | case HV_X64_MSR_SVERSION: | |
451 | *pdata = synic->version; | |
452 | break; | |
453 | case HV_X64_MSR_SIEFP: | |
454 | *pdata = synic->evt_page; | |
455 | break; | |
456 | case HV_X64_MSR_SIMP: | |
457 | *pdata = synic->msg_page; | |
458 | break; | |
459 | case HV_X64_MSR_EOM: | |
460 | *pdata = 0; | |
461 | break; | |
462 | case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15: | |
463 | *pdata = atomic64_read(&synic->sint[msr - HV_X64_MSR_SINT0]); | |
464 | break; | |
465 | default: | |
466 | ret = 1; | |
467 | break; | |
468 | } | |
469 | return ret; | |
470 | } | |
471 | ||
ecd8a8c2 | 472 | static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint) |
5c919412 | 473 | { |
e0121fa2 | 474 | struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic); |
5c919412 AS |
475 | struct kvm_lapic_irq irq; |
476 | int ret, vector; | |
477 | ||
7ec37d1c VK |
478 | if (KVM_BUG_ON(!lapic_in_kernel(vcpu), vcpu->kvm)) |
479 | return -EINVAL; | |
480 | ||
5c919412 AS |
481 | if (sint >= ARRAY_SIZE(synic->sint)) |
482 | return -EINVAL; | |
483 | ||
484 | vector = synic_get_sint_vector(synic_read_sint(synic, sint)); | |
485 | if (vector < 0) | |
486 | return -ENOENT; | |
487 | ||
488 | memset(&irq, 0, sizeof(irq)); | |
f98a3efb | 489 | irq.shorthand = APIC_DEST_SELF; |
5c919412 AS |
490 | irq.dest_mode = APIC_DEST_PHYSICAL; |
491 | irq.delivery_mode = APIC_DM_FIXED; | |
492 | irq.vector = vector; | |
493 | irq.level = 1; | |
494 | ||
f98a3efb | 495 | ret = kvm_irq_delivery_to_apic(vcpu->kvm, vcpu->arch.apic, &irq, NULL); |
18659a9c | 496 | trace_kvm_hv_synic_set_irq(vcpu->vcpu_id, sint, irq.vector, ret); |
5c919412 AS |
497 | return ret; |
498 | } | |
499 | ||
d3457c87 | 500 | int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vpidx, u32 sint) |
5c919412 AS |
501 | { |
502 | struct kvm_vcpu_hv_synic *synic; | |
503 | ||
d3457c87 | 504 | synic = synic_get(kvm, vpidx); |
5c919412 AS |
505 | if (!synic) |
506 | return -EINVAL; | |
507 | ||
508 | return synic_set_irq(synic, sint); | |
509 | } | |
510 | ||
511 | void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector) | |
512 | { | |
e0121fa2 | 513 | struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu); |
5c919412 AS |
514 | int i; |
515 | ||
18659a9c | 516 | trace_kvm_hv_synic_send_eoi(vcpu->vcpu_id, vector); |
5c919412 AS |
517 | |
518 | for (i = 0; i < ARRAY_SIZE(synic->sint); i++) | |
519 | if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector) | |
520 | kvm_hv_notify_acked_sint(vcpu, i); | |
521 | } | |
522 | ||
d3457c87 | 523 | static int kvm_hv_set_sint_gsi(struct kvm *kvm, u32 vpidx, u32 sint, int gsi) |
5c919412 AS |
524 | { |
525 | struct kvm_vcpu_hv_synic *synic; | |
526 | ||
d3457c87 | 527 | synic = synic_get(kvm, vpidx); |
5c919412 AS |
528 | if (!synic) |
529 | return -EINVAL; | |
530 | ||
531 | if (sint >= ARRAY_SIZE(synic->sint_to_gsi)) | |
532 | return -EINVAL; | |
533 | ||
534 | atomic_set(&synic->sint_to_gsi[sint], gsi); | |
535 | return 0; | |
536 | } | |
537 | ||
538 | void kvm_hv_irq_routing_update(struct kvm *kvm) | |
539 | { | |
540 | struct kvm_irq_routing_table *irq_rt; | |
541 | struct kvm_kernel_irq_routing_entry *e; | |
542 | u32 gsi; | |
543 | ||
544 | irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu, | |
545 | lockdep_is_held(&kvm->irq_lock)); | |
546 | ||
547 | for (gsi = 0; gsi < irq_rt->nr_rt_entries; gsi++) { | |
548 | hlist_for_each_entry(e, &irq_rt->map[gsi], link) { | |
549 | if (e->type == KVM_IRQ_ROUTING_HV_SINT) | |
550 | kvm_hv_set_sint_gsi(kvm, e->hv_sint.vcpu, | |
551 | e->hv_sint.sint, gsi); | |
552 | } | |
553 | } | |
554 | } | |
555 | ||
556 | static void synic_init(struct kvm_vcpu_hv_synic *synic) | |
557 | { | |
558 | int i; | |
559 | ||
560 | memset(synic, 0, sizeof(*synic)); | |
561 | synic->version = HV_SYNIC_VERSION_1; | |
562 | for (i = 0; i < ARRAY_SIZE(synic->sint); i++) { | |
563 | atomic64_set(&synic->sint[i], HV_SYNIC_SINT_MASKED); | |
564 | atomic_set(&synic->sint_to_gsi[i], -1); | |
565 | } | |
566 | } | |
567 | ||
93bf4172 AS |
568 | static u64 get_time_ref_counter(struct kvm *kvm) |
569 | { | |
05f04ae4 | 570 | struct kvm_hv *hv = to_kvm_hv(kvm); |
095cf55d PB |
571 | struct kvm_vcpu *vcpu; |
572 | u64 tsc; | |
573 | ||
574 | /* | |
cc9cfddb VK |
575 | * Fall back to get_kvmclock_ns() when TSC page hasn't been set up, |
576 | * is broken, disabled or being updated. | |
095cf55d | 577 | */ |
cc9cfddb | 578 | if (hv->hv_tsc_page_status != HV_TSC_PAGE_SET) |
095cf55d PB |
579 | return div_u64(get_kvmclock_ns(kvm), 100); |
580 | ||
581 | vcpu = kvm_get_vcpu(kvm, 0); | |
582 | tsc = kvm_read_l1_tsc(vcpu, rdtsc()); | |
583 | return mul_u64_u64_shr(tsc, hv->tsc_ref.tsc_scale, 64) | |
584 | + hv->tsc_ref.tsc_offset; | |
93bf4172 AS |
585 | } |
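/*
 * Worked example (illustrative only, the numbers are assumptions): the
 * partition reference counter ticks at 10 MHz (100ns units). For a guest TSC
 * running at 1 GHz, tsc_scale is chosen so that tsc_scale / 2^64 == 1/100;
 * a raw TSC delta of 1,000,000,000 cycles then yields
 *
 *	mul_u64_u64_shr(1000000000, tsc_scale, 64) == 10,000,000
 *
 * reference ticks, i.e. exactly one second, to which the constant
 * tsc_offset is added.
 */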
586 | ||
f3b138c5 | 587 | static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer, |
1f4b34f8 AS |
588 | bool vcpu_kick) |
589 | { | |
aafa97fd | 590 | struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer); |
1f4b34f8 AS |
591 | |
592 | set_bit(stimer->index, | |
ef3f3980 | 593 | to_hv_vcpu(vcpu)->stimer_pending_bitmap); |
1f4b34f8 AS |
594 | kvm_make_request(KVM_REQ_HV_STIMER, vcpu); |
595 | if (vcpu_kick) | |
596 | kvm_vcpu_kick(vcpu); | |
597 | } | |
598 | ||
1f4b34f8 AS |
599 | static void stimer_cleanup(struct kvm_vcpu_hv_stimer *stimer) |
600 | { | |
aafa97fd | 601 | struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer); |
1f4b34f8 | 602 | |
aafa97fd | 603 | trace_kvm_hv_stimer_cleanup(hv_stimer_to_vcpu(stimer)->vcpu_id, |
ac3e5fca AS |
604 | stimer->index); |
605 | ||
019b9781 | 606 | hrtimer_cancel(&stimer->timer); |
1f4b34f8 | 607 | clear_bit(stimer->index, |
ef3f3980 | 608 | to_hv_vcpu(vcpu)->stimer_pending_bitmap); |
1f4b34f8 | 609 | stimer->msg_pending = false; |
f808495d | 610 | stimer->exp_time = 0; |
1f4b34f8 AS |
611 | } |
612 | ||
613 | static enum hrtimer_restart stimer_timer_callback(struct hrtimer *timer) | |
614 | { | |
615 | struct kvm_vcpu_hv_stimer *stimer; | |
616 | ||
617 | stimer = container_of(timer, struct kvm_vcpu_hv_stimer, timer); | |
aafa97fd | 618 | trace_kvm_hv_stimer_callback(hv_stimer_to_vcpu(stimer)->vcpu_id, |
ac3e5fca | 619 | stimer->index); |
f3b138c5 | 620 | stimer_mark_pending(stimer, true); |
1f4b34f8 AS |
621 | |
622 | return HRTIMER_NORESTART; | |
623 | } | |
624 | ||
f808495d AS |
625 | /* |
626 | * stimer_start() assumptions: | |
627 | * a) stimer->count is not equal to 0 | |
628 | * b) stimer->config has HV_STIMER_ENABLE flag | |
629 | */ | |
1f4b34f8 AS |
630 | static int stimer_start(struct kvm_vcpu_hv_stimer *stimer) |
631 | { | |
632 | u64 time_now; | |
633 | ktime_t ktime_now; | |
634 | ||
aafa97fd | 635 | time_now = get_time_ref_counter(hv_stimer_to_vcpu(stimer)->kvm); |
1f4b34f8 AS |
636 | ktime_now = ktime_get(); |
637 | ||
6a058a1e | 638 | if (stimer->config.periodic) { |
f808495d AS |
639 | if (stimer->exp_time) { |
640 | if (time_now >= stimer->exp_time) { | |
641 | u64 remainder; | |
642 | ||
643 | div64_u64_rem(time_now - stimer->exp_time, | |
644 | stimer->count, &remainder); | |
645 | stimer->exp_time = | |
646 | time_now + (stimer->count - remainder); | |
647 | } | |
648 | } else | |
649 | stimer->exp_time = time_now + stimer->count; | |
1f4b34f8 | 650 | |
ac3e5fca | 651 | trace_kvm_hv_stimer_start_periodic( |
aafa97fd | 652 | hv_stimer_to_vcpu(stimer)->vcpu_id, |
ac3e5fca AS |
653 | stimer->index, |
654 | time_now, stimer->exp_time); | |
655 | ||
1f4b34f8 | 656 | hrtimer_start(&stimer->timer, |
f808495d AS |
657 | ktime_add_ns(ktime_now, |
658 | 100 * (stimer->exp_time - time_now)), | |
1f4b34f8 AS |
659 | HRTIMER_MODE_ABS); |
660 | return 0; | |
661 | } | |
662 | stimer->exp_time = stimer->count; | |
663 | if (time_now >= stimer->count) { | |
664 | /* | |
665 | * Expire timer according to Hypervisor Top-Level Functional | |
666 | * specification v4 (15.3.1): |
667 | * "If a one shot is enabled and the specified count is in | |
668 | * the past, it will expire immediately." | |
669 | */ | |
f3b138c5 | 670 | stimer_mark_pending(stimer, false); |
1f4b34f8 AS |
671 | return 0; |
672 | } | |
673 | ||
aafa97fd | 674 | trace_kvm_hv_stimer_start_one_shot(hv_stimer_to_vcpu(stimer)->vcpu_id, |
ac3e5fca AS |
675 | stimer->index, |
676 | time_now, stimer->count); | |
677 | ||
1f4b34f8 AS |
678 | hrtimer_start(&stimer->timer, |
679 | ktime_add_ns(ktime_now, 100 * (stimer->count - time_now)), | |
680 | HRTIMER_MODE_ABS); | |
681 | return 0; | |
682 | } | |
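/*
 * Worked example (illustrative only): for a periodic timer with
 * stimer->count == 100000 (10ms in 100ns units), exp_time == 200000 and
 * time_now == 450000, the code above computes remainder == 50000 and sets
 * exp_time = 450000 + (100000 - 50000) == 500000, i.e. the next multiple of
 * the period after "now". The hrtimer is then armed at
 * ktime_now + 100 * (exp_time - time_now) nanoseconds (5ms out in this
 * example), since reference-counter ticks are 100ns each.
 */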
683 | ||
684 | static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config, | |
685 | bool host) | |
686 | { | |
8644f771 VK |
687 | union hv_stimer_config new_config = {.as_uint64 = config}, |
688 | old_config = {.as_uint64 = stimer->config.as_uint64}; | |
aafa97fd | 689 | struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer); |
1aa8a418 | 690 | struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); |
e0121fa2 | 691 | struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu); |
dbcf3f96 | 692 | |
b1e34d32 | 693 | if (!synic->active && (!host || config)) |
dbcf3f96 | 694 | return 1; |
6a058a1e | 695 | |
1aa8a418 VK |
696 | if (unlikely(!host && hv_vcpu->enforce_cpuid && new_config.direct_mode && |
697 | !(hv_vcpu->cpuid_cache.features_edx & | |
698 | HV_STIMER_DIRECT_MODE_AVAILABLE))) | |
699 | return 1; | |
700 | ||
aafa97fd | 701 | trace_kvm_hv_stimer_set_config(hv_stimer_to_vcpu(stimer)->vcpu_id, |
ac3e5fca AS |
702 | stimer->index, config, host); |
703 | ||
f3b138c5 | 704 | stimer_cleanup(stimer); |
8644f771 VK |
705 | if (old_config.enable && |
706 | !new_config.direct_mode && new_config.sintx == 0) | |
6a058a1e VK |
707 | new_config.enable = 0; |
708 | stimer->config.as_uint64 = new_config.as_uint64; | |
8644f771 | 709 | |
013cc6eb VK |
710 | if (stimer->config.enable) |
711 | stimer_mark_pending(stimer, false); | |
712 | ||
1f4b34f8 AS |
713 | return 0; |
714 | } | |
715 | ||
716 | static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count, | |
717 | bool host) | |
718 | { | |
aafa97fd | 719 | struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer); |
e0121fa2 | 720 | struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu); |
dbcf3f96 | 721 | |
b1e34d32 | 722 | if (!synic->active && (!host || count)) |
dbcf3f96 VK |
723 | return 1; |
724 | ||
aafa97fd | 725 | trace_kvm_hv_stimer_set_count(hv_stimer_to_vcpu(stimer)->vcpu_id, |
ac3e5fca AS |
726 | stimer->index, count, host); |
727 | ||
1f4b34f8 | 728 | stimer_cleanup(stimer); |
f3b138c5 | 729 | stimer->count = count; |
1f4b34f8 | 730 | if (stimer->count == 0) |
6a058a1e VK |
731 | stimer->config.enable = 0; |
732 | else if (stimer->config.auto_enable) | |
733 | stimer->config.enable = 1; | |
013cc6eb VK |
734 | |
735 | if (stimer->config.enable) | |
736 | stimer_mark_pending(stimer, false); | |
737 | ||
1f4b34f8 AS |
738 | return 0; |
739 | } | |
740 | ||
741 | static int stimer_get_config(struct kvm_vcpu_hv_stimer *stimer, u64 *pconfig) | |
742 | { | |
6a058a1e | 743 | *pconfig = stimer->config.as_uint64; |
1f4b34f8 AS |
744 | return 0; |
745 | } | |
746 | ||
747 | static int stimer_get_count(struct kvm_vcpu_hv_stimer *stimer, u64 *pcount) | |
748 | { | |
749 | *pcount = stimer->count; | |
750 | return 0; | |
751 | } | |
752 | ||
753 | static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint, | |
7deec5e0 | 754 | struct hv_message *src_msg, bool no_retry) |
1f4b34f8 | 755 | { |
e0121fa2 | 756 | struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic); |
3a0e7731 RK |
757 | int msg_off = offsetof(struct hv_message_page, sint_message[sint]); |
758 | gfn_t msg_page_gfn; | |
759 | struct hv_message_header hv_hdr; | |
1f4b34f8 | 760 | int r; |
1f4b34f8 AS |
761 | |
762 | if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE)) | |
763 | return -ENOENT; | |
764 | ||
3a0e7731 | 765 | msg_page_gfn = synic->msg_page >> PAGE_SHIFT; |
1f4b34f8 | 766 | |
3a0e7731 RK |
767 | /* |
768 | * Strictly following the spec-mandated ordering would assume setting | |
769 | * .msg_pending before checking .message_type. However, this function | |
770 | * is only called in vcpu context so the entire update is atomic from | |
771 | * guest POV and thus the exact order here doesn't matter. | |
772 | */ | |
773 | r = kvm_vcpu_read_guest_page(vcpu, msg_page_gfn, &hv_hdr.message_type, | |
774 | msg_off + offsetof(struct hv_message, | |
775 | header.message_type), | |
776 | sizeof(hv_hdr.message_type)); | |
777 | if (r < 0) | |
778 | return r; | |
779 | ||
780 | if (hv_hdr.message_type != HVMSG_NONE) { | |
7deec5e0 RK |
781 | if (no_retry) |
782 | return 0; | |
783 | ||
3a0e7731 RK |
784 | hv_hdr.message_flags.msg_pending = 1; |
785 | r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn, | |
786 | &hv_hdr.message_flags, | |
787 | msg_off + | |
788 | offsetof(struct hv_message, | |
789 | header.message_flags), | |
790 | sizeof(hv_hdr.message_flags)); | |
791 | if (r < 0) | |
792 | return r; | |
793 | return -EAGAIN; | |
1f4b34f8 | 794 | } |
3a0e7731 RK |
795 | |
796 | r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn, src_msg, msg_off, | |
797 | sizeof(src_msg->header) + | |
798 | src_msg->header.payload_size); | |
799 | if (r < 0) | |
800 | return r; | |
801 | ||
802 | r = synic_set_irq(synic, sint); | |
803 | if (r < 0) | |
804 | return r; | |
805 | if (r == 0) | |
806 | return -EFAULT; | |
807 | return 0; | |
1f4b34f8 AS |
808 | } |
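/*
 * Illustrative note (an assumption about the guest-side protocol, not code
 * from this file): after consuming a message delivered by
 * synic_deliver_msg(), the guest is expected to write HVMSG_NONE back to the
 * slot's .message_type and then write HV_X64_MSR_EOM. That MSR write is
 * handled by synic_set_msr() above, which calls kvm_hv_notify_acked_sint();
 * if a stimer's message could not be delivered (-EAGAIN from this function,
 * msg_pending left set), that path re-marks the stimer pending so delivery
 * is retried.
 */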
809 | ||
0cdeabb1 | 810 | static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer) |
1f4b34f8 | 811 | { |
aafa97fd | 812 | struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer); |
1f4b34f8 AS |
813 | struct hv_message *msg = &stimer->msg; |
814 | struct hv_timer_message_payload *payload = | |
815 | (struct hv_timer_message_payload *)&msg->u.payload; | |
1f4b34f8 | 816 | |
7deec5e0 RK |
817 | /* |
818 | * To avoid piling up periodic ticks, don't retry message | |
819 | * delivery for them (within "lazy" lost ticks policy). | |
820 | */ | |
6a058a1e | 821 | bool no_retry = stimer->config.periodic; |
7deec5e0 | 822 | |
1f4b34f8 AS |
823 | payload->expiration_time = stimer->exp_time; |
824 | payload->delivery_time = get_time_ref_counter(vcpu->kvm); | |
e0121fa2 | 825 | return synic_deliver_msg(to_hv_synic(vcpu), |
6a058a1e | 826 | stimer->config.sintx, msg, |
7deec5e0 | 827 | no_retry); |
1f4b34f8 AS |
828 | } |
829 | ||
8644f771 VK |
830 | static int stimer_notify_direct(struct kvm_vcpu_hv_stimer *stimer) |
831 | { | |
aafa97fd | 832 | struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer); |
8644f771 VK |
833 | struct kvm_lapic_irq irq = { |
834 | .delivery_mode = APIC_DM_FIXED, | |
835 | .vector = stimer->config.apic_vector | |
836 | }; | |
837 | ||
a073d7e3 WL |
838 | if (lapic_in_kernel(vcpu)) |
839 | return !kvm_apic_set_irq(vcpu, &irq, NULL); | |
840 | return 0; | |
8644f771 VK |
841 | } |
842 | ||
1f4b34f8 AS |
843 | static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer) |
844 | { | |
8644f771 | 845 | int r, direct = stimer->config.direct_mode; |
ac3e5fca | 846 | |
0cdeabb1 | 847 | stimer->msg_pending = true; |
8644f771 VK |
848 | if (!direct) |
849 | r = stimer_send_msg(stimer); | |
850 | else | |
851 | r = stimer_notify_direct(stimer); | |
aafa97fd | 852 | trace_kvm_hv_stimer_expiration(hv_stimer_to_vcpu(stimer)->vcpu_id, |
8644f771 | 853 | stimer->index, direct, r); |
ac3e5fca | 854 | if (!r) { |
0cdeabb1 | 855 | stimer->msg_pending = false; |
6a058a1e VK |
856 | if (!(stimer->config.periodic)) |
857 | stimer->config.enable = 0; | |
0cdeabb1 | 858 | } |
1f4b34f8 AS |
859 | } |
860 | ||
861 | void kvm_hv_process_stimers(struct kvm_vcpu *vcpu) | |
862 | { | |
ef3f3980 | 863 | struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); |
1f4b34f8 | 864 | struct kvm_vcpu_hv_stimer *stimer; |
f3b138c5 | 865 | u64 time_now, exp_time; |
1f4b34f8 AS |
866 | int i; |
867 | ||
f2bc14b6 VK |
868 | if (!hv_vcpu) |
869 | return; | |
870 | ||
1f4b34f8 AS |
871 | for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++) |
872 | if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) { | |
873 | stimer = &hv_vcpu->stimer[i]; | |
6a058a1e | 874 | if (stimer->config.enable) { |
f3b138c5 AS |
875 | exp_time = stimer->exp_time; |
876 | ||
877 | if (exp_time) { | |
878 | time_now = | |
879 | get_time_ref_counter(vcpu->kvm); | |
880 | if (time_now >= exp_time) | |
881 | stimer_expiration(stimer); | |
882 | } | |
0cdeabb1 | 883 | |
6a058a1e | 884 | if ((stimer->config.enable) && |
f1ff89ec RK |
885 | stimer->count) { |
886 | if (!stimer->msg_pending) | |
887 | stimer_start(stimer); | |
888 | } else | |
0cdeabb1 | 889 | stimer_cleanup(stimer); |
1f4b34f8 AS |
890 | } |
891 | } | |
892 | } | |
893 | ||
894 | void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu) | |
895 | { | |
ef3f3980 | 896 | struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); |
1f4b34f8 AS |
897 | int i; |
898 | ||
fc08b628 VK |
899 | if (!hv_vcpu) |
900 | return; | |
901 | ||
1f4b34f8 AS |
902 | for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++) |
903 | stimer_cleanup(&hv_vcpu->stimer[i]); | |
4592b7ea VK |
904 | |
905 | kfree(hv_vcpu); | |
906 | vcpu->arch.hyperv = NULL; | |
1f4b34f8 AS |
907 | } |
908 | ||
72bbf935 LP |
909 | bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu) |
910 | { | |
9ff5e030 VK |
911 | struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); |
912 | ||
f2bc14b6 VK |
913 | if (!hv_vcpu) |
914 | return false; | |
915 | ||
9ff5e030 | 916 | if (!(hv_vcpu->hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) |
72bbf935 LP |
917 | return false; |
918 | return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED; | |
919 | } | |
920 | EXPORT_SYMBOL_GPL(kvm_hv_assist_page_enabled); | |
921 | ||
b415d8d4 | 922 | int kvm_hv_get_assist_page(struct kvm_vcpu *vcpu) |
72bbf935 | 923 | { |
046f5756 VK |
924 | struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); |
925 | ||
926 | if (!hv_vcpu || !kvm_hv_assist_page_enabled(vcpu)) | |
b415d8d4 | 927 | return -EFAULT; |
046f5756 | 928 | |
b415d8d4 VK |
929 | return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, |
930 | &hv_vcpu->vp_assist_page, sizeof(struct hv_vp_assist_page)); | |
72bbf935 LP |
931 | } |
932 | EXPORT_SYMBOL_GPL(kvm_hv_get_assist_page); | |
933 | ||
1f4b34f8 AS |
934 | static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer) |
935 | { | |
936 | struct hv_message *msg = &stimer->msg; | |
937 | struct hv_timer_message_payload *payload = | |
938 | (struct hv_timer_message_payload *)&msg->u.payload; | |
939 | ||
940 | memset(&msg->header, 0, sizeof(msg->header)); | |
941 | msg->header.message_type = HVMSG_TIMER_EXPIRED; | |
942 | msg->header.payload_size = sizeof(*payload); | |
943 | ||
944 | payload->timer_index = stimer->index; | |
945 | payload->expiration_time = 0; | |
946 | payload->delivery_time = 0; | |
947 | } | |
948 | ||
949 | static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index) | |
950 | { | |
951 | memset(stimer, 0, sizeof(*stimer)); | |
952 | stimer->index = timer_index; | |
953 | hrtimer_init(&stimer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | |
954 | stimer->timer.function = stimer_timer_callback; | |
955 | stimer_prepare_msg(stimer); | |
956 | } | |
957 | ||
3be29eb7 | 958 | int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu) |
5c919412 | 959 | { |
1cac8d9f | 960 | struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); |
1f4b34f8 AS |
961 | int i; |
962 | ||
1cac8d9f SC |
963 | if (hv_vcpu) |
964 | return 0; | |
965 | ||
4592b7ea VK |
966 | hv_vcpu = kzalloc(sizeof(struct kvm_vcpu_hv), GFP_KERNEL_ACCOUNT); |
967 | if (!hv_vcpu) | |
968 | return -ENOMEM; | |
969 | ||
970 | vcpu->arch.hyperv = hv_vcpu; | |
971 | hv_vcpu->vcpu = vcpu; | |
972 | ||
1f4b34f8 AS |
973 | synic_init(&hv_vcpu->synic); |
974 | ||
975 | bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT); | |
976 | for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++) | |
977 | stimer_init(&hv_vcpu->stimer[i], i); | |
4592b7ea | 978 | |
4eeef242 | 979 | hv_vcpu->vp_index = vcpu->vcpu_idx; |
fc08b628 | 980 | |
53ca765a VK |
981 | for (i = 0; i < HV_NR_TLB_FLUSH_FIFOS; i++) { |
982 | INIT_KFIFO(hv_vcpu->tlb_flush_fifo[i].entries); | |
983 | spin_lock_init(&hv_vcpu->tlb_flush_fifo[i].write_lock); | |
984 | } | |
0823570f | 985 | |
4592b7ea | 986 | return 0; |
5c919412 AS |
987 | } |
988 | ||
fc08b628 | 989 | int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages) |
d3457c87 | 990 | { |
fc08b628 VK |
991 | struct kvm_vcpu_hv_synic *synic; |
992 | int r; | |
d3457c87 | 993 | |
1cac8d9f SC |
994 | r = kvm_hv_vcpu_init(vcpu); |
995 | if (r) | |
996 | return r; | |
d3457c87 | 997 | |
fc08b628 | 998 | synic = to_hv_synic(vcpu); |
efc479e6 | 999 | |
efc479e6 RK |
1000 | synic->active = true; |
1001 | synic->dont_zero_synic_pages = dont_zero_synic_pages; | |
99b48ecc | 1002 | synic->control = HV_SYNIC_CONTROL_ENABLE; |
5c919412 AS |
1003 | return 0; |
1004 | } | |
1005 | ||
e83d5887 AS |
1006 | static bool kvm_hv_msr_partition_wide(u32 msr) |
1007 | { | |
1008 | bool r = false; | |
1009 | ||
1010 | switch (msr) { | |
1011 | case HV_X64_MSR_GUEST_OS_ID: | |
1012 | case HV_X64_MSR_HYPERCALL: | |
1013 | case HV_X64_MSR_REFERENCE_TSC: | |
1014 | case HV_X64_MSR_TIME_REF_COUNT: | |
e7d9513b AS |
1015 | case HV_X64_MSR_CRASH_CTL: |
1016 | case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4: | |
e516cebb | 1017 | case HV_X64_MSR_RESET: |
a2e164e7 VK |
1018 | case HV_X64_MSR_REENLIGHTENMENT_CONTROL: |
1019 | case HV_X64_MSR_TSC_EMULATION_CONTROL: | |
1020 | case HV_X64_MSR_TSC_EMULATION_STATUS: | |
2be1bd3a | 1021 | case HV_X64_MSR_TSC_INVARIANT_CONTROL: |
f97f5a56 JD |
1022 | case HV_X64_MSR_SYNDBG_OPTIONS: |
1023 | case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER: | |
e83d5887 AS |
1024 | r = true; |
1025 | break; | |
1026 | } | |
1027 | ||
1028 | return r; | |
1029 | } | |
1030 | ||
05f04ae4 | 1031 | static int kvm_hv_msr_get_crash_data(struct kvm *kvm, u32 index, u64 *pdata) |
e7d9513b | 1032 | { |
05f04ae4 | 1033 | struct kvm_hv *hv = to_kvm_hv(kvm); |
86187937 | 1034 | size_t size = ARRAY_SIZE(hv->hv_crash_param); |
e7d9513b | 1035 | |
86187937 | 1036 | if (WARN_ON_ONCE(index >= size)) |
e7d9513b AS |
1037 | return -EINVAL; |
1038 | ||
86187937 | 1039 | *pdata = hv->hv_crash_param[array_index_nospec(index, size)]; |
e7d9513b AS |
1040 | return 0; |
1041 | } | |
1042 | ||
05f04ae4 | 1043 | static int kvm_hv_msr_get_crash_ctl(struct kvm *kvm, u64 *pdata) |
e7d9513b | 1044 | { |
05f04ae4 | 1045 | struct kvm_hv *hv = to_kvm_hv(kvm); |
e7d9513b AS |
1046 | |
1047 | *pdata = hv->hv_crash_ctl; | |
1048 | return 0; | |
1049 | } | |
1050 | ||
05f04ae4 | 1051 | static int kvm_hv_msr_set_crash_ctl(struct kvm *kvm, u64 data) |
e7d9513b | 1052 | { |
05f04ae4 | 1053 | struct kvm_hv *hv = to_kvm_hv(kvm); |
e7d9513b | 1054 | |
05f04ae4 | 1055 | hv->hv_crash_ctl = data & HV_CRASH_CTL_CRASH_NOTIFY; |
e7d9513b AS |
1056 | |
1057 | return 0; | |
1058 | } | |
1059 | ||
05f04ae4 | 1060 | static int kvm_hv_msr_set_crash_data(struct kvm *kvm, u32 index, u64 data) |
e7d9513b | 1061 | { |
05f04ae4 | 1062 | struct kvm_hv *hv = to_kvm_hv(kvm); |
86187937 | 1063 | size_t size = ARRAY_SIZE(hv->hv_crash_param); |
e7d9513b | 1064 | |
86187937 | 1065 | if (WARN_ON_ONCE(index >= size)) |
e7d9513b AS |
1066 | return -EINVAL; |
1067 | ||
86187937 | 1068 | hv->hv_crash_param[array_index_nospec(index, size)] = data; |
e7d9513b AS |
1069 | return 0; |
1070 | } | |
1071 | ||
095cf55d PB |
1072 | /* |
1073 | * The kvmclock and Hyper-V TSC page use similar formulas, and converting | |
1074 | * between them is possible: | |
1075 | * | |
1076 | * kvmclock formula: | |
1077 | * nsec = (ticks - tsc_timestamp) * tsc_to_system_mul * 2^(tsc_shift-32) | |
1078 | * + system_time | |
1079 | * | |
1080 | * Hyper-V formula: | |
1081 | * nsec/100 = ticks * scale / 2^64 + offset | |
1082 | * | |
1083 | * When tsc_timestamp = system_time = 0, offset is zero in the Hyper-V formula. | |
1084 | * By dividing the kvmclock formula by 100 and equating what's left we get: | |
1085 | * ticks * scale / 2^64 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100 | |
1086 | * scale / 2^64 = tsc_to_system_mul * 2^(tsc_shift-32) / 100 | |
1087 | * scale = tsc_to_system_mul * 2^(32+tsc_shift) / 100 | |
1088 | * | |
1089 | * Now expand the kvmclock formula and divide by 100: | |
1090 | * nsec = ticks * tsc_to_system_mul * 2^(tsc_shift-32) | |
1091 | * - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32) | |
1092 | * + system_time | |
1093 | * nsec/100 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100 | |
1094 | * - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32) / 100 | |
1095 | * + system_time / 100 | |
1096 | * | |
1097 | * Replace tsc_to_system_mul * 2^(tsc_shift-32) / 100 by scale / 2^64: | |
1098 | * nsec/100 = ticks * scale / 2^64 | |
1099 | * - tsc_timestamp * scale / 2^64 | |
1100 | * + system_time / 100 | |
1101 | * | |
1102 | * Equate with the Hyper-V formula so that ticks * scale / 2^64 cancels out: | |
1103 | * offset = system_time / 100 - tsc_timestamp * scale / 2^64 | |
1104 | * | |
1105 | * These two equivalencies are implemented in this function. | |
1106 | */ | |
1107 | static bool compute_tsc_page_parameters(struct pvclock_vcpu_time_info *hv_clock, | |
7357b1df | 1108 | struct ms_hyperv_tsc_page *tsc_ref) |
095cf55d PB |
1109 | { |
1110 | u64 max_mul; | |
1111 | ||
1112 | if (!(hv_clock->flags & PVCLOCK_TSC_STABLE_BIT)) | |
1113 | return false; | |
1114 | ||
1115 | /* | |
1116 | * Check if scale would overflow; if so, we use the time ref counter: |
1117 | * tsc_to_system_mul * 2^(tsc_shift+32) / 100 >= 2^64 | |
1118 | * tsc_to_system_mul / 100 >= 2^(32-tsc_shift) | |
1119 | * tsc_to_system_mul >= 100 * 2^(32-tsc_shift) | |
1120 | */ | |
1121 | max_mul = 100ull << (32 - hv_clock->tsc_shift); | |
1122 | if (hv_clock->tsc_to_system_mul >= max_mul) | |
1123 | return false; | |
1124 | ||
1125 | /* | |
1126 | * Otherwise compute the scale and offset according to the formulas | |
1127 | * derived above. | |
1128 | */ | |
1129 | tsc_ref->tsc_scale = | |
1130 | mul_u64_u32_div(1ULL << (32 + hv_clock->tsc_shift), | |
1131 | hv_clock->tsc_to_system_mul, | |
1132 | 100); | |
1133 | ||
1134 | tsc_ref->tsc_offset = hv_clock->system_time; | |
1135 | do_div(tsc_ref->tsc_offset, 100); | |
1136 | tsc_ref->tsc_offset -= | |
1137 | mul_u64_u64_shr(hv_clock->tsc_timestamp, tsc_ref->tsc_scale, 64); | |
1138 | return true; | |
1139 | } | |
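/*
 * Worked example (illustrative only, the numbers are assumptions): for a
 * 1 GHz guest TSC, kvmclock may use tsc_shift == 1 and
 * tsc_to_system_mul == 2^31, since 2^31 * 2^(1-32) == 1 ns per cycle. The
 * overflow check passes (2^31 < 100 * 2^31) and the computation above gives
 *
 *	tsc_scale = 2^(32+1) * 2^31 / 100 == 2^64 / 100
 *
 * i.e. scale / 2^64 == 1/100, which correctly turns a 1 GHz TSC into the
 * 10 MHz (100ns-granularity) reference counter. With tsc_timestamp ==
 * system_time == 0 the offset ends up zero, matching the derivation above.
 */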
1140 | ||
0469f2f7 VK |
1141 | /* |
1142 | * Don't touch TSC page values if the guest has opted for TSC emulation after | |
1143 | * migration. KVM doesn't fully support reenlightenment notifications or TSC |
1144 | * access emulation, and Hyper-V is known to expect the values in the TSC page to |
1145 | * stay constant before TSC access emulation is disabled from guest side | |
1146 | * (HV_X64_MSR_TSC_EMULATION_STATUS). KVM userspace is expected to preserve TSC | |
1147 | * frequency and guest visible TSC value across migration (and prevent it when | |
1148 | * TSC scaling is unsupported). | |
1149 | */ | |
1150 | static inline bool tsc_page_update_unsafe(struct kvm_hv *hv) | |
1151 | { | |
1152 | return (hv->hv_tsc_page_status != HV_TSC_PAGE_GUEST_CHANGED) && | |
1153 | hv->hv_tsc_emulation_control; | |
1154 | } | |
1155 | ||
095cf55d PB |
1156 | void kvm_hv_setup_tsc_page(struct kvm *kvm, |
1157 | struct pvclock_vcpu_time_info *hv_clock) | |
1158 | { | |
05f04ae4 | 1159 | struct kvm_hv *hv = to_kvm_hv(kvm); |
095cf55d PB |
1160 | u32 tsc_seq; |
1161 | u64 gfn; | |
1162 | ||
1163 | BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence)); | |
7357b1df | 1164 | BUILD_BUG_ON(offsetof(struct ms_hyperv_tsc_page, tsc_sequence) != 0); |
095cf55d | 1165 | |
42dcbe7d VK |
1166 | mutex_lock(&hv->hv_lock); |
1167 | ||
cc9cfddb | 1168 | if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN || |
42dcbe7d | 1169 | hv->hv_tsc_page_status == HV_TSC_PAGE_SET || |
cc9cfddb | 1170 | hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET) |
42dcbe7d | 1171 | goto out_unlock; |
095cf55d | 1172 | |
3f5ad8be PB |
1173 | if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE)) |
1174 | goto out_unlock; | |
1175 | ||
095cf55d PB |
1176 | gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT; |
1177 | /* | |
1178 | * Because the TSC parameters only vary when there is a | |
1179 | * change in the master clock, do not bother with caching. | |
1180 | */ | |
1181 | if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn), | |
1182 | &tsc_seq, sizeof(tsc_seq)))) | |
cc9cfddb | 1183 | goto out_err; |
095cf55d | 1184 | |
0469f2f7 VK |
1185 | if (tsc_seq && tsc_page_update_unsafe(hv)) { |
1186 | if (kvm_read_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref))) | |
1187 | goto out_err; | |
1188 | ||
1189 | hv->hv_tsc_page_status = HV_TSC_PAGE_SET; | |
1190 | goto out_unlock; | |
1191 | } | |
1192 | ||
095cf55d PB |
1193 | /* |
1194 | * While we're computing and writing the parameters, force the | |
1195 | * guest to use the time reference count MSR. | |
1196 | */ | |
1197 | hv->tsc_ref.tsc_sequence = 0; | |
1198 | if (kvm_write_guest(kvm, gfn_to_gpa(gfn), | |
1199 | &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence))) | |
cc9cfddb | 1200 | goto out_err; |
095cf55d PB |
1201 | |
1202 | if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref)) | |
cc9cfddb | 1203 | goto out_err; |
095cf55d PB |
1204 | |
1205 | /* Ensure sequence is zero before writing the rest of the struct. */ | |
1206 | smp_wmb(); | |
1207 | if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref))) | |
cc9cfddb | 1208 | goto out_err; |
095cf55d PB |
1209 | |
1210 | /* | |
1211 | * Now switch to the TSC page mechanism by writing the sequence. | |
1212 | */ | |
1213 | tsc_seq++; | |
1214 | if (tsc_seq == 0xFFFFFFFF || tsc_seq == 0) | |
1215 | tsc_seq = 1; | |
1216 | ||
1217 | /* Write the struct entirely before the non-zero sequence. */ | |
1218 | smp_wmb(); | |
1219 | ||
1220 | hv->tsc_ref.tsc_sequence = tsc_seq; | |
cc9cfddb VK |
1221 | if (kvm_write_guest(kvm, gfn_to_gpa(gfn), |
1222 | &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence))) | |
1223 | goto out_err; | |
1224 | ||
1225 | hv->hv_tsc_page_status = HV_TSC_PAGE_SET; | |
1226 | goto out_unlock; | |
1227 | ||
1228 | out_err: | |
1229 | hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN; | |
3f5ad8be | 1230 | out_unlock: |
05f04ae4 | 1231 | mutex_unlock(&hv->hv_lock); |
095cf55d PB |
1232 | } |
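/*
 * Illustrative sketch (an assumption about the guest side, not code from
 * this file): the sequence dance above (zero the sequence, write the body,
 * smp_wmb(), then publish a new non-zero sequence) exists so that a guest
 * can read its mapping of the page seqlock-style, roughly:
 *
 *	do {
 *		seq    = READ_ONCE(tsc_page->tsc_sequence);
 *		scale  = READ_ONCE(tsc_page->tsc_scale);
 *		offset = READ_ONCE(tsc_page->tsc_offset);
 *	} while (seq && seq != READ_ONCE(tsc_page->tsc_sequence));
 *
 *	if (!seq)
 *		use HV_X64_MSR_TIME_REF_COUNT; (page disabled or mid-update)
 *	else
 *		time = mul_u64_u64_shr(rdtsc(), scale, 64) + offset;
 *
 * "tsc_page" is a hypothetical guest-side mapping of the page this function
 * fills in; barriers are omitted for brevity.
 */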
1233 | ||
42dcbe7d | 1234 | void kvm_hv_request_tsc_page_update(struct kvm *kvm) |
e880c6ea VK |
1235 | { |
1236 | struct kvm_hv *hv = to_kvm_hv(kvm); | |
e880c6ea VK |
1237 | |
1238 | mutex_lock(&hv->hv_lock); | |
1239 | ||
42dcbe7d VK |
1240 | if (hv->hv_tsc_page_status == HV_TSC_PAGE_SET && |
1241 | !tsc_page_update_unsafe(hv)) | |
1242 | hv->hv_tsc_page_status = HV_TSC_PAGE_HOST_CHANGED; | |
cc9cfddb | 1243 | |
e880c6ea VK |
1244 | mutex_unlock(&hv->hv_lock); |
1245 | } | |
1246 | ||
b4128000 VK |
1247 | static bool hv_check_msr_access(struct kvm_vcpu_hv *hv_vcpu, u32 msr) |
1248 | { | |
1561c2cb VK |
1249 | if (!hv_vcpu->enforce_cpuid) |
1250 | return true; | |
1251 | ||
1252 | switch (msr) { | |
1253 | case HV_X64_MSR_GUEST_OS_ID: | |
1254 | case HV_X64_MSR_HYPERCALL: | |
1255 | return hv_vcpu->cpuid_cache.features_eax & | |
1256 | HV_MSR_HYPERCALL_AVAILABLE; | |
b80a92ff VK |
1257 | case HV_X64_MSR_VP_RUNTIME: |
1258 | return hv_vcpu->cpuid_cache.features_eax & | |
1259 | HV_MSR_VP_RUNTIME_AVAILABLE; | |
c2b32867 VK |
1260 | case HV_X64_MSR_TIME_REF_COUNT: |
1261 | return hv_vcpu->cpuid_cache.features_eax & | |
1262 | HV_MSR_TIME_REF_COUNT_AVAILABLE; | |
d2ac25d4 VK |
1263 | case HV_X64_MSR_VP_INDEX: |
1264 | return hv_vcpu->cpuid_cache.features_eax & | |
1265 | HV_MSR_VP_INDEX_AVAILABLE; | |
679008e4 VK |
1266 | case HV_X64_MSR_RESET: |
1267 | return hv_vcpu->cpuid_cache.features_eax & | |
1268 | HV_MSR_RESET_AVAILABLE; | |
a1ec661c VK |
1269 | case HV_X64_MSR_REFERENCE_TSC: |
1270 | return hv_vcpu->cpuid_cache.features_eax & | |
1271 | HV_MSR_REFERENCE_TSC_AVAILABLE; | |
9e2715ca VK |
1272 | case HV_X64_MSR_SCONTROL: |
1273 | case HV_X64_MSR_SVERSION: | |
1274 | case HV_X64_MSR_SIEFP: | |
1275 | case HV_X64_MSR_SIMP: | |
1276 | case HV_X64_MSR_EOM: | |
1277 | case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15: | |
1278 | return hv_vcpu->cpuid_cache.features_eax & | |
1279 | HV_MSR_SYNIC_AVAILABLE; | |
eba60dda VK |
1280 | case HV_X64_MSR_STIMER0_CONFIG: |
1281 | case HV_X64_MSR_STIMER1_CONFIG: | |
1282 | case HV_X64_MSR_STIMER2_CONFIG: | |
1283 | case HV_X64_MSR_STIMER3_CONFIG: | |
1284 | case HV_X64_MSR_STIMER0_COUNT: | |
1285 | case HV_X64_MSR_STIMER1_COUNT: | |
1286 | case HV_X64_MSR_STIMER2_COUNT: | |
1287 | case HV_X64_MSR_STIMER3_COUNT: | |
1288 | return hv_vcpu->cpuid_cache.features_eax & | |
1289 | HV_MSR_SYNTIMER_AVAILABLE; | |
978b5747 VK |
1290 | case HV_X64_MSR_EOI: |
1291 | case HV_X64_MSR_ICR: | |
1292 | case HV_X64_MSR_TPR: | |
1293 | case HV_X64_MSR_VP_ASSIST_PAGE: | |
1294 | return hv_vcpu->cpuid_cache.features_eax & | |
1295 | HV_MSR_APIC_ACCESS_AVAILABLE; | |
1296 | break; | |
9442f3bd VK |
1297 | case HV_X64_MSR_TSC_FREQUENCY: |
1298 | case HV_X64_MSR_APIC_FREQUENCY: | |
1299 | return hv_vcpu->cpuid_cache.features_eax & | |
1300 | HV_ACCESS_FREQUENCY_MSRS; | |
234d01ba VK |
1301 | case HV_X64_MSR_REENLIGHTENMENT_CONTROL: |
1302 | case HV_X64_MSR_TSC_EMULATION_CONTROL: | |
1303 | case HV_X64_MSR_TSC_EMULATION_STATUS: | |
1304 | return hv_vcpu->cpuid_cache.features_eax & | |
1305 | HV_ACCESS_REENLIGHTENMENT; | |
2be1bd3a VK |
1306 | case HV_X64_MSR_TSC_INVARIANT_CONTROL: |
1307 | return hv_vcpu->cpuid_cache.features_eax & | |
1308 | HV_ACCESS_TSC_INVARIANT; | |
0a19c899 VK |
1309 | case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4: |
1310 | case HV_X64_MSR_CRASH_CTL: | |
1311 | return hv_vcpu->cpuid_cache.features_edx & | |
1312 | HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE; | |
17b6d517 VK |
1313 | case HV_X64_MSR_SYNDBG_OPTIONS: |
1314 | case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER: | |
1315 | return hv_vcpu->cpuid_cache.features_edx & | |
1316 | HV_FEATURE_DEBUG_MSRS_AVAILABLE; | |
1561c2cb VK |
1317 | default: |
1318 | break; | |
1319 | } | |
1320 | ||
d66bfa36 | 1321 | return false; |
b4128000 VK |
1322 | } |
1323 | ||
e7d9513b AS |
1324 | static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data, |
1325 | bool host) | |
e83d5887 AS |
1326 | { |
1327 | struct kvm *kvm = vcpu->kvm; | |
05f04ae4 | 1328 | struct kvm_hv *hv = to_kvm_hv(kvm); |
e83d5887 | 1329 | |
b4128000 VK |
1330 | if (unlikely(!host && !hv_check_msr_access(to_hv_vcpu(vcpu), msr))) |
1331 | return 1; | |
1332 | ||
e83d5887 AS |
1333 | switch (msr) { |
1334 | case HV_X64_MSR_GUEST_OS_ID: | |
1335 | hv->hv_guest_os_id = data; | |
1336 | /* setting guest os id to zero disables hypercall page */ | |
1337 | if (!hv->hv_guest_os_id) | |
1338 | hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE; | |
1339 | break; | |
1340 | case HV_X64_MSR_HYPERCALL: { | |
79033beb JM |
1341 | u8 instructions[9]; |
1342 | int i = 0; | |
1343 | u64 addr; | |
e83d5887 AS |
1344 | |
1345 | /* if guest os id is not set, the hypercall page should remain disabled */ |
1346 | if (!hv->hv_guest_os_id) | |
1347 | break; | |
1348 | if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) { | |
1349 | hv->hv_hypercall = data; | |
1350 | break; | |
1351 | } | |
79033beb JM |
1352 | |
1353 | /* | |
1354 | * If Xen and Hyper-V hypercalls are both enabled, disambiguate | |
1355 | * the same way Xen itself does, by setting the bit 31 of EAX | |
1356 | * which is RsvdZ in the 32-bit Hyper-V hypercall ABI and just | |
1357 | * going to be clobbered on 64-bit. | |
1358 | */ | |
1359 | if (kvm_xen_hypercall_enabled(kvm)) { | |
1360 | /* orl $0x80000000, %eax */ | |
1361 | instructions[i++] = 0x0d; | |
1362 | instructions[i++] = 0x00; | |
1363 | instructions[i++] = 0x00; | |
1364 | instructions[i++] = 0x00; | |
1365 | instructions[i++] = 0x80; | |
1366 | } | |
1367 | ||
1368 | /* vmcall/vmmcall */ | |
1369 | static_call(kvm_x86_patch_hypercall)(vcpu, instructions + i); | |
1370 | i += 3; | |
1371 | ||
1372 | /* ret */ | |
1373 | ((unsigned char *)instructions)[i++] = 0xc3; | |
1374 | ||
1375 | addr = data & HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK; | |
1376 | if (kvm_vcpu_write_guest(vcpu, addr, instructions, i)) | |
e83d5887 AS |
1377 | return 1; |
1378 | hv->hv_hypercall = data; | |
e83d5887 AS |
1379 | break; |
1380 | } | |
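/*
 * Illustrative note: the byte values below follow from the code above plus
 * standard x86 opcode encodings (an assumption, not quoted from this file).
 * With Xen hypercalls also enabled, the page written above starts with
 * "0d 00 00 00 80" (orl $0x80000000, %eax), followed by the 3-byte
 * VMCALL (0f 01 c1) or VMMCALL (0f 01 d9) patched in by the vendor module,
 * and a final "c3" (ret). Without Xen it is just VMCALL/VMMCALL plus ret.
 */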
095cf55d | 1381 | case HV_X64_MSR_REFERENCE_TSC: |
e83d5887 | 1382 | hv->hv_tsc_page = data; |
cc9cfddb VK |
1383 | if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE) { |
1384 | if (!host) | |
1385 | hv->hv_tsc_page_status = HV_TSC_PAGE_GUEST_CHANGED; | |
1386 | else | |
1387 | hv->hv_tsc_page_status = HV_TSC_PAGE_HOST_CHANGED; | |
095cf55d | 1388 | kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); |
cc9cfddb VK |
1389 | } else { |
1390 | hv->hv_tsc_page_status = HV_TSC_PAGE_UNSET; | |
1391 | } | |
e83d5887 | 1392 | break; |
e7d9513b | 1393 | case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4: |
05f04ae4 | 1394 | return kvm_hv_msr_set_crash_data(kvm, |
e7d9513b AS |
1395 | msr - HV_X64_MSR_CRASH_P0, |
1396 | data); | |
1397 | case HV_X64_MSR_CRASH_CTL: | |
05f04ae4 VK |
1398 | if (host) |
1399 | return kvm_hv_msr_set_crash_ctl(kvm, data); | |
1400 | ||
1401 | if (data & HV_CRASH_CTL_CRASH_NOTIFY) { | |
1402 | vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n", | |
1403 | hv->hv_crash_param[0], | |
1404 | hv->hv_crash_param[1], | |
1405 | hv->hv_crash_param[2], | |
1406 | hv->hv_crash_param[3], | |
1407 | hv->hv_crash_param[4]); | |
1408 | ||
1409 | /* Send notification about crash to user space */ | |
1410 | kvm_make_request(KVM_REQ_HV_CRASH, vcpu); | |
1411 | } | |
1412 | break; | |
e516cebb AS |
1413 | case HV_X64_MSR_RESET: |
1414 | if (data == 1) { | |
1415 | vcpu_debug(vcpu, "hyper-v reset requested\n"); | |
1416 | kvm_make_request(KVM_REQ_HV_RESET, vcpu); | |
1417 | } | |
1418 | break; | |
a2e164e7 VK |
1419 | case HV_X64_MSR_REENLIGHTENMENT_CONTROL: |
1420 | hv->hv_reenlightenment_control = data; | |
1421 | break; | |
1422 | case HV_X64_MSR_TSC_EMULATION_CONTROL: | |
1423 | hv->hv_tsc_emulation_control = data; | |
1424 | break; | |
1425 | case HV_X64_MSR_TSC_EMULATION_STATUS: | |
d2547cf5 VK |
1426 | if (data && !host) |
1427 | return 1; | |
1428 | ||
a2e164e7 VK |
1429 | hv->hv_tsc_emulation_status = data; |
1430 | break; | |
44883f01 PB |
1431 | case HV_X64_MSR_TIME_REF_COUNT: |
1432 | /* read-only, but still ignore it if host-initiated */ | |
1433 | if (!host) | |
1434 | return 1; | |
1435 | break; | |
2be1bd3a VK |
1436 | case HV_X64_MSR_TSC_INVARIANT_CONTROL: |
1437 | /* Only bit 0 is supported */ | |
1438 | if (data & ~HV_EXPOSE_INVARIANT_TSC) | |
1439 | return 1; | |
1440 | ||
1441 | /* The feature can't be disabled from the guest */ | |
1442 | if (!host && hv->hv_invtsc_control && !data) | |
1443 | return 1; | |
1444 | ||
1445 | hv->hv_invtsc_control = data; | |
1446 | break; | |
f97f5a56 JD |
1447 | case HV_X64_MSR_SYNDBG_OPTIONS: |
1448 | case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER: | |
1449 | return syndbg_set_msr(vcpu, msr, data, host); | |
e83d5887 | 1450 | default: |
e76ae527 | 1451 | kvm_pr_unimpl_wrmsr(vcpu, msr, data); |
e83d5887 AS |
1452 | return 1; |
1453 | } | |
1454 | return 0; | |
1455 | } | |
1456 | ||
9eec50b8 AS |
1457 | /* Calculate cpu time spent by current task in 100ns units */ |
1458 | static u64 current_task_runtime_100ns(void) | |
1459 | { | |
5613fda9 | 1460 | u64 utime, stime; |
9eec50b8 AS |
1461 | |
1462 | task_cputime_adjusted(current, &utime, &stime); | |
5613fda9 FW |
1463 | |
1464 | return div_u64(utime + stime, 100); | |
9eec50b8 AS |
1465 | } |
1466 | ||
1467 | static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) | |
e83d5887 | 1468 | { |
9ff5e030 | 1469 | struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); |
e83d5887 | 1470 | |
b4128000 VK |
1471 | if (unlikely(!host && !hv_check_msr_access(hv_vcpu, msr))) |
1472 | return 1; | |
1473 | ||
e83d5887 | 1474 | switch (msr) { |
87ee613d | 1475 | case HV_X64_MSR_VP_INDEX: { |
05f04ae4 | 1476 | struct kvm_hv *hv = to_kvm_hv(vcpu->kvm); |
87ee613d VK |
1477 | u32 new_vp_index = (u32)data; |
1478 | ||
1479 | if (!host || new_vp_index >= KVM_MAX_VCPUS) | |
d3457c87 | 1480 | return 1; |
87ee613d VK |
1481 | |
1482 | if (new_vp_index == hv_vcpu->vp_index) | |
1483 | return 0; | |
1484 | ||
1485 | /* | |
1486 | * The VP index is initialized to vcpu_idx by |
1487 | * kvm_hv_vcpu_init so they initially match. Now the |
1488 | * VP index is changing, adjust num_mismatched_vp_indexes if | |
1489 | * it now matches or no longer matches vcpu_idx. | |
1490 | */ | |
4eeef242 | 1491 | if (hv_vcpu->vp_index == vcpu->vcpu_idx) |
87ee613d | 1492 | atomic_inc(&hv->num_mismatched_vp_indexes); |
4eeef242 | 1493 | else if (new_vp_index == vcpu->vcpu_idx) |
87ee613d VK |
1494 | atomic_dec(&hv->num_mismatched_vp_indexes); |
1495 | ||
1496 | hv_vcpu->vp_index = new_vp_index; | |
d3457c87 | 1497 | break; |
87ee613d | 1498 | } |
d4abc577 | 1499 | case HV_X64_MSR_VP_ASSIST_PAGE: { |
e83d5887 AS |
1500 | u64 gfn; |
1501 | unsigned long addr; | |
1502 | ||
d4abc577 | 1503 | if (!(data & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) { |
1779a39f | 1504 | hv_vcpu->hv_vapic = data; |
77c3323f | 1505 | if (kvm_lapic_set_pv_eoi(vcpu, 0, 0)) |
e83d5887 AS |
1506 | return 1; |
1507 | break; | |
1508 | } | |
d4abc577 | 1509 | gfn = data >> HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT; |
e83d5887 AS |
1510 | addr = kvm_vcpu_gfn_to_hva(vcpu, gfn); |
1511 | if (kvm_is_error_hva(addr)) | |
1512 | return 1; | |
12e0c618 VK |
1513 | |
1514 | /* | |
67b0ae43 | 1515 | * Clear apic_assist portion of struct hv_vp_assist_page |
12e0c618 VK |
1516 | * only; there can be valuable data in the rest which needs |
1517 | * to be preserved, e.g. on migration. |
1518 | */ | |
9eb41c52 | 1519 | if (__put_user(0, (u32 __user *)addr)) |
e83d5887 | 1520 | return 1; |
1779a39f | 1521 | hv_vcpu->hv_vapic = data; |
e83d5887 | 1522 | kvm_vcpu_mark_page_dirty(vcpu, gfn); |
77c3323f | 1523 | if (kvm_lapic_set_pv_eoi(vcpu, |
72bbf935 LP |
1524 | gfn_to_gpa(gfn) | KVM_MSR_ENABLED, |
1525 | sizeof(struct hv_vp_assist_page))) | |
e83d5887 AS |
1526 | return 1; |
1527 | break; | |
1528 | } | |
1529 | case HV_X64_MSR_EOI: | |
1530 | return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data); | |
1531 | case HV_X64_MSR_ICR: | |
1532 | return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data); | |
1533 | case HV_X64_MSR_TPR: | |
1534 | return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data); | |
9eec50b8 AS |
1535 | case HV_X64_MSR_VP_RUNTIME: |
1536 | if (!host) | |
1537 | return 1; | |
1779a39f | 1538 | hv_vcpu->runtime_offset = data - current_task_runtime_100ns(); |
9eec50b8 | 1539 | break; |
5c919412 AS |
1540 | case HV_X64_MSR_SCONTROL: |
1541 | case HV_X64_MSR_SVERSION: | |
1542 | case HV_X64_MSR_SIEFP: | |
1543 | case HV_X64_MSR_SIMP: | |
1544 | case HV_X64_MSR_EOM: | |
1545 | case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15: | |
e0121fa2 | 1546 | return synic_set_msr(to_hv_synic(vcpu), msr, data, host); |
1f4b34f8 AS |
1547 | case HV_X64_MSR_STIMER0_CONFIG: |
1548 | case HV_X64_MSR_STIMER1_CONFIG: | |
1549 | case HV_X64_MSR_STIMER2_CONFIG: | |
1550 | case HV_X64_MSR_STIMER3_CONFIG: { | |
1551 | int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2; | |
1552 | ||
aafa97fd | 1553 | return stimer_set_config(to_hv_stimer(vcpu, timer_index), |
1f4b34f8 AS |
1554 | data, host); |
1555 | } | |
1556 | case HV_X64_MSR_STIMER0_COUNT: | |
1557 | case HV_X64_MSR_STIMER1_COUNT: | |
1558 | case HV_X64_MSR_STIMER2_COUNT: | |
1559 | case HV_X64_MSR_STIMER3_COUNT: { | |
1560 | int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2; | |
1561 | ||
aafa97fd | 1562 | return stimer_set_count(to_hv_stimer(vcpu, timer_index), |
1f4b34f8 AS |
1563 | data, host); |
1564 | } | |
44883f01 PB |
1565 | case HV_X64_MSR_TSC_FREQUENCY: |
1566 | case HV_X64_MSR_APIC_FREQUENCY: | |
1567 | /* read-only, but still ignore it if host-initiated */ | |
1568 | if (!host) | |
1569 | return 1; | |
1570 | break; | |
e83d5887 | 1571 | default: |
e76ae527 | 1572 | kvm_pr_unimpl_wrmsr(vcpu, msr, data); |
e83d5887 AS |
1573 | return 1; |
1574 | } | |
1575 | ||
1576 | return 0; | |
1577 | } | |
1578 | ||
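/*
 * Handle reads of partition-wide Hyper-V MSRs; called under hv->hv_lock
 * from kvm_hv_get_msr_common().
 */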
f97f5a56 JD |
1579 | static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, |
1580 | bool host) | |
e83d5887 AS |
1581 | { |
1582 | u64 data = 0; | |
1583 | struct kvm *kvm = vcpu->kvm; | |
05f04ae4 | 1584 | struct kvm_hv *hv = to_kvm_hv(kvm); |
e83d5887 | 1585 | |
b4128000 VK |
1586 | if (unlikely(!host && !hv_check_msr_access(to_hv_vcpu(vcpu), msr))) |
1587 | return 1; | |
1588 | ||
e83d5887 AS |
1589 | switch (msr) { |
1590 | case HV_X64_MSR_GUEST_OS_ID: | |
1591 | data = hv->hv_guest_os_id; | |
1592 | break; | |
1593 | case HV_X64_MSR_HYPERCALL: | |
1594 | data = hv->hv_hypercall; | |
1595 | break; | |
93bf4172 AS |
1596 | case HV_X64_MSR_TIME_REF_COUNT: |
1597 | data = get_time_ref_counter(kvm); | |
e83d5887 | 1598 | break; |
e83d5887 AS |
1599 | case HV_X64_MSR_REFERENCE_TSC: |
1600 | data = hv->hv_tsc_page; | |
1601 | break; | |
e7d9513b | 1602 | case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4: |
05f04ae4 | 1603 | return kvm_hv_msr_get_crash_data(kvm, |
e7d9513b AS |
1604 | msr - HV_X64_MSR_CRASH_P0, |
1605 | pdata); | |
1606 | case HV_X64_MSR_CRASH_CTL: | |
05f04ae4 | 1607 | return kvm_hv_msr_get_crash_ctl(kvm, pdata); |
e516cebb AS |
1608 | case HV_X64_MSR_RESET: |
1609 | data = 0; | |
1610 | break; | |
a2e164e7 VK |
1611 | case HV_X64_MSR_REENLIGHTENMENT_CONTROL: |
1612 | data = hv->hv_reenlightenment_control; | |
1613 | break; | |
1614 | case HV_X64_MSR_TSC_EMULATION_CONTROL: | |
1615 | data = hv->hv_tsc_emulation_control; | |
1616 | break; | |
1617 | case HV_X64_MSR_TSC_EMULATION_STATUS: | |
1618 | data = hv->hv_tsc_emulation_status; | |
1619 | break; | |
2be1bd3a VK |
1620 | case HV_X64_MSR_TSC_INVARIANT_CONTROL: |
1621 | data = hv->hv_invtsc_control; | |
1622 | break; | |
f97f5a56 JD |
1623 | case HV_X64_MSR_SYNDBG_OPTIONS: |
1624 | case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER: | |
1625 | return syndbg_get_msr(vcpu, msr, pdata, host); | |
e83d5887 | 1626 | default: |
e76ae527 | 1627 | kvm_pr_unimpl_rdmsr(vcpu, msr); |
e83d5887 AS |
1628 | return 1; |
1629 | } | |
1630 | ||
1631 | *pdata = data; | |
1632 | return 0; | |
1633 | } | |
1634 | ||
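/*
 * Handle reads of per-vCPU Hyper-V MSRs; the partition-wide counterparts
 * are served by kvm_hv_get_msr_pw().
 */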
44883f01 PB |
1635 | static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, |
1636 | bool host) | |
e83d5887 AS |
1637 | { |
1638 | u64 data = 0; | |
9ff5e030 | 1639 | struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); |
e83d5887 | 1640 | |
b4128000 VK |
1641 | if (unlikely(!host && !hv_check_msr_access(hv_vcpu, msr))) |
1642 | return 1; | |
1643 | ||
e83d5887 | 1644 | switch (msr) { |
d3457c87 | 1645 | case HV_X64_MSR_VP_INDEX: |
1779a39f | 1646 | data = hv_vcpu->vp_index; |
e83d5887 | 1647 | break; |
e83d5887 AS |
1648 | case HV_X64_MSR_EOI: |
1649 | return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata); | |
1650 | case HV_X64_MSR_ICR: | |
1651 | return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata); | |
1652 | case HV_X64_MSR_TPR: | |
1653 | return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata); | |
d4abc577 | 1654 | case HV_X64_MSR_VP_ASSIST_PAGE: |
1779a39f | 1655 | data = hv_vcpu->hv_vapic; |
e83d5887 | 1656 | break; |
9eec50b8 | 1657 | case HV_X64_MSR_VP_RUNTIME: |
1779a39f | 1658 | data = current_task_runtime_100ns() + hv_vcpu->runtime_offset; |
9eec50b8 | 1659 | break; |
5c919412 AS |
1660 | case HV_X64_MSR_SCONTROL: |
1661 | case HV_X64_MSR_SVERSION: | |
1662 | case HV_X64_MSR_SIEFP: | |
1663 | case HV_X64_MSR_SIMP: | |
1664 | case HV_X64_MSR_EOM: | |
1665 | case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15: | |
e0121fa2 | 1666 | return synic_get_msr(to_hv_synic(vcpu), msr, pdata, host); |
1f4b34f8 AS |
1667 | case HV_X64_MSR_STIMER0_CONFIG: |
1668 | case HV_X64_MSR_STIMER1_CONFIG: | |
1669 | case HV_X64_MSR_STIMER2_CONFIG: | |
1670 | case HV_X64_MSR_STIMER3_CONFIG: { | |
1671 | int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2; | |
1672 | ||
aafa97fd | 1673 | return stimer_get_config(to_hv_stimer(vcpu, timer_index), |
1f4b34f8 AS |
1674 | pdata); |
1675 | } | |
1676 | case HV_X64_MSR_STIMER0_COUNT: | |
1677 | case HV_X64_MSR_STIMER1_COUNT: | |
1678 | case HV_X64_MSR_STIMER2_COUNT: | |
1679 | case HV_X64_MSR_STIMER3_COUNT: { | |
1680 | int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2; | |
1681 | ||
aafa97fd | 1682 | return stimer_get_count(to_hv_stimer(vcpu, timer_index), |
1f4b34f8 AS |
1683 | pdata); |
1684 | } | |
72c139ba LP |
1685 | case HV_X64_MSR_TSC_FREQUENCY: |
1686 | data = (u64)vcpu->arch.virtual_tsc_khz * 1000; | |
1687 | break; | |
1688 | case HV_X64_MSR_APIC_FREQUENCY: | |
1689 | data = APIC_BUS_FREQUENCY; | |
1690 | break; | |
e83d5887 | 1691 | default: |
e76ae527 | 1692 | kvm_pr_unimpl_rdmsr(vcpu, msr); |
e83d5887 AS |
1693 | return 1; |
1694 | } | |
1695 | *pdata = data; | |
1696 | return 0; | |
1697 | } | |
1698 | ||
e7d9513b | 1699 | int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host) |
e83d5887 | 1700 | { |
05f04ae4 VK |
1701 | struct kvm_hv *hv = to_kvm_hv(vcpu->kvm); |
1702 | ||
8f014550 VK |
1703 | if (!host && !vcpu->arch.hyperv_enabled) |
1704 | return 1; | |
1705 | ||
1cac8d9f SC |
1706 | if (kvm_hv_vcpu_init(vcpu)) |
1707 | return 1; | |
fc08b628 | 1708 | |
e83d5887 AS |
1709 | if (kvm_hv_msr_partition_wide(msr)) { |
1710 | int r; | |
1711 | ||
05f04ae4 | 1712 | mutex_lock(&hv->hv_lock); |
e7d9513b | 1713 | r = kvm_hv_set_msr_pw(vcpu, msr, data, host); |
05f04ae4 | 1714 | mutex_unlock(&hv->hv_lock); |
e83d5887 AS |
1715 | return r; |
1716 | } else | |
9eec50b8 | 1717 | return kvm_hv_set_msr(vcpu, msr, data, host); |
e83d5887 AS |
1718 | } |
1719 | ||
44883f01 | 1720 | int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host) |
e83d5887 | 1721 | { |
05f04ae4 VK |
1722 | struct kvm_hv *hv = to_kvm_hv(vcpu->kvm); |
1723 | ||
8f014550 VK |
1724 | if (!host && !vcpu->arch.hyperv_enabled) |
1725 | return 1; | |
1726 | ||
1cac8d9f SC |
1727 | if (kvm_hv_vcpu_init(vcpu)) |
1728 | return 1; | |
fc08b628 | 1729 | |
e83d5887 AS |
1730 | if (kvm_hv_msr_partition_wide(msr)) { |
1731 | int r; | |
1732 | ||
05f04ae4 | 1733 | mutex_lock(&hv->hv_lock); |
f97f5a56 | 1734 | r = kvm_hv_get_msr_pw(vcpu, msr, pdata, host); |
05f04ae4 | 1735 | mutex_unlock(&hv->hv_lock); |
e83d5887 AS |
1736 | return r; |
1737 | } else | |
44883f01 | 1738 | return kvm_hv_get_msr(vcpu, msr, pdata, host); |
e83d5887 AS |
1739 | } |
1740 | ||
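/*
 * Convert a sparse VP set (valid_bank_mask + sparse_banks) into a bitmap of
 * vCPU indices. When every VP index matches its vCPU index the banks can be
 * copied directly, otherwise each vCPU's VP index is looked up individually.
 */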
9c52f6b3 SC |
1741 | static void sparse_set_to_vcpu_mask(struct kvm *kvm, u64 *sparse_banks, |
1742 | u64 valid_bank_mask, unsigned long *vcpu_mask) | |
c7012676 | 1743 | { |
05f04ae4 | 1744 | struct kvm_hv *hv = to_kvm_hv(kvm); |
9c52f6b3 SC |
1745 | bool has_mismatch = atomic_read(&hv->num_mismatched_vp_indexes); |
1746 | u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS]; | |
f21dd494 | 1747 | struct kvm_vcpu *vcpu; |
46808a4c MZ |
1748 | int bank, sbank = 0; |
1749 | unsigned long i; | |
9c52f6b3 SC |
1750 | u64 *bitmap; |
1751 | ||
1752 | BUILD_BUG_ON(sizeof(vp_bitmap) > | |
1753 | sizeof(*vcpu_mask) * BITS_TO_LONGS(KVM_MAX_VCPUS)); | |
1754 | ||
1755 | /* | |
1756 | * If vp_index == vcpu_idx for all vCPUs, fill vcpu_mask directly, else | |
1757 | * fill a temporary buffer and manually test each vCPU's VP index. | |
1758 | */ | |
1759 | if (likely(!has_mismatch)) | |
1760 | bitmap = (u64 *)vcpu_mask; | |
1761 | else | |
1762 | bitmap = vp_bitmap; | |
c7012676 | 1763 | |
9c52f6b3 SC |
1764 | /* |
1765 | * Each set of 64 VPs is packed into sparse_banks, with valid_bank_mask | |
1766 | * having a '1' for each bank that exists in sparse_banks. Sets must | |
1767 | * be in ascending order, i.e. bank0..bankN. | |
1768 | */ | |
1769 | memset(bitmap, 0, sizeof(vp_bitmap)); | |
f21dd494 VK |
1770 | for_each_set_bit(bank, (unsigned long *)&valid_bank_mask, |
1771 | KVM_HV_MAX_SPARSE_VCPU_SET_BITS) | |
9c52f6b3 | 1772 | bitmap[bank] = sparse_banks[sbank++]; |
c7012676 | 1773 | |
9c52f6b3 SC |
1774 | if (likely(!has_mismatch)) |
1775 | return; | |
2cefc5fe | 1776 | |
9c52f6b3 | 1777 | bitmap_zero(vcpu_mask, KVM_MAX_VCPUS); |
f21dd494 | 1778 | kvm_for_each_vcpu(i, vcpu, kvm) { |
f2bc14b6 | 1779 | if (test_bit(kvm_hv_get_vpindex(vcpu), (unsigned long *)vp_bitmap)) |
9c52f6b3 | 1780 | __set_bit(i, vcpu_mask); |
f21dd494 | 1781 | } |
c7012676 VK |
1782 | } |
1783 | ||
b6c2c22f VK |
1784 | static bool hv_is_vp_in_sparse_set(u32 vp_id, u64 valid_bank_mask, u64 sparse_banks[]) |
1785 | { | |
1786 | int valid_bit_nr = vp_id / HV_VCPUS_PER_SPARSE_BANK; | |
1787 | unsigned long sbank; | |
1788 | ||
1789 | if (!test_bit(valid_bit_nr, (unsigned long *)&valid_bank_mask)) | |
1790 | return false; | |
1791 | ||
1792 | /* | |
1793 | * The index into the sparse bank is the number of preceding bits in | |
1794 | * the valid mask. Optimize for VMs with <64 vCPUs by skipping the | |
1795 | * fancy math if there can't possibly be preceding bits. | |
1796 | */ | |
1797 | if (valid_bit_nr) | |
1798 | sbank = hweight64(valid_bank_mask & GENMASK_ULL(valid_bit_nr - 1, 0)); | |
1799 | else | |
1800 | sbank = 0; | |
1801 | ||
1802 | return test_bit(vp_id % HV_VCPUS_PER_SPARSE_BANK, | |
1803 | (unsigned long *)&sparse_banks[sbank]); | |
1804 | } | |
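/*
 * Illustrative example (assuming HV_VCPUS_PER_SPARSE_BANK == 64): for
 * vp_id == 130 with valid_bank_mask == 0x5 (banks 0 and 2 present),
 * valid_bit_nr == 2, sbank == hweight64(0x5 & GENMASK_ULL(1, 0)) == 1,
 * and bit 130 % 64 == 2 of sparse_banks[1] is tested.
 */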
1805 | ||
bd38b320 | 1806 | struct kvm_hv_hcall { |
8b9e13d2 | 1807 | /* Hypercall input data */ |
bd38b320 SC |
1808 | u64 param; |
1809 | u64 ingpa; | |
1810 | u64 outgpa; | |
1811 | u16 code; | |
bd1ba573 | 1812 | u16 var_cnt; |
bd38b320 SC |
1813 | u16 rep_cnt; |
1814 | u16 rep_idx; | |
1815 | bool fast; | |
1816 | bool rep; | |
5974565b | 1817 | sse128_t xmm[HV_HYPERCALL_MAX_XMM_REGISTERS]; |
8b9e13d2 VK |
1818 | |
1819 | /* | |
1820 | * Current read offset when KVM reads hypercall input data gradually, | |
1821 | * either offset in bytes from 'ingpa' for regular hypercalls or the | |
1822 | * number of already consumed 'XMM halves' for 'fast' hypercalls. | |
1823 | */ | |
1824 | union { | |
1825 | gpa_t data_offset; | |
1826 | int consumed_xmm_halves; | |
1827 | }; | |
bd38b320 SC |
1828 | }; |
1829 | ||
79661c37 | 1830 | |
56b5354f | 1831 | static int kvm_hv_get_hc_data(struct kvm *kvm, struct kvm_hv_hcall *hc, |
8b9e13d2 | 1832 | u16 orig_cnt, u16 cnt_cap, u64 *data) |
56b5354f SC |
1833 | { |
1834 | /* | |
1835 | * Preserve the original count when ignoring entries via a "cap"; KVM |
1836 | * still needs to validate the guest input (though the non-XMM path | |
1837 | * punts on the checks). | |
1838 | */ | |
1839 | u16 cnt = min(orig_cnt, cnt_cap); | |
1840 | int i, j; | |
79661c37 | 1841 | |
c0f1eaeb PB |
1842 | if (hc->fast) { |
1843 | /* | |
1844 | * Each XMM holds two sparse banks, but do not count halves that | |
1845 | * have already been consumed for hypercall parameters. | |
1846 | */ | |
8b9e13d2 | 1847 | if (orig_cnt > 2 * HV_HYPERCALL_MAX_XMM_REGISTERS - hc->consumed_xmm_halves) |
c0f1eaeb | 1848 | return HV_STATUS_INVALID_HYPERCALL_INPUT; |
56b5354f SC |
1849 | |
1850 | for (i = 0; i < cnt; i++) { | |
8b9e13d2 | 1851 | j = i + hc->consumed_xmm_halves; |
c0f1eaeb | 1852 | if (j % 2) |
56b5354f | 1853 | data[i] = sse128_hi(hc->xmm[j / 2]); |
c0f1eaeb | 1854 | else |
56b5354f | 1855 | data[i] = sse128_lo(hc->xmm[j / 2]); |
c0f1eaeb PB |
1856 | } |
1857 | return 0; | |
1858 | } | |
1859 | ||
8b9e13d2 | 1860 | return kvm_read_guest(kvm, hc->ingpa + hc->data_offset, data, |
56b5354f SC |
1861 | cnt * sizeof(*data)); |
1862 | } | |
1863 | ||
1864 | static u64 kvm_get_sparse_vp_set(struct kvm *kvm, struct kvm_hv_hcall *hc, | |
8b9e13d2 | 1865 | u64 *sparse_banks) |
56b5354f | 1866 | { |
ca7372ac | 1867 | if (hc->var_cnt > HV_MAX_SPARSE_VCPU_BANKS) |
56b5354f SC |
1868 | return -EINVAL; |
1869 | ||
1870 | /* Cap var_cnt to ignore banks that cannot contain a legal VP index. */ | |
1871 | return kvm_hv_get_hc_data(kvm, hc, hc->var_cnt, KVM_HV_MAX_SPARSE_VCPU_SET_BITS, | |
8b9e13d2 | 1872 | sparse_banks); |
a0dd008f SC |
1873 | } |
1874 | ||
8b9e13d2 | 1875 | static int kvm_hv_get_tlb_flush_entries(struct kvm *kvm, struct kvm_hv_hcall *hc, u64 entries[]) |
26097086 | 1876 | { |
8b9e13d2 | 1877 | return kvm_hv_get_hc_data(kvm, hc, hc->rep_cnt, hc->rep_cnt, entries); |
26097086 VK |
1878 | } |
1879 | ||
c58a318f VK |
1880 | static void hv_tlb_flush_enqueue(struct kvm_vcpu *vcpu, |
1881 | struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo, | |
1882 | u64 *entries, int count) | |
0823570f | 1883 | { |
0823570f VK |
1884 | struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); |
1885 | u64 flush_all_entry = KVM_HV_TLB_FLUSHALL_ENTRY; | |
1886 | ||
1887 | if (!hv_vcpu) | |
1888 | return; | |
1889 | ||
26097086 VK |
1890 | spin_lock(&tlb_flush_fifo->write_lock); |
1891 | ||
1892 | /* | |
1893 | * All entries should fit in the fifo, leaving one free for the 'flush all' |
1894 | * entry in case another request comes in. If there's not enough |
1895 | * space, just put the 'flush all' entry there instead. |
1896 | */ | |
1897 | if (count && entries && count < kfifo_avail(&tlb_flush_fifo->entries)) { | |
1898 | WARN_ON(kfifo_in(&tlb_flush_fifo->entries, entries, count) != count); | |
1899 | goto out_unlock; | |
1900 | } | |
1901 | ||
1902 | /* | |
1903 | * Note: full fifo always contains 'flush all' entry, no need to check the | |
1904 | * return value. | |
1905 | */ | |
1906 | kfifo_in(&tlb_flush_fifo->entries, &flush_all_entry, 1); | |
1907 | ||
1908 | out_unlock: | |
1909 | spin_unlock(&tlb_flush_fifo->write_lock); | |
0823570f VK |
1910 | } |
1911 | ||
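/*
 * Drain the vCPU's TLB flush fifo, flushing the queued GVAs page by page;
 * a negative return tells the caller to fall back to a full TLB flush.
 */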
1912 | int kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu) | |
1913 | { | |
1914 | struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo; | |
1915 | struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); | |
26097086 VK |
1916 | u64 entries[KVM_HV_TLB_FLUSH_FIFO_SIZE]; |
1917 | int i, j, count; | |
1918 | gva_t gva; | |
0823570f | 1919 | |
26097086 | 1920 | if (!tdp_enabled || !hv_vcpu) |
0823570f VK |
1921 | return -EINVAL; |
1922 | ||
53ca765a | 1923 | tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(vcpu, is_guest_mode(vcpu)); |
0823570f | 1924 | |
26097086 VK |
1925 | count = kfifo_out(&tlb_flush_fifo->entries, entries, KVM_HV_TLB_FLUSH_FIFO_SIZE); |
1926 | ||
1927 | for (i = 0; i < count; i++) { | |
1928 | if (entries[i] == KVM_HV_TLB_FLUSHALL_ENTRY) | |
1929 | goto out_flush_all; | |
1930 | ||
1931 | /* | |
1932 | * Lower 12 bits of 'address' encode the number of additional | |
1933 | * pages to flush. | |
1934 | */ | |
1935 | gva = entries[i] & PAGE_MASK; | |
1936 | for (j = 0; j < (entries[i] & ~PAGE_MASK) + 1; j++) | |
1937 | static_call(kvm_x86_flush_tlb_gva)(vcpu, gva + j * PAGE_SIZE); | |
1938 | ||
1939 | ++vcpu->stat.tlb_flush; | |
1940 | } | |
1941 | return 0; | |
1942 | ||
1943 | out_flush_all: | |
0823570f VK |
1944 | kfifo_reset_out(&tlb_flush_fifo->entries); |
1945 | ||
26097086 VK |
1946 | /* Fall back to full flush. */ |
1947 | return -ENOSPC; | |
0823570f VK |
1948 | } |
1949 | ||
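/*
 * Handle HVCALL_FLUSH_VIRTUAL_ADDRESS_{SPACE,LIST}{,_EX}: parse the (possibly
 * XMM-based 'fast') hypercall input, queue flush requests for the targeted
 * vCPUs (L1 or, for 'direct' flush, L2) and kick them via KVM_REQ_HV_TLB_FLUSH.
 */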
c0f1eaeb | 1950 | static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc) |
e2f11f42 | 1951 | { |
7d5e88d3 VK |
1952 | struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); |
1953 | u64 *sparse_banks = hv_vcpu->sparse_banks; | |
72167a9d | 1954 | struct kvm *kvm = vcpu->kvm; |
c7012676 | 1955 | struct hv_tlb_flush_ex flush_ex; |
e2f11f42 | 1956 | struct hv_tlb_flush flush; |
9c52f6b3 | 1957 | DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS); |
c58a318f | 1958 | struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo; |
26097086 VK |
1959 | /* |
1960 | * Normally, there can be no more than 'KVM_HV_TLB_FLUSH_FIFO_SIZE' | |
1961 | * entries on the TLB flush fifo. The last entry, however, always needs |
1962 | * to be left free for the 'flush all' entry, which gets placed when |
1963 | * there is not enough space for all the requested entries. |
1964 | */ | |
1965 | u64 __tlb_flush_entries[KVM_HV_TLB_FLUSH_FIFO_SIZE - 1]; | |
1966 | u64 *tlb_flush_entries; | |
2cefc5fe | 1967 | u64 valid_bank_mask; |
0823570f VK |
1968 | struct kvm_vcpu *v; |
1969 | unsigned long i; | |
c7012676 | 1970 | bool all_cpus; |
e2f11f42 | 1971 | |
79661c37 | 1972 | /* |
ca7372ac VK |
1973 | * The Hyper-V TLFS doesn't allow more than HV_MAX_SPARSE_VCPU_BANKS |
1974 | * sparse banks. Fail the build if KVM's max allowed number of | |
1975 | * vCPUs (>4096) exceeds this limit. | |
79661c37 | 1976 | */ |
ca7372ac | 1977 | BUILD_BUG_ON(KVM_HV_MAX_SPARSE_VCPU_SET_BITS > HV_MAX_SPARSE_VCPU_BANKS); |
79661c37 | 1978 | |
aee73823 VK |
1979 | /* |
1980 | * A 'slow' hypercall's first parameter is the address in guest memory |
1981 | * where hypercall parameters are placed. This is either a GPA or a | |
1982 | * nested GPA when KVM is handling the call from L2 ('direct' TLB | |
1983 | * flush). Translate the address here so the memory can be uniformly | |
1984 | * read with kvm_read_guest(). | |
1985 | */ | |
1986 | if (!hc->fast && is_guest_mode(vcpu)) { | |
1987 | hc->ingpa = translate_nested_gpa(vcpu, hc->ingpa, 0, NULL); | |
1988 | if (unlikely(hc->ingpa == INVALID_GPA)) | |
1989 | return HV_STATUS_INVALID_HYPERCALL_INPUT; | |
1990 | } | |
1991 | ||
82c1ead0 VK |
1992 | if (hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST || |
1993 | hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE) { | |
5974565b SC |
1994 | if (hc->fast) { |
1995 | flush.address_space = hc->ingpa; | |
1996 | flush.flags = hc->outgpa; | |
1997 | flush.processor_mask = sse128_lo(hc->xmm[0]); | |
8b9e13d2 | 1998 | hc->consumed_xmm_halves = 1; |
5974565b SC |
1999 | } else { |
2000 | if (unlikely(kvm_read_guest(kvm, hc->ingpa, | |
2001 | &flush, sizeof(flush)))) | |
2002 | return HV_STATUS_INVALID_HYPERCALL_INPUT; | |
8b9e13d2 | 2003 | hc->data_offset = sizeof(flush); |
5974565b | 2004 | } |
e2f11f42 | 2005 | |
c7012676 | 2006 | trace_kvm_hv_flush_tlb(flush.processor_mask, |
c58a318f VK |
2007 | flush.address_space, flush.flags, |
2008 | is_guest_mode(vcpu)); | |
c7012676 | 2009 | |
2cefc5fe | 2010 | valid_bank_mask = BIT_ULL(0); |
c7012676 | 2011 | sparse_banks[0] = flush.processor_mask; |
da66761c VK |
2012 | |
2013 | /* | |
2014 | * Work around possible WS2012 bug: it sends hypercalls | |
2015 | * with processor_mask = 0x0 and HV_FLUSH_ALL_PROCESSORS clear, | |
2016 | * while also expecting us to flush something and crashing if | |
2017 | * we don't. Let's treat processor_mask == 0 same as | |
2018 | * HV_FLUSH_ALL_PROCESSORS. | |
2019 | */ | |
2020 | all_cpus = (flush.flags & HV_FLUSH_ALL_PROCESSORS) || | |
2021 | flush.processor_mask == 0; | |
c7012676 | 2022 | } else { |
5974565b SC |
2023 | if (hc->fast) { |
2024 | flush_ex.address_space = hc->ingpa; | |
2025 | flush_ex.flags = hc->outgpa; | |
2026 | memcpy(&flush_ex.hv_vp_set, | |
2027 | &hc->xmm[0], sizeof(hc->xmm[0])); | |
8b9e13d2 | 2028 | hc->consumed_xmm_halves = 2; |
5974565b SC |
2029 | } else { |
2030 | if (unlikely(kvm_read_guest(kvm, hc->ingpa, &flush_ex, | |
2031 | sizeof(flush_ex)))) | |
2032 | return HV_STATUS_INVALID_HYPERCALL_INPUT; | |
8b9e13d2 | 2033 | hc->data_offset = sizeof(flush_ex); |
5974565b | 2034 | } |
c7012676 VK |
2035 | |
2036 | trace_kvm_hv_flush_tlb_ex(flush_ex.hv_vp_set.valid_bank_mask, | |
2037 | flush_ex.hv_vp_set.format, | |
2038 | flush_ex.address_space, | |
c58a318f | 2039 | flush_ex.flags, is_guest_mode(vcpu)); |
c7012676 VK |
2040 | |
2041 | valid_bank_mask = flush_ex.hv_vp_set.valid_bank_mask; | |
2042 | all_cpus = flush_ex.hv_vp_set.format != | |
2043 | HV_GENERIC_SET_SPARSE_4K; | |
2044 | ||
d603fd8d | 2045 | if (hc->var_cnt != hweight64(valid_bank_mask)) |
bd1ba573 | 2046 | return HV_STATUS_INVALID_HYPERCALL_INPUT; |
c7012676 | 2047 | |
26097086 VK |
2048 | if (!all_cpus) { |
2049 | if (!hc->var_cnt) | |
2050 | goto ret_success; | |
25af9081 | 2051 | |
8b9e13d2 | 2052 | if (kvm_get_sparse_vp_set(kvm, hc, sparse_banks)) |
26097086 VK |
2053 | return HV_STATUS_INVALID_HYPERCALL_INPUT; |
2054 | } | |
c7012676 | 2055 | |
26097086 VK |
2056 | /* |
2057 | * Hyper-V TLFS doesn't explicitly forbid non-empty sparse vCPU | |
2058 | * banks (and, thus, non-zero 'var_cnt') for the 'all vCPUs' | |
2059 | * case (HV_GENERIC_SET_ALL). Always adjust data_offset and | |
2060 | * consumed_xmm_halves to make sure TLB flush entries are read | |
2061 | * from the correct offset. | |
2062 | */ | |
8b9e13d2 VK |
2063 | if (hc->fast) |
2064 | hc->consumed_xmm_halves += hc->var_cnt; | |
2065 | else | |
2066 | hc->data_offset += hc->var_cnt * sizeof(sparse_banks[0]); | |
26097086 VK |
2067 | } |
2068 | ||
2069 | if (hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE || | |
2070 | hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX || | |
2071 | hc->rep_cnt > ARRAY_SIZE(__tlb_flush_entries)) { | |
2072 | tlb_flush_entries = NULL; | |
2073 | } else { | |
8b9e13d2 | 2074 | if (kvm_hv_get_tlb_flush_entries(kvm, hc, __tlb_flush_entries)) |
25af9081 | 2075 | return HV_STATUS_INVALID_HYPERCALL_INPUT; |
26097086 | 2076 | tlb_flush_entries = __tlb_flush_entries; |
c7012676 | 2077 | } |
e2f11f42 | 2078 | |
2cefc5fe | 2079 | /* |
f21dd494 VK |
2080 | * vcpu->arch.cr3 may not be up-to-date for running vCPUs, so we can't |
2081 | * analyze it here; flush the TLB regardless of the specified address space. |
2cefc5fe | 2082 | */ |
c58a318f VK |
2083 | if (all_cpus && !is_guest_mode(vcpu)) { |
2084 | kvm_for_each_vcpu(i, v, kvm) { | |
2085 | tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(v, false); | |
2086 | hv_tlb_flush_enqueue(v, tlb_flush_fifo, | |
2087 | tlb_flush_entries, hc->rep_cnt); | |
2088 | } | |
0823570f | 2089 | |
adc43caa | 2090 | kvm_make_all_cpus_request(kvm, KVM_REQ_HV_TLB_FLUSH); |
c58a318f | 2091 | } else if (!is_guest_mode(vcpu)) { |
9c52f6b3 | 2092 | sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask, vcpu_mask); |
6470accc | 2093 | |
0823570f VK |
2094 | for_each_set_bit(i, vcpu_mask, KVM_MAX_VCPUS) { |
2095 | v = kvm_get_vcpu(kvm, i); | |
2096 | if (!v) | |
2097 | continue; | |
c58a318f VK |
2098 | tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(v, false); |
2099 | hv_tlb_flush_enqueue(v, tlb_flush_fifo, | |
2100 | tlb_flush_entries, hc->rep_cnt); | |
2101 | } | |
2102 | ||
2103 | kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH, vcpu_mask); | |
2104 | } else { | |
2105 | struct kvm_vcpu_hv *hv_v; | |
2106 | ||
2107 | bitmap_zero(vcpu_mask, KVM_MAX_VCPUS); | |
2108 | ||
2109 | kvm_for_each_vcpu(i, v, kvm) { | |
2110 | hv_v = to_hv_vcpu(v); | |
2111 | ||
2112 | /* | |
2113 | * The following check races with nested vCPUs entering/exiting | |
2114 | * and/or migrating between L1's vCPUs; however, the only case when |
2115 | * KVM *must* flush the TLB is when the target L2 vCPU keeps | |
2116 | * running on the same L1 vCPU from the moment of the request until | |
2117 | * kvm_hv_flush_tlb() returns. TLB is fully flushed in all other | |
2118 | * cases, e.g. when the target L2 vCPU migrates to a different L1 | |
2119 | * vCPU or when the corresponding L1 vCPU temporarily switches to a |
2120 | * different L2 vCPU while the request is being processed. | |
2121 | */ | |
2122 | if (!hv_v || hv_v->nested.vm_id != hv_vcpu->nested.vm_id) | |
2123 | continue; | |
2124 | ||
2125 | if (!all_cpus && | |
2126 | !hv_is_vp_in_sparse_set(hv_v->nested.vp_id, valid_bank_mask, | |
2127 | sparse_banks)) | |
2128 | continue; | |
2129 | ||
2130 | __set_bit(i, vcpu_mask); | |
2131 | tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(v, true); | |
2132 | hv_tlb_flush_enqueue(v, tlb_flush_fifo, | |
2133 | tlb_flush_entries, hc->rep_cnt); | |
0823570f VK |
2134 | } |
2135 | ||
adc43caa | 2136 | kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH, vcpu_mask); |
6470accc | 2137 | } |
e2f11f42 | 2138 | |
c7012676 | 2139 | ret_success: |
bd38b320 | 2140 | /* We always do full TLB flush, set 'Reps completed' = 'Rep Count' */ |
e2f11f42 | 2141 | return (u64)HV_STATUS_SUCCESS | |
bd38b320 | 2142 | ((u64)hc->rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET); |
e2f11f42 VK |
2143 | } |
2144 | ||
b6c2c22f VK |
2145 | static void kvm_hv_send_ipi_to_many(struct kvm *kvm, u32 vector, |
2146 | u64 *sparse_banks, u64 valid_bank_mask) | |
f21dd494 VK |
2147 | { |
2148 | struct kvm_lapic_irq irq = { | |
2149 | .delivery_mode = APIC_DM_FIXED, | |
2150 | .vector = vector | |
2151 | }; | |
2152 | struct kvm_vcpu *vcpu; | |
46808a4c | 2153 | unsigned long i; |
f21dd494 VK |
2154 | |
2155 | kvm_for_each_vcpu(i, vcpu, kvm) { | |
b6c2c22f VK |
2156 | if (sparse_banks && |
2157 | !hv_is_vp_in_sparse_set(kvm_hv_get_vpindex(vcpu), | |
2158 | valid_bank_mask, sparse_banks)) | |
f21dd494 VK |
2159 | continue; |
2160 | ||
2161 | /* We fail only when APIC is disabled */ | |
2162 | kvm_apic_set_irq(vcpu, &irq, NULL); | |
2163 | } | |
2164 | } | |
2165 | ||
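/*
 * Handle HVCALL_SEND_IPI{,_EX}: extract the vector and target VP set from the
 * hypercall input and inject the IPI via the in-kernel local APIC.
 */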
50e523dd | 2166 | static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc) |
214ff83d | 2167 | { |
7d5e88d3 VK |
2168 | struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); |
2169 | u64 *sparse_banks = hv_vcpu->sparse_banks; | |
72167a9d | 2170 | struct kvm *kvm = vcpu->kvm; |
214ff83d VK |
2171 | struct hv_send_ipi_ex send_ipi_ex; |
2172 | struct hv_send_ipi send_ipi; | |
ea8c66fe | 2173 | u64 valid_bank_mask; |
f21dd494 | 2174 | u32 vector; |
214ff83d VK |
2175 | bool all_cpus; |
2176 | ||
50e523dd | 2177 | if (hc->code == HVCALL_SEND_IPI) { |
bd38b320 SC |
2178 | if (!hc->fast) { |
2179 | if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi, | |
214ff83d VK |
2180 | sizeof(send_ipi)))) |
2181 | return HV_STATUS_INVALID_HYPERCALL_INPUT; | |
2182 | sparse_banks[0] = send_ipi.cpu_mask; | |
f21dd494 | 2183 | vector = send_ipi.vector; |
214ff83d VK |
2184 | } else { |
2185 | /* 'reserved' part of hv_send_ipi should be 0 */ | |
bd38b320 | 2186 | if (unlikely(hc->ingpa >> 32 != 0)) |
214ff83d | 2187 | return HV_STATUS_INVALID_HYPERCALL_INPUT; |
bd38b320 SC |
2188 | sparse_banks[0] = hc->outgpa; |
2189 | vector = (u32)hc->ingpa; | |
214ff83d VK |
2190 | } |
2191 | all_cpus = false; | |
2192 | valid_bank_mask = BIT_ULL(0); | |
2193 | ||
f21dd494 | 2194 | trace_kvm_hv_send_ipi(vector, sparse_banks[0]); |
214ff83d | 2195 | } else { |
47d3e5cd VK |
2196 | if (!hc->fast) { |
2197 | if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi_ex, | |
2198 | sizeof(send_ipi_ex)))) | |
2199 | return HV_STATUS_INVALID_HYPERCALL_INPUT; | |
2200 | } else { | |
2201 | send_ipi_ex.vector = (u32)hc->ingpa; | |
2202 | send_ipi_ex.vp_set.format = hc->outgpa; | |
2203 | send_ipi_ex.vp_set.valid_bank_mask = sse128_lo(hc->xmm[0]); | |
2204 | } | |
214ff83d VK |
2205 | |
2206 | trace_kvm_hv_send_ipi_ex(send_ipi_ex.vector, | |
2207 | send_ipi_ex.vp_set.format, | |
2208 | send_ipi_ex.vp_set.valid_bank_mask); | |
2209 | ||
f21dd494 | 2210 | vector = send_ipi_ex.vector; |
214ff83d | 2211 | valid_bank_mask = send_ipi_ex.vp_set.valid_bank_mask; |
214ff83d VK |
2212 | all_cpus = send_ipi_ex.vp_set.format == HV_GENERIC_SET_ALL; |
2213 | ||
d603fd8d | 2214 | if (hc->var_cnt != hweight64(valid_bank_mask)) |
bd1ba573 SC |
2215 | return HV_STATUS_INVALID_HYPERCALL_INPUT; |
2216 | ||
3244867a SC |
2217 | if (all_cpus) |
2218 | goto check_and_send_ipi; | |
2219 | ||
bd1ba573 | 2220 | if (!hc->var_cnt) |
214ff83d VK |
2221 | goto ret_success; |
2222 | ||
8b9e13d2 VK |
2223 | if (!hc->fast) |
2224 | hc->data_offset = offsetof(struct hv_send_ipi_ex, | |
2225 | vp_set.bank_contents); | |
2226 | else | |
2227 | hc->consumed_xmm_halves = 1; | |
2228 | ||
2229 | if (kvm_get_sparse_vp_set(kvm, hc, sparse_banks)) | |
214ff83d VK |
2230 | return HV_STATUS_INVALID_HYPERCALL_INPUT; |
2231 | } | |
2232 | ||
3244867a | 2233 | check_and_send_ipi: |
f21dd494 | 2234 | if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR)) |
214ff83d VK |
2235 | return HV_STATUS_INVALID_HYPERCALL_INPUT; |
2236 | ||
b6c2c22f VK |
2237 | if (all_cpus) |
2238 | kvm_hv_send_ipi_to_many(kvm, vector, NULL, 0); | |
2239 | else | |
2240 | kvm_hv_send_ipi_to_many(kvm, vector, sparse_banks, valid_bank_mask); | |
214ff83d VK |
2241 | |
2242 | ret_success: | |
2243 | return HV_STATUS_SUCCESS; | |
2244 | } | |
2245 | ||
3be29eb7 | 2246 | void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu, bool hyperv_enabled) |
8f014550 | 2247 | { |
3be29eb7 | 2248 | struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); |
8f014550 VK |
2249 | struct kvm_cpuid_entry2 *entry; |
2250 | ||
3be29eb7 | 2251 | vcpu->arch.hyperv_enabled = hyperv_enabled; |
10d7bf1e | 2252 | |
3be29eb7 SC |
2253 | if (!hv_vcpu) { |
2254 | /* | |
2255 | * KVM should have already allocated kvm_vcpu_hv if Hyper-V is | |
2256 | * enabled in CPUID. | |
2257 | */ | |
2258 | WARN_ON_ONCE(vcpu->arch.hyperv_enabled); | |
10d7bf1e | 2259 | return; |
3be29eb7 | 2260 | } |
10d7bf1e | 2261 | |
ce2196b8 VK |
2262 | memset(&hv_vcpu->cpuid_cache, 0, sizeof(hv_vcpu->cpuid_cache)); |
2263 | ||
3be29eb7 SC |
2264 | if (!vcpu->arch.hyperv_enabled) |
2265 | return; | |
2266 | ||
277ad7d5 | 2267 | entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_FEATURES); |
10d7bf1e VK |
2268 | if (entry) { |
2269 | hv_vcpu->cpuid_cache.features_eax = entry->eax; | |
2270 | hv_vcpu->cpuid_cache.features_ebx = entry->ebx; | |
2271 | hv_vcpu->cpuid_cache.features_edx = entry->edx; | |
10d7bf1e VK |
2272 | } |
2273 | ||
277ad7d5 | 2274 | entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_ENLIGHTMENT_INFO); |
10d7bf1e VK |
2275 | if (entry) { |
2276 | hv_vcpu->cpuid_cache.enlightenments_eax = entry->eax; | |
2277 | hv_vcpu->cpuid_cache.enlightenments_ebx = entry->ebx; | |
10d7bf1e VK |
2278 | } |
2279 | ||
277ad7d5 | 2280 | entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES); |
10d7bf1e VK |
2281 | if (entry) |
2282 | hv_vcpu->cpuid_cache.syndbg_cap_eax = entry->eax; | |
dea6e140 VK |
2283 | |
2284 | entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_NESTED_FEATURES); | |
2285 | if (entry) { | |
2286 | hv_vcpu->cpuid_cache.nested_eax = entry->eax; | |
2287 | hv_vcpu->cpuid_cache.nested_ebx = entry->ebx; | |
2288 | } | |
8f014550 VK |
2289 | } |
2290 | ||
644f7067 VK |
2291 | int kvm_hv_set_enforce_cpuid(struct kvm_vcpu *vcpu, bool enforce) |
2292 | { | |
2293 | struct kvm_vcpu_hv *hv_vcpu; | |
2294 | int ret = 0; | |
2295 | ||
2296 | if (!to_hv_vcpu(vcpu)) { | |
2297 | if (enforce) { | |
2298 | ret = kvm_hv_vcpu_init(vcpu); | |
2299 | if (ret) | |
2300 | return ret; | |
2301 | } else { | |
2302 | return 0; | |
2303 | } | |
2304 | } | |
2305 | ||
2306 | hv_vcpu = to_hv_vcpu(vcpu); | |
2307 | hv_vcpu->enforce_cpuid = enforce; | |
2308 | ||
2309 | return ret; | |
2310 | } | |
2311 | ||
83326e43 AS |
2312 | static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result) |
2313 | { | |
2314 | bool longmode; | |
2315 | ||
b5aead00 | 2316 | longmode = is_64_bit_hypercall(vcpu); |
83326e43 | 2317 | if (longmode) |
de3cd117 | 2318 | kvm_rax_write(vcpu, result); |
83326e43 | 2319 | else { |
de3cd117 SC |
2320 | kvm_rdx_write(vcpu, result >> 32); |
2321 | kvm_rax_write(vcpu, result & 0xffffffff); | |
83326e43 AS |
2322 | } |
2323 | } | |
2324 | ||
696ca779 | 2325 | static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result) |
83326e43 | 2326 | { |
c58a318f VK |
2327 | u32 tlb_lock_count = 0; |
2328 | int ret; | |
2329 | ||
2330 | if (hv_result_success(result) && is_guest_mode(vcpu) && | |
2331 | kvm_hv_is_tlb_flush_hcall(vcpu) && | |
2332 | kvm_read_guest(vcpu->kvm, to_hv_vcpu(vcpu)->nested.pa_page_gpa, | |
2333 | &tlb_lock_count, sizeof(tlb_lock_count))) | |
2334 | result = HV_STATUS_INVALID_HYPERCALL_INPUT; | |
2335 | ||
f5714bbb | 2336 | trace_kvm_hv_hypercall_done(result); |
696ca779 RK |
2337 | kvm_hv_hypercall_set_result(vcpu, result); |
2338 | ++vcpu->stat.hypercalls; | |
c58a318f VK |
2339 | |
2340 | ret = kvm_skip_emulated_instruction(vcpu); | |
2341 | ||
2342 | if (tlb_lock_count) | |
2343 | kvm_x86_ops.nested_ops->hv_inject_synthetic_vmexit_post_tlb_flush(vcpu); | |
2344 | ||
2345 | return ret; | |
83326e43 AS |
2346 | } |
2347 | ||
696ca779 RK |
2348 | static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu) |
2349 | { | |
2350 | return kvm_hv_hypercall_complete(vcpu, vcpu->run->hyperv.u.hcall.result); | |
2351 | } | |
2352 | ||
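/*
 * Handle HVCALL_SIGNAL_EVENT: resolve the connection ID from the hypercall
 * input and signal the eventfd registered for it, if any.
 */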
bd38b320 | 2353 | static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc) |
faeb7833 | 2354 | { |
05f04ae4 | 2355 | struct kvm_hv *hv = to_kvm_hv(vcpu->kvm); |
faeb7833 RK |
2356 | struct eventfd_ctx *eventfd; |
2357 | ||
bd38b320 | 2358 | if (unlikely(!hc->fast)) { |
faeb7833 | 2359 | int ret; |
bd38b320 | 2360 | gpa_t gpa = hc->ingpa; |
faeb7833 | 2361 | |
bd38b320 SC |
2362 | if ((gpa & (__alignof__(hc->ingpa) - 1)) || |
2363 | offset_in_page(gpa) + sizeof(hc->ingpa) > PAGE_SIZE) | |
faeb7833 RK |
2364 | return HV_STATUS_INVALID_ALIGNMENT; |
2365 | ||
bd38b320 SC |
2366 | ret = kvm_vcpu_read_guest(vcpu, gpa, |
2367 | &hc->ingpa, sizeof(hc->ingpa)); | |
faeb7833 RK |
2368 | if (ret < 0) |
2369 | return HV_STATUS_INVALID_ALIGNMENT; | |
2370 | } | |
2371 | ||
2372 | /* | |
2373 | * Per spec, bits 32-47 contain the extra "flag number". However, we | |
2374 | * have no use for it, and in all known use cases it is zero, so just |
2375 | * report lookup failure if it isn't. | |
2376 | */ | |
bd38b320 | 2377 | if (hc->ingpa & 0xffff00000000ULL) |
faeb7833 RK |
2378 | return HV_STATUS_INVALID_PORT_ID; |
2379 | /* remaining bits are reserved-zero */ | |
bd38b320 | 2380 | if (hc->ingpa & ~KVM_HYPERV_CONN_ID_MASK) |
faeb7833 RK |
2381 | return HV_STATUS_INVALID_HYPERCALL_INPUT; |
2382 | ||
452a68d0 PB |
2383 | /* the eventfd is protected by vcpu->kvm->srcu, but conn_to_evt isn't */ |
2384 | rcu_read_lock(); | |
bd38b320 | 2385 | eventfd = idr_find(&hv->conn_to_evt, hc->ingpa); |
452a68d0 | 2386 | rcu_read_unlock(); |
faeb7833 RK |
2387 | if (!eventfd) |
2388 | return HV_STATUS_INVALID_PORT_ID; | |
2389 | ||
2390 | eventfd_signal(eventfd, 1); | |
2391 | return HV_STATUS_SUCCESS; | |
2392 | } | |
2393 | ||
5974565b SC |
2394 | static bool is_xmm_fast_hypercall(struct kvm_hv_hcall *hc) |
2395 | { | |
2396 | switch (hc->code) { | |
2397 | case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST: | |
2398 | case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE: | |
2399 | case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX: | |
2400 | case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX: | |
47d3e5cd | 2401 | case HVCALL_SEND_IPI_EX: |
5974565b SC |
2402 | return true; |
2403 | } | |
2404 | ||
2405 | return false; | |
2406 | } | |
2407 | ||
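/*
 * Copy all XMM registers that may carry 'fast' hypercall input into the
 * kvm_hv_hcall descriptor.
 */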
2408 | static void kvm_hv_hypercall_read_xmm(struct kvm_hv_hcall *hc) | |
2409 | { | |
2410 | int reg; | |
2411 | ||
2412 | kvm_fpu_get(); | |
2413 | for (reg = 0; reg < HV_HYPERCALL_MAX_XMM_REGISTERS; reg++) | |
2414 | _kvm_read_sse_reg(reg, &hc->xmm[reg]); | |
2415 | kvm_fpu_put(); | |
2416 | } | |
2417 | ||
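/*
 * When 'enforce_cpuid' is active, verify that the hypercall code is actually
 * exposed to the guest via the cached Hyper-V CPUID feature/enlightenment bits.
 */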
4ad81a91 VK |
2418 | static bool hv_check_hypercall_access(struct kvm_vcpu_hv *hv_vcpu, u16 code) |
2419 | { | |
34ef7d7b VK |
2420 | if (!hv_vcpu->enforce_cpuid) |
2421 | return true; | |
2422 | ||
2423 | switch (code) { | |
2424 | case HVCALL_NOTIFY_LONG_SPIN_WAIT: | |
2425 | return hv_vcpu->cpuid_cache.enlightenments_ebx && | |
2426 | hv_vcpu->cpuid_cache.enlightenments_ebx != U32_MAX; | |
4f532b7f VK |
2427 | case HVCALL_POST_MESSAGE: |
2428 | return hv_vcpu->cpuid_cache.features_ebx & HV_POST_MESSAGES; | |
a60b3c59 VK |
2429 | case HVCALL_SIGNAL_EVENT: |
2430 | return hv_vcpu->cpuid_cache.features_ebx & HV_SIGNAL_EVENTS; | |
a921cf83 VK |
2431 | case HVCALL_POST_DEBUG_DATA: |
2432 | case HVCALL_RETRIEVE_DEBUG_DATA: | |
2433 | case HVCALL_RESET_DEBUG_SESSION: | |
2434 | /* | |
2435 | * Return 'true' when SynDBG is disabled so the resulting code | |
2436 | * will be HV_STATUS_INVALID_HYPERCALL_CODE. | |
2437 | */ | |
2438 | return !kvm_hv_is_syndbg_enabled(hv_vcpu->vcpu) || | |
2439 | hv_vcpu->cpuid_cache.features_ebx & HV_DEBUGGING; | |
bb53ecb4 VK |
2440 | case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX: |
2441 | case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX: | |
445caed0 VK |
2442 | if (!(hv_vcpu->cpuid_cache.enlightenments_eax & |
2443 | HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED)) | |
2444 | return false; | |
2445 | fallthrough; | |
bb53ecb4 VK |
2446 | case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST: |
2447 | case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE: | |
2448 | return hv_vcpu->cpuid_cache.enlightenments_eax & | |
2449 | HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED; | |
d264eb3c | 2450 | case HVCALL_SEND_IPI_EX: |
445caed0 VK |
2451 | if (!(hv_vcpu->cpuid_cache.enlightenments_eax & |
2452 | HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED)) | |
2453 | return false; | |
2454 | fallthrough; | |
d264eb3c VK |
2455 | case HVCALL_SEND_IPI: |
2456 | return hv_vcpu->cpuid_cache.enlightenments_eax & | |
2457 | HV_X64_CLUSTER_IPI_RECOMMENDED; | |
db9cf24c VS |
2458 | case HV_EXT_CALL_QUERY_CAPABILITIES ... HV_EXT_CALL_MAX: |
2459 | return hv_vcpu->cpuid_cache.features_ebx & | |
2460 | HV_ENABLE_EXTENDED_HYPERCALLS; | |
34ef7d7b VK |
2461 | default: |
2462 | break; | |
2463 | } | |
2464 | ||
4ad81a91 VK |
2465 | return true; |
2466 | } | |
2467 | ||
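/*
 * Top-level Hyper-V hypercall handler: decode the hypercall input from the
 * guest registers (64-bit or 32-bit calling convention), check access,
 * dispatch to the individual handlers and either complete the call in-kernel
 * or exit to userspace.
 */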
e83d5887 AS |
2468 | int kvm_hv_hypercall(struct kvm_vcpu *vcpu) |
2469 | { | |
4e62aa96 | 2470 | struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); |
bd38b320 SC |
2471 | struct kvm_hv_hcall hc; |
2472 | u64 ret = HV_STATUS_SUCCESS; | |
e83d5887 AS |
2473 | |
2474 | /* | |
2475 | * hypercall generates UD from non zero cpl and real mode | |
2476 | * per HYPER-V spec | |
2477 | */ | |
b3646477 | 2478 | if (static_call(kvm_x86_get_cpl)(vcpu) != 0 || !is_protmode(vcpu)) { |
e83d5887 | 2479 | kvm_queue_exception(vcpu, UD_VECTOR); |
0d9c055e | 2480 | return 1; |
e83d5887 AS |
2481 | } |
2482 | ||
f4e4805e | 2483 | #ifdef CONFIG_X86_64 |
b5aead00 | 2484 | if (is_64_bit_hypercall(vcpu)) { |
bd38b320 SC |
2485 | hc.param = kvm_rcx_read(vcpu); |
2486 | hc.ingpa = kvm_rdx_read(vcpu); | |
2487 | hc.outgpa = kvm_r8_read(vcpu); | |
f4e4805e AB |
2488 | } else |
2489 | #endif | |
2490 | { | |
bd38b320 SC |
2491 | hc.param = ((u64)kvm_rdx_read(vcpu) << 32) | |
2492 | (kvm_rax_read(vcpu) & 0xffffffff); | |
2493 | hc.ingpa = ((u64)kvm_rbx_read(vcpu) << 32) | | |
2494 | (kvm_rcx_read(vcpu) & 0xffffffff); | |
2495 | hc.outgpa = ((u64)kvm_rdi_read(vcpu) << 32) | | |
2496 | (kvm_rsi_read(vcpu) & 0xffffffff); | |
e83d5887 | 2497 | } |
e83d5887 | 2498 | |
bd38b320 | 2499 | hc.code = hc.param & 0xffff; |
bd1ba573 | 2500 | hc.var_cnt = (hc.param & HV_HYPERCALL_VARHEAD_MASK) >> HV_HYPERCALL_VARHEAD_OFFSET; |
bd38b320 SC |
2501 | hc.fast = !!(hc.param & HV_HYPERCALL_FAST_BIT); |
2502 | hc.rep_cnt = (hc.param >> HV_HYPERCALL_REP_COMP_OFFSET) & 0xfff; | |
2503 | hc.rep_idx = (hc.param >> HV_HYPERCALL_REP_START_OFFSET) & 0xfff; | |
2504 | hc.rep = !!(hc.rep_cnt || hc.rep_idx); | |
e83d5887 | 2505 | |
bd1ba573 SC |
2506 | trace_kvm_hv_hypercall(hc.code, hc.fast, hc.var_cnt, hc.rep_cnt, |
2507 | hc.rep_idx, hc.ingpa, hc.outgpa); | |
e83d5887 | 2508 | |
4e62aa96 | 2509 | if (unlikely(!hv_check_hypercall_access(hv_vcpu, hc.code))) { |
4ad81a91 VK |
2510 | ret = HV_STATUS_ACCESS_DENIED; |
2511 | goto hypercall_complete; | |
413af660 SC |
2512 | } |
2513 | ||
2514 | if (unlikely(hc.param & HV_HYPERCALL_RSVD_MASK)) { | |
2515 | ret = HV_STATUS_INVALID_HYPERCALL_INPUT; | |
2516 | goto hypercall_complete; | |
4ad81a91 VK |
2517 | } |
2518 | ||
4e62aa96 VK |
2519 | if (hc.fast && is_xmm_fast_hypercall(&hc)) { |
2520 | if (unlikely(hv_vcpu->enforce_cpuid && | |
2521 | !(hv_vcpu->cpuid_cache.features_edx & | |
2522 | HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE))) { | |
2523 | kvm_queue_exception(vcpu, UD_VECTOR); | |
2524 | return 1; | |
2525 | } | |
2526 | ||
2e2f1e8d | 2527 | kvm_hv_hypercall_read_xmm(&hc); |
4e62aa96 | 2528 | } |
2e2f1e8d | 2529 | |
bd38b320 | 2530 | switch (hc.code) { |
8ed6d767 | 2531 | case HVCALL_NOTIFY_LONG_SPIN_WAIT: |
40421f38 | 2532 | if (unlikely(hc.rep || hc.var_cnt)) { |
56b9ae78 VK |
2533 | ret = HV_STATUS_INVALID_HYPERCALL_INPUT; |
2534 | break; | |
2535 | } | |
de63ad4c | 2536 | kvm_vcpu_on_spin(vcpu, true); |
e83d5887 | 2537 | break; |
83326e43 | 2538 | case HVCALL_SIGNAL_EVENT: |
40421f38 | 2539 | if (unlikely(hc.rep || hc.var_cnt)) { |
56b9ae78 VK |
2540 | ret = HV_STATUS_INVALID_HYPERCALL_INPUT; |
2541 | break; | |
2542 | } | |
bd38b320 | 2543 | ret = kvm_hvcall_signal_event(vcpu, &hc); |
d32ef547 | 2544 | if (ret != HV_STATUS_INVALID_PORT_ID) |
faeb7833 | 2545 | break; |
df561f66 | 2546 | fallthrough; /* maybe userspace knows this conn_id */ |
faeb7833 | 2547 | case HVCALL_POST_MESSAGE: |
a2b5c3c0 | 2548 | /* don't bother userspace if it has no way to handle it */ |
40421f38 | 2549 | if (unlikely(hc.rep || hc.var_cnt || !to_hv_synic(vcpu)->active)) { |
56b9ae78 | 2550 | ret = HV_STATUS_INVALID_HYPERCALL_INPUT; |
a2b5c3c0 PB |
2551 | break; |
2552 | } | |
1a9df326 | 2553 | goto hypercall_userspace_exit; |
e2f11f42 | 2554 | case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST: |
c0f1eaeb | 2555 | if (unlikely(hc.var_cnt)) { |
e2f11f42 VK |
2556 | ret = HV_STATUS_INVALID_HYPERCALL_INPUT; |
2557 | break; | |
2558 | } | |
c0f1eaeb | 2559 | fallthrough; |
c7012676 | 2560 | case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX: |
5974565b | 2561 | if (unlikely(!hc.rep_cnt || hc.rep_idx)) { |
e2f11f42 VK |
2562 | ret = HV_STATUS_INVALID_HYPERCALL_INPUT; |
2563 | break; | |
2564 | } | |
82c1ead0 | 2565 | ret = kvm_hv_flush_tlb(vcpu, &hc); |
c7012676 | 2566 | break; |
82c1ead0 | 2567 | case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE: |
c0f1eaeb | 2568 | if (unlikely(hc.var_cnt)) { |
c7012676 VK |
2569 | ret = HV_STATUS_INVALID_HYPERCALL_INPUT; |
2570 | break; | |
2571 | } | |
c0f1eaeb | 2572 | fallthrough; |
c7012676 | 2573 | case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX: |
5974565b | 2574 | if (unlikely(hc.rep)) { |
c7012676 VK |
2575 | ret = HV_STATUS_INVALID_HYPERCALL_INPUT; |
2576 | break; | |
2577 | } | |
82c1ead0 | 2578 | ret = kvm_hv_flush_tlb(vcpu, &hc); |
e2f11f42 | 2579 | break; |
214ff83d | 2580 | case HVCALL_SEND_IPI: |
c0f1eaeb | 2581 | if (unlikely(hc.var_cnt)) { |
214ff83d VK |
2582 | ret = HV_STATUS_INVALID_HYPERCALL_INPUT; |
2583 | break; | |
2584 | } | |
c0f1eaeb | 2585 | fallthrough; |
214ff83d | 2586 | case HVCALL_SEND_IPI_EX: |
47d3e5cd | 2587 | if (unlikely(hc.rep)) { |
214ff83d VK |
2588 | ret = HV_STATUS_INVALID_HYPERCALL_INPUT; |
2589 | break; | |
2590 | } | |
50e523dd | 2591 | ret = kvm_hv_send_ipi(vcpu, &hc); |
214ff83d | 2592 | break; |
b187038b JD |
2593 | case HVCALL_POST_DEBUG_DATA: |
2594 | case HVCALL_RETRIEVE_DEBUG_DATA: | |
bd38b320 | 2595 | if (unlikely(hc.fast)) { |
b187038b JD |
2596 | ret = HV_STATUS_INVALID_PARAMETER; |
2597 | break; | |
2598 | } | |
2599 | fallthrough; | |
2600 | case HVCALL_RESET_DEBUG_SESSION: { | |
f69b55ef | 2601 | struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu); |
b187038b JD |
2602 | |
2603 | if (!kvm_hv_is_syndbg_enabled(vcpu)) { | |
2604 | ret = HV_STATUS_INVALID_HYPERCALL_CODE; | |
2605 | break; | |
2606 | } | |
2607 | ||
2608 | if (!(syndbg->options & HV_X64_SYNDBG_OPTION_USE_HCALLS)) { | |
2609 | ret = HV_STATUS_OPERATION_DENIED; | |
2610 | break; | |
2611 | } | |
1a9df326 | 2612 | goto hypercall_userspace_exit; |
b187038b | 2613 | } |
db9cf24c VS |
2614 | case HV_EXT_CALL_QUERY_CAPABILITIES ... HV_EXT_CALL_MAX: |
2615 | if (unlikely(hc.fast)) { | |
2616 | ret = HV_STATUS_INVALID_PARAMETER; | |
2617 | break; | |
2618 | } | |
2619 | goto hypercall_userspace_exit; | |
e83d5887 | 2620 | default: |
d32ef547 | 2621 | ret = HV_STATUS_INVALID_HYPERCALL_CODE; |
e83d5887 AS |
2622 | break; |
2623 | } | |
2624 | ||
4ad81a91 | 2625 | hypercall_complete: |
696ca779 | 2626 | return kvm_hv_hypercall_complete(vcpu, ret); |
1a9df326 VS |
2627 | |
2628 | hypercall_userspace_exit: | |
2629 | vcpu->run->exit_reason = KVM_EXIT_HYPERV; | |
2630 | vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL; | |
2631 | vcpu->run->hyperv.u.hcall.input = hc.param; | |
2632 | vcpu->run->hyperv.u.hcall.params[0] = hc.ingpa; | |
2633 | vcpu->run->hyperv.u.hcall.params[1] = hc.outgpa; | |
2634 | vcpu->arch.complete_userspace_io = kvm_hv_hypercall_complete_userspace; | |
2635 | return 0; | |
e83d5887 | 2636 | } |
cbc0236a RK |
2637 | |
2638 | void kvm_hv_init_vm(struct kvm *kvm) | |
2639 | { | |
05f04ae4 VK |
2640 | struct kvm_hv *hv = to_kvm_hv(kvm); |
2641 | ||
2642 | mutex_init(&hv->hv_lock); | |
2643 | idr_init(&hv->conn_to_evt); | |
cbc0236a RK |
2644 | } |
2645 | ||
2646 | void kvm_hv_destroy_vm(struct kvm *kvm) | |
2647 | { | |
05f04ae4 | 2648 | struct kvm_hv *hv = to_kvm_hv(kvm); |
faeb7833 RK |
2649 | struct eventfd_ctx *eventfd; |
2650 | int i; | |
2651 | ||
05f04ae4 | 2652 | idr_for_each_entry(&hv->conn_to_evt, eventfd, i) |
faeb7833 | 2653 | eventfd_ctx_put(eventfd); |
05f04ae4 | 2654 | idr_destroy(&hv->conn_to_evt); |
faeb7833 RK |
2655 | } |
2656 | ||
2657 | static int kvm_hv_eventfd_assign(struct kvm *kvm, u32 conn_id, int fd) | |
2658 | { | |
05f04ae4 | 2659 | struct kvm_hv *hv = to_kvm_hv(kvm); |
faeb7833 RK |
2660 | struct eventfd_ctx *eventfd; |
2661 | int ret; | |
2662 | ||
2663 | eventfd = eventfd_ctx_fdget(fd); | |
2664 | if (IS_ERR(eventfd)) | |
2665 | return PTR_ERR(eventfd); | |
2666 | ||
2667 | mutex_lock(&hv->hv_lock); | |
2668 | ret = idr_alloc(&hv->conn_to_evt, eventfd, conn_id, conn_id + 1, | |
254272ce | 2669 | GFP_KERNEL_ACCOUNT); |
faeb7833 RK |
2670 | mutex_unlock(&hv->hv_lock); |
2671 | ||
2672 | if (ret >= 0) | |
2673 | return 0; | |
2674 | ||
2675 | if (ret == -ENOSPC) | |
2676 | ret = -EEXIST; | |
2677 | eventfd_ctx_put(eventfd); | |
2678 | return ret; | |
2679 | } | |
2680 | ||
2681 | static int kvm_hv_eventfd_deassign(struct kvm *kvm, u32 conn_id) | |
2682 | { | |
05f04ae4 | 2683 | struct kvm_hv *hv = to_kvm_hv(kvm); |
faeb7833 RK |
2684 | struct eventfd_ctx *eventfd; |
2685 | ||
2686 | mutex_lock(&hv->hv_lock); | |
2687 | eventfd = idr_remove(&hv->conn_to_evt, conn_id); | |
2688 | mutex_unlock(&hv->hv_lock); | |
2689 | ||
2690 | if (!eventfd) | |
2691 | return -ENOENT; | |
2692 | ||
2693 | synchronize_srcu(&kvm->srcu); | |
2694 | eventfd_ctx_put(eventfd); | |
2695 | return 0; | |
2696 | } | |
2697 | ||
2698 | int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args) | |
2699 | { | |
2700 | if ((args->flags & ~KVM_HYPERV_EVENTFD_DEASSIGN) || | |
2701 | (args->conn_id & ~KVM_HYPERV_CONN_ID_MASK)) | |
2702 | return -EINVAL; | |
2703 | ||
2704 | if (args->flags == KVM_HYPERV_EVENTFD_DEASSIGN) | |
2705 | return kvm_hv_eventfd_deassign(kvm, args->conn_id); | |
2706 | return kvm_hv_eventfd_assign(kvm, args->conn_id, args->fd); | |
cbc0236a | 2707 | } |
2bc39970 | 2708 | |
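/*
 * Fill the Hyper-V CPUID leaves supported by KVM; used to serve the
 * KVM_GET_SUPPORTED_HV_CPUID ioctl.
 */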
c21d54f0 VK |
2709 | int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid, |
2710 | struct kvm_cpuid_entry2 __user *entries) | |
2bc39970 | 2711 | { |
ea152987 | 2712 | uint16_t evmcs_ver = 0; |
2bc39970 VK |
2713 | struct kvm_cpuid_entry2 cpuid_entries[] = { |
2714 | { .function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS }, | |
2715 | { .function = HYPERV_CPUID_INTERFACE }, | |
2716 | { .function = HYPERV_CPUID_VERSION }, | |
2717 | { .function = HYPERV_CPUID_FEATURES }, | |
2718 | { .function = HYPERV_CPUID_ENLIGHTMENT_INFO }, | |
2719 | { .function = HYPERV_CPUID_IMPLEMENT_LIMITS }, | |
f97f5a56 JD |
2720 | { .function = HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS }, |
2721 | { .function = HYPERV_CPUID_SYNDBG_INTERFACE }, | |
2722 | { .function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES }, | |
2bc39970 VK |
2723 | { .function = HYPERV_CPUID_NESTED_FEATURES }, |
2724 | }; | |
2725 | int i, nent = ARRAY_SIZE(cpuid_entries); | |
2726 | ||
33b22172 PB |
2727 | if (kvm_x86_ops.nested_ops->get_evmcs_version) |
2728 | evmcs_ver = kvm_x86_ops.nested_ops->get_evmcs_version(vcpu); | |
ea152987 | 2729 | |
2bc39970 VK |
2730 | if (cpuid->nent < nent) |
2731 | return -E2BIG; | |
2732 | ||
2733 | if (cpuid->nent > nent) | |
2734 | cpuid->nent = nent; | |
2735 | ||
2736 | for (i = 0; i < nent; i++) { | |
2737 | struct kvm_cpuid_entry2 *ent = &cpuid_entries[i]; | |
2738 | u32 signature[3]; | |
2739 | ||
2740 | switch (ent->function) { | |
2741 | case HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS: | |
2742 | memcpy(signature, "Linux KVM Hv", 12); | |
2743 | ||
f97f5a56 | 2744 | ent->eax = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES; |
2bc39970 VK |
2745 | ent->ebx = signature[0]; |
2746 | ent->ecx = signature[1]; | |
2747 | ent->edx = signature[2]; | |
2748 | break; | |
2749 | ||
2750 | case HYPERV_CPUID_INTERFACE: | |
8f014550 | 2751 | ent->eax = HYPERV_CPUID_SIGNATURE_EAX; |
2bc39970 VK |
2752 | break; |
2753 | ||
2754 | case HYPERV_CPUID_VERSION: | |
2755 | /* | |
2756 | * We implement some Hyper-V 2016 functions so let's use | |
2757 | * this version. | |
2758 | */ | |
2759 | ent->eax = 0x00003839; | |
2760 | ent->ebx = 0x000A0000; | |
2761 | break; | |
2762 | ||
2763 | case HYPERV_CPUID_FEATURES: | |
dfc53baa | 2764 | ent->eax |= HV_MSR_VP_RUNTIME_AVAILABLE; |
2bc39970 | 2765 | ent->eax |= HV_MSR_TIME_REF_COUNT_AVAILABLE; |
dfc53baa | 2766 | ent->eax |= HV_MSR_SYNIC_AVAILABLE; |
2bc39970 | 2767 | ent->eax |= HV_MSR_SYNTIMER_AVAILABLE; |
dfc53baa JS |
2768 | ent->eax |= HV_MSR_APIC_ACCESS_AVAILABLE; |
2769 | ent->eax |= HV_MSR_HYPERCALL_AVAILABLE; | |
2770 | ent->eax |= HV_MSR_VP_INDEX_AVAILABLE; | |
2771 | ent->eax |= HV_MSR_RESET_AVAILABLE; | |
2bc39970 | 2772 | ent->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE; |
dfc53baa JS |
2773 | ent->eax |= HV_ACCESS_FREQUENCY_MSRS; |
2774 | ent->eax |= HV_ACCESS_REENLIGHTENMENT; | |
2be1bd3a | 2775 | ent->eax |= HV_ACCESS_TSC_INVARIANT; |
2bc39970 | 2776 | |
dfc53baa JS |
2777 | ent->ebx |= HV_POST_MESSAGES; |
2778 | ent->ebx |= HV_SIGNAL_EVENTS; | |
db9cf24c | 2779 | ent->ebx |= HV_ENABLE_EXTENDED_HYPERCALLS; |
2bc39970 | 2780 | |
d8f5537a | 2781 | ent->edx |= HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE; |
2bc39970 VK |
2782 | ent->edx |= HV_FEATURE_FREQUENCY_MSRS_AVAILABLE; |
2783 | ent->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE; | |
a073d7e3 | 2784 | |
039aeb9d | 2785 | ent->ebx |= HV_DEBUGGING; |
f97f5a56 JD |
2786 | ent->edx |= HV_X64_GUEST_DEBUGGING_AVAILABLE; |
2787 | ent->edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE; | |
f84fcb66 | 2788 | ent->edx |= HV_FEATURE_EXT_GVA_RANGES_FLUSH; |
f97f5a56 | 2789 | |
a073d7e3 WL |
2790 | /* |
2791 | * Direct Synthetic timers only make sense with in-kernel | |
2792 | * LAPIC | |
2793 | */ | |
c21d54f0 | 2794 | if (!vcpu || lapic_in_kernel(vcpu)) |
a073d7e3 | 2795 | ent->edx |= HV_STIMER_DIRECT_MODE_AVAILABLE; |
2bc39970 VK |
2796 | |
2797 | break; | |
2798 | ||
2799 | case HYPERV_CPUID_ENLIGHTMENT_INFO: | |
2800 | ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED; | |
2801 | ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED; | |
2bc39970 VK |
2802 | ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED; |
2803 | ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED; | |
2804 | ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED; | |
f1adceaf VK |
2805 | if (evmcs_ver) |
2806 | ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED; | |
b2d8b167 VK |
2807 | if (!cpu_smt_possible()) |
2808 | ent->eax |= HV_X64_NO_NONARCH_CORESHARING; | |
0f250a64 VK |
2809 | |
2810 | ent->eax |= HV_DEPRECATING_AEOI_RECOMMENDED; | |
2bc39970 VK |
2811 | /* |
2812 | * Default number of spinlock retry attempts, matches | |
2813 | * HyperV 2016. | |
2814 | */ | |
2815 | ent->ebx = 0x00000FFF; | |
2816 | ||
2817 | break; | |
2818 | ||
2819 | case HYPERV_CPUID_IMPLEMENT_LIMITS: | |
2820 | /* Maximum number of virtual processors */ | |
2821 | ent->eax = KVM_MAX_VCPUS; | |
2822 | /* | |
2823 | * Maximum number of logical processors, matches | |
2824 | * HyperV 2016. | |
2825 | */ | |
2826 | ent->ebx = 64; | |
2827 | ||
2828 | break; | |
2829 | ||
2830 | case HYPERV_CPUID_NESTED_FEATURES: | |
2831 | ent->eax = evmcs_ver; | |
f4de6a1f | 2832 | ent->eax |= HV_X64_NESTED_DIRECT_FLUSH; |
66c03a92 | 2833 | ent->eax |= HV_X64_NESTED_MSR_BITMAP; |
4da77090 | 2834 | ent->ebx |= HV_X64_NESTED_EVMCS1_PERF_GLOBAL_CTRL; |
2bc39970 VK |
2835 | break; |
2836 | ||
f97f5a56 JD |
2837 | case HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS: |
2838 | memcpy(signature, "Linux KVM Hv", 12); | |
2839 | ||
2840 | ent->eax = 0; | |
2841 | ent->ebx = signature[0]; | |
2842 | ent->ecx = signature[1]; | |
2843 | ent->edx = signature[2]; | |
2844 | break; | |
2845 | ||
2846 | case HYPERV_CPUID_SYNDBG_INTERFACE: | |
2847 | memcpy(signature, "VS#1\0\0\0\0\0\0\0\0", 12); | |
2848 | ent->eax = signature[0]; | |
2849 | break; | |
2850 | ||
2851 | case HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES: | |
2852 | ent->eax |= HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING; | |
2853 | break; | |
2854 | ||
2bc39970 VK |
2855 | default: |
2856 | break; | |
2857 | } | |
2858 | } | |
2859 | ||
2860 | if (copy_to_user(entries, cpuid_entries, | |
2861 | nent * sizeof(struct kvm_cpuid_entry2))) | |
2862 | return -EFAULT; | |
2863 | ||
2864 | return 0; | |
2865 | } |