kvm: vmx: add nested virtualization support for xsaves
arch/x86/kvm/cpuid.c
/*
 * Kernel-based Virtual Machine driver for Linux
 * cpuid support routines
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 * Copyright IBM Corporation, 2008
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <asm/user.h>
#include <asm/xsave.h>
#include "cpuid.h"
#include "lapic.h"
#include "mmu.h"
#include "trace.h"

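/*
 * Compute the XSAVE buffer size needed for the extended state components
 * in @xstate_bv.  Start from the legacy region plus header, then add each
 * enabled component using its size (CPUID.0xD.<n>:EAX).  In the compacted
 * format components are packed back to back, so the running size is used
 * as the offset; in the standard format the offset comes from
 * CPUID.0xD.<n>:EBX.
 */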
static u32 xstate_required_size(u64 xstate_bv, bool compacted)
{
	int feature_bit = 0;
	u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;

	xstate_bv &= XSTATE_EXTEND_MASK;
	while (xstate_bv) {
		if (xstate_bv & 0x1) {
			u32 eax, ebx, ecx, edx, offset;
			cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx);
			offset = compacted ? ret : ebx;
			ret = max(ret, offset + eax);
		}

		xstate_bv >>= 1;
		feature_bit++;
	}

	return ret;
}

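/*
 * Mask of XCR0 bits that KVM can expose to the guest: the intersection of
 * what KVM supports and what the host has enabled, minus the MPX states
 * when the backend does not support them.
 */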
u64 kvm_supported_xcr0(void)
{
	u64 xcr0 = KVM_SUPPORTED_XCR0 & host_xcr0;

	if (!kvm_x86_ops->mpx_supported())
		xcr0 &= ~(XSTATE_BNDREGS | XSTATE_BNDCSR);

	return xcr0;
}

#define F(x) bit(X86_FEATURE_##x)

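/*
 * Re-derive vCPU state that depends on the guest's CPUID table: the
 * OSXSAVE bit, the LAPIC timer mode mask, the supported XCR0 value and
 * XSAVE area size, and the reported virtual address width.
 */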
int kvm_update_cpuid(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;
	struct kvm_lapic *apic = vcpu->arch.apic;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	if (!best)
		return 0;

	/* Update OSXSAVE bit */
	if (cpu_has_xsave && best->function == 0x1) {
		best->ecx &= ~F(OSXSAVE);
		if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
			best->ecx |= F(OSXSAVE);
	}

	if (apic) {
		if (best->ecx & F(TSC_DEADLINE_TIMER))
			apic->lapic_timer.timer_mode_mask = 3 << 17;
		else
			apic->lapic_timer.timer_mode_mask = 1 << 17;
	}

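	/*
	 * Derive the guest's usable XCR0 and the size of its XSAVE area
	 * from CPUID leaf 0xD, falling back to the legacy area plus header
	 * when the leaf is absent.
	 */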
	best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
	if (!best) {
		vcpu->arch.guest_supported_xcr0 = 0;
		vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
	} else {
		vcpu->arch.guest_supported_xcr0 =
			(best->eax | ((u64)best->edx << 32)) &
			kvm_supported_xcr0();
		vcpu->arch.guest_xstate_size = best->ebx =
			xstate_required_size(vcpu->arch.xcr0, false);
	}

	best = kvm_find_cpuid_entry(vcpu, 0xD, 1);
	if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
		best->ebx = xstate_required_size(vcpu->arch.xcr0, true);

	/*
	 * The existing code assumes virtual address is 48-bit in the canonical
	 * address checks; exit if it is ever changed.
	 */
	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
	if (best && ((best->eax & 0xff00) >> 8) != 48 &&
		((best->eax & 0xff00) >> 8) != 0)
		return -EINVAL;

	kvm_pmu_cpuid_update(vcpu);
	return 0;
}

static int is_efer_nx(void)
{
	unsigned long long efer = 0;

	rdmsrl_safe(MSR_EFER, &efer);
	return efer & EFER_NX;
}

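/*
 * If the host runs with EFER.NX clear, hide the NX bit from the guest's
 * 0x80000001 leaf so the guest is not advertised execute protection that
 * the host cannot provide.
 */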
static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_cpuid_entry2 *e, *entry;

	entry = NULL;
	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		e = &vcpu->arch.cpuid_entries[i];
		if (e->function == 0x80000001) {
			entry = e;
			break;
		}
	}
	if (entry && (entry->edx & F(NX)) && !is_efer_nx()) {
		entry->edx &= ~F(NX);
		printk(KERN_INFO "kvm: guest NX capability removed\n");
	}
}

/* when an old userspace process fills a new kernel module */
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries)
{
	int r, i;
	struct kvm_cpuid_entry *cpuid_entries;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -ENOMEM;
	cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
	if (!cpuid_entries)
		goto out;
	r = -EFAULT;
	if (copy_from_user(cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
		goto out_free;
	for (i = 0; i < cpuid->nent; i++) {
		vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
		vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
		vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
		vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
		vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
		vcpu->arch.cpuid_entries[i].index = 0;
		vcpu->arch.cpuid_entries[i].flags = 0;
		vcpu->arch.cpuid_entries[i].padding[0] = 0;
		vcpu->arch.cpuid_entries[i].padding[1] = 0;
		vcpu->arch.cpuid_entries[i].padding[2] = 0;
	}
	vcpu->arch.cpuid_nent = cpuid->nent;
	cpuid_fix_nx_cap(vcpu);
	kvm_apic_set_version(vcpu);
	kvm_x86_ops->cpuid_update(vcpu);
	r = kvm_update_cpuid(vcpu);

out_free:
	vfree(cpuid_entries);
out:
	return r;
}

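/*
 * KVM_SET_CPUID2: copy the guest CPUID table from userspace as-is and
 * propagate the result to the APIC, the vendor backend, and the cached
 * CPUID-derived state.
 */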
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -EFAULT;
	if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	vcpu->arch.cpuid_nent = cpuid->nent;
	kvm_apic_set_version(vcpu);
	kvm_x86_ops->cpuid_update(vcpu);
	r = kvm_update_cpuid(vcpu);
out:
	return r;
}

int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent < vcpu->arch.cpuid_nent)
		goto out;
	r = -EFAULT;
	if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
			 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	return 0;

out:
	cpuid->nent = vcpu->arch.cpuid_nent;
	return r;
}

static void cpuid_mask(u32 *word, int wordnum)
{
	*word &= boot_cpu_data.x86_capability[wordnum];
}

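/* Fill one entry by executing CPUID on the host with the given leaf and index. */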
static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
			   u32 index)
{
	entry->function = function;
	entry->index = index;
	cpuid_count(entry->function, entry->index,
		    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
	entry->flags = 0;
}

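/*
 * Build the KVM_GET_EMULATED_CPUID view: only features that KVM emulates
 * in software even when the host CPU lacks them (currently MOVBE).
 */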
static int __do_cpuid_ent_emulated(struct kvm_cpuid_entry2 *entry,
				   u32 func, u32 index, int *nent, int maxnent)
{
	switch (func) {
	case 0:
		entry->eax = 1;		/* only one leaf currently */
		++*nent;
		break;
	case 1:
		entry->ecx = F(MOVBE);
		++*nent;
		break;
	default:
		break;
	}

	entry->function = func;
	entry->index = index;

	return 0;
}

static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
				 u32 index, int *nent, int maxnent)
{
	int r;
	unsigned f_nx = is_efer_nx() ? F(NX) : 0;
#ifdef CONFIG_X86_64
	unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
				? F(GBPAGES) : 0;
	unsigned f_lm = F(LM);
#else
	unsigned f_gbpages = 0;
	unsigned f_lm = 0;
#endif
	unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
	unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0;
	unsigned f_mpx = kvm_x86_ops->mpx_supported() ? F(MPX) : 0;
	unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;

	/* cpuid 1.edx */
	const u32 kvm_supported_word0_x86_features =
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLUSH) |
		0 /* Reserved, DS, ACPI */ | F(MMX) |
		F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
		0 /* HTT, TM, Reserved, PBE */;
	/* cpuid 0x80000001.edx */
	const u32 kvm_supported_word1_x86_features =
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* Reserved */ |
		f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
		F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
		0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
	/* cpuid 1.ecx */
	const u32 kvm_supported_word4_x86_features =
		/* NOTE: MONITOR (and MWAIT) are emulated as NOP,
		 * but *not* advertised to guests via CPUID ! */
		F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
		0 /* DS-CPL, VMX, SMX, EST */ |
		0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
		F(FMA) | F(CX16) | 0 /* xTPR Update, PDCM */ |
		F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) |
		F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
		0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
		F(F16C) | F(RDRAND);
	/* cpuid 0x80000001.ecx */
	const u32 kvm_supported_word6_x86_features =
		F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
		F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
		F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
		0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM);

	/* cpuid 0xC0000001.edx */
	const u32 kvm_supported_word5_x86_features =
		F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
		F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
		F(PMM) | F(PMM_EN);

	/* cpuid 7.0.ebx */
	const u32 kvm_supported_word9_x86_features =
		F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
		F(BMI2) | F(ERMS) | f_invpcid | F(RTM) | f_mpx | F(RDSEED) |
		F(ADX) | F(SMAP) | F(AVX512F) | F(AVX512PF) | F(AVX512ER) |
		F(AVX512CD);

	/* cpuid 0xD.1.eax */
	const u32 kvm_supported_word10_x86_features =
		F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | f_xsaves;

	/* all calls to cpuid_count() should be made on the same cpu */
	get_cpu();

	r = -E2BIG;

	if (*nent >= maxnent)
		goto out;

	do_cpuid_1_ent(entry, function, index);
	++*nent;

	switch (function) {
	case 0:
		entry->eax = min(entry->eax, (u32)0xd);
		break;
	case 1:
		entry->edx &= kvm_supported_word0_x86_features;
		cpuid_mask(&entry->edx, 0);
		entry->ecx &= kvm_supported_word4_x86_features;
		cpuid_mask(&entry->ecx, 4);
		/* we support x2apic emulation even if host does not support
		 * it since we emulate x2apic in software */
		entry->ecx |= F(X2APIC);
		break;
	/* function 2 entries are STATEFUL. That is, repeated cpuid commands
	 * may return different values. This forces us to get_cpu() before
	 * issuing the first command, and also to emulate this annoying behavior
	 * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
	case 2: {
		int t, times = entry->eax & 0xff;

		entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
		entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
		for (t = 1; t < times; ++t) {
			if (*nent >= maxnent)
				goto out;

			do_cpuid_1_ent(&entry[t], function, 0);
			entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
			++*nent;
		}
		break;
	}
	/* function 4 has additional index. */
	case 4: {
		int i, cache_type;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* read more entries until cache_type is zero */
		for (i = 1; ; ++i) {
			if (*nent >= maxnent)
				goto out;

			cache_type = entry[i - 1].eax & 0x1f;
			if (!cache_type)
				break;
			do_cpuid_1_ent(&entry[i], function, i);
			entry[i].flags |=
				KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
	case 7: {
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* Mask ebx against host capability word 9 */
		if (index == 0) {
			entry->ebx &= kvm_supported_word9_x86_features;
			cpuid_mask(&entry->ebx, 9);
			/* TSC_ADJUST is emulated */
			entry->ebx |= F(TSC_ADJUST);
		} else
			entry->ebx = 0;
		entry->eax = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	}
	case 9:
		break;
	case 0xa: { /* Architectural Performance Monitoring */
		struct x86_pmu_capability cap;
		union cpuid10_eax eax;
		union cpuid10_edx edx;

		perf_get_x86_pmu_capability(&cap);

		/*
		 * Only support guest architectural pmu on a host
		 * with architectural pmu.
		 */
		if (!cap.version)
			memset(&cap, 0, sizeof(cap));

		eax.split.version_id = min(cap.version, 2);
		eax.split.num_counters = cap.num_counters_gp;
		eax.split.bit_width = cap.bit_width_gp;
		eax.split.mask_length = cap.events_mask_len;

		edx.split.num_counters_fixed = cap.num_counters_fixed;
		edx.split.bit_width_fixed = cap.bit_width_fixed;
		edx.split.reserved = 0;

		entry->eax = eax.full;
		entry->ebx = cap.events_mask;
		entry->ecx = 0;
		entry->edx = edx.full;
		break;
	}
	/* function 0xb has additional index. */
	case 0xb: {
		int i, level_type;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* read more entries until level_type is zero */
		for (i = 1; ; ++i) {
			if (*nent >= maxnent)
				goto out;

			level_type = entry[i - 1].ecx & 0xff00;
			if (!level_type)
				break;
			do_cpuid_1_ent(&entry[i], function, i);
			entry[i].flags |=
				KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
	case 0xd: {
		int idx, i;
		u64 supported = kvm_supported_xcr0();

		entry->eax &= supported;
		entry->edx &= supported >> 32;
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		if (!supported)
			break;

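		/*
		 * Enumerate the remaining XSAVE sub-leaves.  Sub-leaf 1
		 * describes XSAVEOPT/XSAVEC/XGETBV1/XSAVES and, when XSAVEC
		 * or XSAVES is exposed, the compacted buffer size; higher
		 * sub-leaves describe the individual state components that
		 * are present in the supported mask.
		 */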
		for (idx = 1, i = 1; idx < 64; ++idx) {
			u64 mask = ((u64)1 << idx);
			if (*nent >= maxnent)
				goto out;

			do_cpuid_1_ent(&entry[i], function, idx);
			if (idx == 1) {
				entry[i].eax &= kvm_supported_word10_x86_features;
				entry[i].ebx = 0;
				if (entry[i].eax & (F(XSAVES)|F(XSAVEC)))
					entry[i].ebx =
						xstate_required_size(supported,
								     true);
			} else {
				if (entry[i].eax == 0 || !(supported & mask))
					continue;
				if (WARN_ON_ONCE(entry[i].ecx & 1))
					continue;
			}
			entry[i].ecx = 0;
			entry[i].edx = 0;
			entry[i].flags |=
				KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
			++i;
		}
		break;
	}
	case KVM_CPUID_SIGNATURE: {
		static const char signature[12] = "KVMKVMKVM\0\0";
		const u32 *sigptr = (const u32 *)signature;
		entry->eax = KVM_CPUID_FEATURES;
		entry->ebx = sigptr[0];
		entry->ecx = sigptr[1];
		entry->edx = sigptr[2];
		break;
	}
	case KVM_CPUID_FEATURES:
		entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
			     (1 << KVM_FEATURE_NOP_IO_DELAY) |
			     (1 << KVM_FEATURE_CLOCKSOURCE2) |
			     (1 << KVM_FEATURE_ASYNC_PF) |
			     (1 << KVM_FEATURE_PV_EOI) |
			     (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) |
			     (1 << KVM_FEATURE_PV_UNHALT);

		if (sched_info_on())
			entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);

		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	case 0x80000000:
		entry->eax = min(entry->eax, 0x8000001a);
		break;
	case 0x80000001:
		entry->edx &= kvm_supported_word1_x86_features;
		cpuid_mask(&entry->edx, 1);
		entry->ecx &= kvm_supported_word6_x86_features;
		cpuid_mask(&entry->ecx, 6);
		break;
	case 0x80000007: /* Advanced power management */
		/* invariant TSC is CPUID.80000007H:EDX[8] */
		entry->edx &= (1 << 8);
		/* mask against host */
		entry->edx &= boot_cpu_data.x86_power;
		entry->eax = entry->ebx = entry->ecx = 0;
		break;
	case 0x80000008: {
		unsigned g_phys_as = (entry->eax >> 16) & 0xff;
		unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
		unsigned phys_as = entry->eax & 0xff;

		if (!g_phys_as)
			g_phys_as = phys_as;
		entry->eax = g_phys_as | (virt_as << 8);
		entry->ebx = entry->edx = 0;
		break;
	}
	case 0x80000019:
		entry->ecx = entry->edx = 0;
		break;
	case 0x8000001a:
		break;
	case 0x8000001d:
		break;
	/* Add support for Centaur's CPUID instruction. */
	case 0xC0000000:
		/* Just support up to 0xC0000004 now. */
		entry->eax = min(entry->eax, 0xC0000004);
		break;
	case 0xC0000001:
		entry->edx &= kvm_supported_word5_x86_features;
		cpuid_mask(&entry->edx, 5);
		break;
	case 3: /* Processor serial number */
	case 5: /* MONITOR/MWAIT */
	case 6: /* Thermal management */
	case 0xC0000002:
	case 0xC0000003:
	case 0xC0000004:
	default:
		entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
		break;
	}

	kvm_x86_ops->set_supported_cpuid(function, entry);

	r = 0;

out:
	put_cpu();

	return r;
}

static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 func,
			u32 idx, int *nent, int maxnent, unsigned int type)
{
	if (type == KVM_GET_EMULATED_CPUID)
		return __do_cpuid_ent_emulated(entry, func, idx, nent, maxnent);

	return __do_cpuid_ent(entry, func, idx, nent, maxnent);
}

#undef F

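/*
 * Table entry describing one CPUID range to enumerate: the base leaf,
 * whether EAX of the base leaf reports the highest leaf in the range, and
 * an optional qualifier that can skip the range (e.g. Centaur leaves on
 * non-Centaur hosts).
 */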
struct kvm_cpuid_param {
	u32 func;
	u32 idx;
	bool has_leaf_count;
	bool (*qualifier)(const struct kvm_cpuid_param *param);
};

static bool is_centaur_cpu(const struct kvm_cpuid_param *param)
{
	return boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR;
}

static bool sanity_check_entries(struct kvm_cpuid_entry2 __user *entries,
				 __u32 num_entries, unsigned int ioctl_type)
{
	int i;
	__u32 pad[3];

	if (ioctl_type != KVM_GET_EMULATED_CPUID)
		return false;

	/*
	 * We want to make sure that ->padding is being passed clean from
	 * userspace in case we want to use it for something in the future.
	 *
	 * Sadly, this wasn't enforced for KVM_GET_SUPPORTED_CPUID and so we
	 * have to give ourselves satisfied only with the emulated side. /me
	 * sheds a tear.
	 */
	for (i = 0; i < num_entries; i++) {
		if (copy_from_user(pad, entries[i].padding, sizeof(pad)))
			return true;

		if (pad[0] || pad[1] || pad[2])
			return true;
	}
	return false;
}

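/*
 * Handler for KVM_GET_SUPPORTED_CPUID and KVM_GET_EMULATED_CPUID: walk the
 * param[] ranges, fill entries via do_cpuid_ent(), and copy the result back
 * to userspace.
 */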
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type)
{
	struct kvm_cpuid_entry2 *cpuid_entries;
	int limit, nent = 0, r = -E2BIG, i;
	u32 func;
	static const struct kvm_cpuid_param param[] = {
		{ .func = 0, .has_leaf_count = true },
		{ .func = 0x80000000, .has_leaf_count = true },
		{ .func = 0xC0000000, .qualifier = is_centaur_cpu, .has_leaf_count = true },
		{ .func = KVM_CPUID_SIGNATURE },
		{ .func = KVM_CPUID_FEATURES },
	};

	if (cpuid->nent < 1)
		goto out;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		cpuid->nent = KVM_MAX_CPUID_ENTRIES;

	if (sanity_check_entries(entries, cpuid->nent, type))
		return -EINVAL;

	r = -ENOMEM;
	cpuid_entries = vzalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
	if (!cpuid_entries)
		goto out;

	r = 0;
	for (i = 0; i < ARRAY_SIZE(param); i++) {
		const struct kvm_cpuid_param *ent = &param[i];

		if (ent->qualifier && !ent->qualifier(ent))
			continue;

		r = do_cpuid_ent(&cpuid_entries[nent], ent->func, ent->idx,
				&nent, cpuid->nent, type);

		if (r)
			goto out_free;

		if (!ent->has_leaf_count)
			continue;

		limit = cpuid_entries[nent - 1].eax;
		for (func = ent->func + 1; func <= limit && nent < cpuid->nent && r == 0; ++func)
			r = do_cpuid_ent(&cpuid_entries[nent], func, ent->idx,
					&nent, cpuid->nent, type);

		if (r)
			goto out_free;
	}

	r = -EFAULT;
	if (copy_to_user(entries, cpuid_entries,
			 nent * sizeof(struct kvm_cpuid_entry2)))
		goto out_free;
	cpuid->nent = nent;
	r = 0;

out_free:
	vfree(cpuid_entries);
out:
	return r;
}

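/*
 * Rotate the READ_NEXT marker to the next entry that shares the same
 * function, so that stateful leaves (currently only leaf 2) return their
 * sub-entries in turn across repeated CPUID executions.
 */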
static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
{
	struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
	int j, nent = vcpu->arch.cpuid_nent;

	e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
	/* when no next entry is found, the current entry[i] is reselected */
	for (j = i + 1; ; j = (j + 1) % nent) {
		struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
		if (ej->function == e->function) {
			ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
			return j;
		}
	}
	return 0; /* silence gcc, even though control never reaches here */
}

/* find an entry with matching function, matching index (if needed), and that
 * should be read next (if it's stateful) */
static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
	u32 function, u32 index)
{
	if (e->function != function)
		return 0;
	if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
		return 0;
	if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
	    !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
		return 0;
	return 1;
}

struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index)
{
	int i;
	struct kvm_cpuid_entry2 *best = NULL;

	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		struct kvm_cpuid_entry2 *e;

		e = &vcpu->arch.cpuid_entries[i];
		if (is_matching_cpuid_entry(e, function, index)) {
			if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
				move_to_next_stateful_cpuid_entry(vcpu, i);
			best = e;
			break;
		}
	}
	return best;
}
EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);

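/*
 * Report the guest's physical address width: CPUID.80000008H:EAX[7:0] if
 * the guest exposes that leaf, otherwise the architectural default of 36
 * bits.
 */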
int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
	if (!best || best->eax < 0x80000008)
		goto not_found;
	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
	if (best)
		return best->eax & 0xff;
not_found:
	return 36;
}
EXPORT_SYMBOL_GPL(cpuid_maxphyaddr);

/*
 * If no match is found, check whether we exceed the vCPU's limit
 * and return the content of the highest valid _standard_ leaf instead.
 * This is to satisfy the CPUID specification.
 */
static struct kvm_cpuid_entry2* check_cpuid_limit(struct kvm_vcpu *vcpu,
						  u32 function, u32 index)
{
	struct kvm_cpuid_entry2 *maxlevel;

	maxlevel = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
	if (!maxlevel || maxlevel->eax >= function)
		return NULL;
	if (function & 0x80000000) {
		maxlevel = kvm_find_cpuid_entry(vcpu, 0, 0);
		if (!maxlevel)
			return NULL;
	}
	return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index);
}

void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
{
	u32 function = *eax, index = *ecx;
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, function, index);

	if (!best)
		best = check_cpuid_limit(vcpu, function, index);

	/*
	 * Perfmon not yet supported for L2 guest.
	 */
	if (is_guest_mode(vcpu) && function == 0xa)
		best = NULL;

	if (best) {
		*eax = best->eax;
		*ebx = best->ebx;
		*ecx = best->ecx;
		*edx = best->edx;
	} else
		*eax = *ebx = *ecx = *edx = 0;
	trace_kvm_cpuid(function, *eax, *ebx, *ecx, *edx);
}
EXPORT_SYMBOL_GPL(kvm_cpuid);

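/*
 * Emulate the CPUID instruction: take the leaf and sub-leaf from RAX/RCX,
 * resolve them through the vCPU's CPUID table, write the result back to
 * RAX/RBX/RCX/RDX, and skip the emulated instruction.
 */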
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
	u32 function, eax, ebx, ecx, edx;

	function = eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
	ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx);
	kvm_register_write(vcpu, VCPU_REGS_RAX, eax);
	kvm_register_write(vcpu, VCPU_REGS_RBX, ebx);
	kvm_register_write(vcpu, VCPU_REGS_RCX, ecx);
	kvm_register_write(vcpu, VCPU_REGS_RDX, edx);
	kvm_x86_ops->skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);