// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 */

#include <linux/kvm_host.h>
#include "irq.h"
#include "ioapic.h"
#include "mmu.h"
#include "i8254.h"
#include "tss.h"
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"
#include "x86.h"
#include "cpuid.h"
#include "pmu.h"
#include "hyperv.h"
#include "lapic.h"
#include "xen.h"

#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/moduleparam.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/cpufreq.h>
#include <linux/user-return-notifier.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <linux/hash.h>
#include <linux/pci.h>
#include <linux/timekeeper_internal.h>
#include <linux/pvclock_gtod.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
#include <linux/sched/stat.h>
#include <linux/sched/isolation.h>
#include <linux/mem_encrypt.h>
#include <linux/entry-kvm.h>
#include <linux/suspend.h>

#include <trace/events/kvm.h>

#include <asm/debugreg.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/mce.h>
#include <asm/pkru.h>
#include <linux/kernel_stat.h>
#include <asm/fpu/api.h>
#include <asm/fpu/xcr.h>
#include <asm/fpu/xstate.h>
#include <asm/pvclock.h>
#include <asm/div64.h>
#include <asm/irq_remapping.h>
#include <asm/mshyperv.h>
#include <asm/hypervisor.h>
#include <asm/tlbflush.h>
#include <asm/intel_pt.h>
#include <asm/emulate_prefix.h>
#include <asm/sgx.h>
#include <clocksource/hyperv_timer.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#define MAX_IO_MSRS 256
#define KVM_MAX_MCE_BANKS 32

struct kvm_caps kvm_caps __read_mostly = {
        .supported_mce_cap = MCG_CTL_P | MCG_SER_P,
};
EXPORT_SYMBOL_GPL(kvm_caps);

#define  ERR_PTR_USR(e)  ((void __user *)ERR_PTR(e))

#define emul_to_vcpu(ctxt) \
        ((struct kvm_vcpu *)(ctxt)->vcpu)
/*
 * EFER defaults:
 * - enable SYSCALL by default because it is emulated by KVM
 * - enable LME and LMA by default on 64-bit KVM
 */
#ifdef CONFIG_X86_64
static
u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
#else
static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
#endif

static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS;

#define KVM_EXIT_HYPERCALL_VALID_MASK (1 << KVM_HC_MAP_GPA_RANGE)

#define KVM_CAP_PMU_VALID_MASK KVM_PMU_CAP_DISABLE

#define KVM_X2APIC_API_VALID_FLAGS (KVM_X2APIC_API_USE_32BIT_IDS | \
                                    KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)

static void update_cr8_intercept(struct kvm_vcpu *vcpu);
static void process_nmi(struct kvm_vcpu *vcpu);
static void process_smi(struct kvm_vcpu *vcpu);
static void enter_smm(struct kvm_vcpu *vcpu);
static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
static void store_regs(struct kvm_vcpu *vcpu);
static int sync_regs(struct kvm_vcpu *vcpu);
static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu);

static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);

struct kvm_x86_ops kvm_x86_ops __read_mostly;

#define KVM_X86_OP(func)                                             \
        DEFINE_STATIC_CALL_NULL(kvm_x86_##func,                      \
                                *(((struct kvm_x86_ops *)0)->func));
#define KVM_X86_OP_OPTIONAL KVM_X86_OP
#define KVM_X86_OP_OPTIONAL_RET0 KVM_X86_OP
#include <asm/kvm-x86-ops.h>
EXPORT_STATIC_CALL_GPL(kvm_x86_get_cs_db_l_bits);
EXPORT_STATIC_CALL_GPL(kvm_x86_cache_reg);

static bool __read_mostly ignore_msrs = false;
module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);

bool __read_mostly report_ignored_msrs = true;
module_param(report_ignored_msrs, bool, S_IRUGO | S_IWUSR);
EXPORT_SYMBOL_GPL(report_ignored_msrs);

unsigned int min_timer_period_us = 200;
module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);

static bool __read_mostly kvmclock_periodic_sync = true;
module_param(kvmclock_periodic_sync, bool, S_IRUGO);

/* TSC tolerance in parts per million - default to 1/2 of the NTP threshold */
static u32 __read_mostly tsc_tolerance_ppm = 250;
module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);

/*
 * lapic timer advance (tscdeadline mode only) in nanoseconds.  '-1' enables
 * adaptive tuning starting from default advancement of 1000ns.  '0' disables
 * advancement entirely.  Any other value is used as-is and disables adaptive
 * tuning, i.e. allows privileged userspace to set an exact advancement time.
 */
static int __read_mostly lapic_timer_advance_ns = -1;
module_param(lapic_timer_advance_ns, int, S_IRUGO | S_IWUSR);

static bool __read_mostly vector_hashing = true;
module_param(vector_hashing, bool, S_IRUGO);

bool __read_mostly enable_vmware_backdoor = false;
module_param(enable_vmware_backdoor, bool, S_IRUGO);
EXPORT_SYMBOL_GPL(enable_vmware_backdoor);

/*
 * Flags to manipulate forced emulation behavior (any non-zero value will
 * enable forced emulation).
 */
#define KVM_FEP_CLEAR_RFLAGS_RF BIT(1)
static int __read_mostly force_emulation_prefix;
module_param(force_emulation_prefix, int, 0644);

int __read_mostly pi_inject_timer = -1;
module_param(pi_inject_timer, bint, S_IRUGO | S_IWUSR);

/* Enable/disable PMU virtualization */
bool __read_mostly enable_pmu = true;
EXPORT_SYMBOL_GPL(enable_pmu);
module_param(enable_pmu, bool, 0444);

bool __read_mostly eager_page_split = true;
module_param(eager_page_split, bool, 0644);

/*
 * Restoring the host value for MSRs that are only consumed when running in
 * usermode, e.g. SYSCALL MSRs and TSC_AUX, can be deferred until the CPU
 * returns to userspace, i.e. the kernel can run with the guest's value.
 */
#define KVM_MAX_NR_USER_RETURN_MSRS 16

struct kvm_user_return_msrs {
        struct user_return_notifier urn;
        bool registered;
        struct kvm_user_return_msr_values {
                u64 host;
                u64 curr;
        } values[KVM_MAX_NR_USER_RETURN_MSRS];
};

u32 __read_mostly kvm_nr_uret_msrs;
EXPORT_SYMBOL_GPL(kvm_nr_uret_msrs);
static u32 __read_mostly kvm_uret_msrs_list[KVM_MAX_NR_USER_RETURN_MSRS];
static struct kvm_user_return_msrs __percpu *user_return_msrs;
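
/*
 * A sketch of the typical flow through the user-return MSR helpers below
 * (descriptive summary, not a normative API contract): vendor code calls
 * kvm_add_user_return_msr() at setup to claim a slot for an MSR, writes the
 * guest value with kvm_set_user_return_msr() before entering the guest, and
 * kvm_on_user_return() restores the host values once the CPU returns to
 * userspace.
 */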

#define KVM_SUPPORTED_XCR0     (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
                                | XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
                                | XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512 \
                                | XFEATURE_MASK_PKRU | XFEATURE_MASK_XTILE)

u64 __read_mostly host_efer;
EXPORT_SYMBOL_GPL(host_efer);

bool __read_mostly allow_smaller_maxphyaddr = false;
EXPORT_SYMBOL_GPL(allow_smaller_maxphyaddr);

bool __read_mostly enable_apicv = true;
EXPORT_SYMBOL_GPL(enable_apicv);

u64 __read_mostly host_xss;
EXPORT_SYMBOL_GPL(host_xss);

const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
        KVM_GENERIC_VM_STATS(),
        STATS_DESC_COUNTER(VM, mmu_shadow_zapped),
        STATS_DESC_COUNTER(VM, mmu_pte_write),
        STATS_DESC_COUNTER(VM, mmu_pde_zapped),
        STATS_DESC_COUNTER(VM, mmu_flooded),
        STATS_DESC_COUNTER(VM, mmu_recycled),
        STATS_DESC_COUNTER(VM, mmu_cache_miss),
        STATS_DESC_ICOUNTER(VM, mmu_unsync),
        STATS_DESC_ICOUNTER(VM, pages_4k),
        STATS_DESC_ICOUNTER(VM, pages_2m),
        STATS_DESC_ICOUNTER(VM, pages_1g),
        STATS_DESC_ICOUNTER(VM, nx_lpage_splits),
        STATS_DESC_PCOUNTER(VM, max_mmu_rmap_size),
        STATS_DESC_PCOUNTER(VM, max_mmu_page_hash_collisions)
};

const struct kvm_stats_header kvm_vm_stats_header = {
        .name_size = KVM_STATS_NAME_SIZE,
        .num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
        .id_offset = sizeof(struct kvm_stats_header),
        .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
        .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
                       sizeof(kvm_vm_stats_desc),
};

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
        KVM_GENERIC_VCPU_STATS(),
        STATS_DESC_COUNTER(VCPU, pf_taken),
        STATS_DESC_COUNTER(VCPU, pf_fixed),
        STATS_DESC_COUNTER(VCPU, pf_emulate),
        STATS_DESC_COUNTER(VCPU, pf_spurious),
        STATS_DESC_COUNTER(VCPU, pf_fast),
        STATS_DESC_COUNTER(VCPU, pf_mmio_spte_created),
        STATS_DESC_COUNTER(VCPU, pf_guest),
        STATS_DESC_COUNTER(VCPU, tlb_flush),
        STATS_DESC_COUNTER(VCPU, invlpg),
        STATS_DESC_COUNTER(VCPU, exits),
        STATS_DESC_COUNTER(VCPU, io_exits),
        STATS_DESC_COUNTER(VCPU, mmio_exits),
        STATS_DESC_COUNTER(VCPU, signal_exits),
        STATS_DESC_COUNTER(VCPU, irq_window_exits),
        STATS_DESC_COUNTER(VCPU, nmi_window_exits),
        STATS_DESC_COUNTER(VCPU, l1d_flush),
        STATS_DESC_COUNTER(VCPU, halt_exits),
        STATS_DESC_COUNTER(VCPU, request_irq_exits),
        STATS_DESC_COUNTER(VCPU, irq_exits),
        STATS_DESC_COUNTER(VCPU, host_state_reload),
        STATS_DESC_COUNTER(VCPU, fpu_reload),
        STATS_DESC_COUNTER(VCPU, insn_emulation),
        STATS_DESC_COUNTER(VCPU, insn_emulation_fail),
        STATS_DESC_COUNTER(VCPU, hypercalls),
        STATS_DESC_COUNTER(VCPU, irq_injections),
        STATS_DESC_COUNTER(VCPU, nmi_injections),
        STATS_DESC_COUNTER(VCPU, req_event),
        STATS_DESC_COUNTER(VCPU, nested_run),
        STATS_DESC_COUNTER(VCPU, directed_yield_attempted),
        STATS_DESC_COUNTER(VCPU, directed_yield_successful),
        STATS_DESC_COUNTER(VCPU, preemption_reported),
        STATS_DESC_COUNTER(VCPU, preemption_other),
        STATS_DESC_IBOOLEAN(VCPU, guest_mode),
        STATS_DESC_COUNTER(VCPU, notify_window_exits),
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
        .name_size = KVM_STATS_NAME_SIZE,
        .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
        .id_offset = sizeof(struct kvm_stats_header),
        .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
        .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
                       sizeof(kvm_vcpu_stats_desc),
};

u64 __read_mostly host_xcr0;

static struct kmem_cache *x86_emulator_cache;

/*
 * When called, the previous get/set MSR operation hit an invalid MSR.
 * Return true if the failed MSR access should be ignored/silenced.
 */
static bool kvm_msr_ignored_check(u32 msr, u64 data, bool write)
{
        const char *op = write ? "wrmsr" : "rdmsr";

        if (ignore_msrs) {
                if (report_ignored_msrs)
                        kvm_pr_unimpl("ignored %s: 0x%x data 0x%llx\n",
                                      op, msr, data);
                /* Mask the error */
                return true;
        } else {
                kvm_debug_ratelimited("unhandled %s: 0x%x data 0x%llx\n",
                                      op, msr, data);
                return false;
        }
}

static struct kmem_cache *kvm_alloc_emulator_cache(void)
{
        unsigned int useroffset = offsetof(struct x86_emulate_ctxt, src);
        unsigned int size = sizeof(struct x86_emulate_ctxt);

        return kmem_cache_create_usercopy("x86_emulator", size,
                                          __alignof__(struct x86_emulate_ctxt),
                                          SLAB_ACCOUNT, useroffset,
                                          size - useroffset, NULL);
}

static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);

static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
{
        int i;

        for (i = 0; i < ASYNC_PF_PER_VCPU; i++)
                vcpu->arch.apf.gfns[i] = ~0;
}

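/*
 * Restore the host values of any user-return MSRs whose current values
 * differ.  Runs via the user-return notifier when the CPU is about to
 * return to userspace, and directly from the hardware-disable path.
 */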
static void kvm_on_user_return(struct user_return_notifier *urn)
{
        unsigned slot;
        struct kvm_user_return_msrs *msrs
                = container_of(urn, struct kvm_user_return_msrs, urn);
        struct kvm_user_return_msr_values *values;
        unsigned long flags;

        /*
         * Disable IRQs here, since the following code could be interrupted
         * and executed through kvm_arch_hardware_disable().
         */
        local_irq_save(flags);
        if (msrs->registered) {
                msrs->registered = false;
                user_return_notifier_unregister(urn);
        }
        local_irq_restore(flags);
        for (slot = 0; slot < kvm_nr_uret_msrs; ++slot) {
                values = &msrs->values[slot];
                if (values->host != values->curr) {
                        wrmsrl(kvm_uret_msrs_list[slot], values->host);
                        values->curr = values->host;
                }
        }
}

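/*
 * Probe whether the MSR can be safely read and written back on this CPU
 * before KVM agrees to track it as a user-return MSR.
 */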
static int kvm_probe_user_return_msr(u32 msr)
{
        u64 val;
        int ret;

        preempt_disable();
        ret = rdmsrl_safe(msr, &val);
        if (ret)
                goto out;
        ret = wrmsrl_safe(msr, val);
out:
        preempt_enable();
        return ret;
}

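/*
 * Register @msr as a user-return MSR.  Returns the allocated slot index on
 * success, or -1 if the MSR cannot be safely accessed on this CPU.
 */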
int kvm_add_user_return_msr(u32 msr)
{
        BUG_ON(kvm_nr_uret_msrs >= KVM_MAX_NR_USER_RETURN_MSRS);

        if (kvm_probe_user_return_msr(msr))
                return -1;

        kvm_uret_msrs_list[kvm_nr_uret_msrs] = msr;
        return kvm_nr_uret_msrs++;
}
EXPORT_SYMBOL_GPL(kvm_add_user_return_msr);

int kvm_find_user_return_msr(u32 msr)
{
        int i;

        for (i = 0; i < kvm_nr_uret_msrs; ++i) {
                if (kvm_uret_msrs_list[i] == msr)
                        return i;
        }
        return -1;
}
EXPORT_SYMBOL_GPL(kvm_find_user_return_msr);

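/*
 * Snapshot the current (host) values of all registered user-return MSRs on
 * this CPU so kvm_on_user_return() knows what to restore.
 */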
static void kvm_user_return_msr_cpu_online(void)
{
        unsigned int cpu = smp_processor_id();
        struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu);
        u64 value;
        int i;

        for (i = 0; i < kvm_nr_uret_msrs; ++i) {
                rdmsrl_safe(kvm_uret_msrs_list[i], &value);
                msrs->values[i].host = value;
                msrs->values[i].curr = value;
        }
}

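/*
 * Update the bits of the user-return MSR selected by @mask to @value,
 * keeping the host's value for the remaining bits, and lazily register
 * the user-return notifier on first use.
 */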
int kvm_set_user_return_msr(unsigned slot, u64 value, u64 mask)
{
        unsigned int cpu = smp_processor_id();
        struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu);
        int err;

        value = (value & mask) | (msrs->values[slot].host & ~mask);
        if (value == msrs->values[slot].curr)
                return 0;
        err = wrmsrl_safe(kvm_uret_msrs_list[slot], value);
        if (err)
                return 1;

        msrs->values[slot].curr = value;
        if (!msrs->registered) {
                msrs->urn.on_user_return = kvm_on_user_return;
                user_return_notifier_register(&msrs->urn);
                msrs->registered = true;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_user_return_msr);

static void drop_user_return_notifiers(void)
{
        unsigned int cpu = smp_processor_id();
        struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu);

        if (msrs->registered)
                kvm_on_user_return(&msrs->urn);
}

u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

enum lapic_mode kvm_get_apic_mode(struct kvm_vcpu *vcpu)
{
        return kvm_apic_mode(kvm_get_apic_base(vcpu));
}
EXPORT_SYMBOL_GPL(kvm_get_apic_mode);

int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
        enum lapic_mode old_mode = kvm_get_apic_mode(vcpu);
        enum lapic_mode new_mode = kvm_apic_mode(msr_info->data);
        u64 reserved_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu) | 0x2ff |
                (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) ? 0 : X2APIC_ENABLE);

        if ((msr_info->data & reserved_bits) != 0 || new_mode == LAPIC_MODE_INVALID)
                return 1;
        if (!msr_info->host_initiated) {
                if (old_mode == LAPIC_MODE_X2APIC && new_mode == LAPIC_MODE_XAPIC)
                        return 1;
                if (old_mode == LAPIC_MODE_DISABLED && new_mode == LAPIC_MODE_X2APIC)
                        return 1;
        }

        kvm_lapic_set_base(vcpu, msr_info->data);
        kvm_recalculate_apic_map(vcpu->kvm);
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);

/*
 * Handle a fault on a hardware virtualization (VMX or SVM) instruction.
 *
 * Hardware virtualization extension instructions may fault if a reboot turns
 * off virtualization while processes are running.  Usually after catching the
 * fault we just panic; during a reboot the instruction is instead ignored.
 */
noinstr void kvm_spurious_fault(void)
{
        /* Fault while not rebooting.  We want the trace. */
        BUG_ON(!kvm_rebooting);
}
EXPORT_SYMBOL_GPL(kvm_spurious_fault);

#define EXCPT_BENIGN            0
#define EXCPT_CONTRIBUTORY      1
#define EXCPT_PF                2

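/*
 * Classify an exception vector for double-fault escalation: per the SDM, a
 * contributory exception raised while delivering a contributory exception,
 * or any non-benign exception raised while delivering a #PF, escalates to
 * #DF (see kvm_multiple_exception() below).
 */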
static int exception_class(int vector)
{
        switch (vector) {
        case PF_VECTOR:
                return EXCPT_PF;
        case DE_VECTOR:
        case TS_VECTOR:
        case NP_VECTOR:
        case SS_VECTOR:
        case GP_VECTOR:
                return EXCPT_CONTRIBUTORY;
        default:
                break;
        }
        return EXCPT_BENIGN;
}

#define EXCPT_FAULT             0
#define EXCPT_TRAP              1
#define EXCPT_ABORT             2
#define EXCPT_INTERRUPT         3
#define EXCPT_DB                4

static int exception_type(int vector)
{
        unsigned int mask;

        if (WARN_ON(vector > 31 || vector == NMI_VECTOR))
                return EXCPT_INTERRUPT;

        mask = 1 << vector;

        /*
         * #DBs can be trap-like or fault-like, the caller must check other CPU
         * state, e.g. DR6, to determine whether a #DB is a trap or fault.
         */
        if (mask & (1 << DB_VECTOR))
                return EXCPT_DB;

        if (mask & ((1 << BP_VECTOR) | (1 << OF_VECTOR)))
                return EXCPT_TRAP;

        if (mask & ((1 << DF_VECTOR) | (1 << MC_VECTOR)))
                return EXCPT_ABORT;

        /* Reserved exceptions will result in fault */
        return EXCPT_FAULT;
}

void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu,
                                   struct kvm_queued_exception *ex)
{
        if (!ex->has_payload)
                return;

        switch (ex->vector) {
        case DB_VECTOR:
                /*
                 * "Certain debug exceptions may clear bits 0-3.  The
                 * remaining contents of the DR6 register are never
                 * cleared by the processor".
                 */
                vcpu->arch.dr6 &= ~DR_TRAP_BITS;
                /*
                 * To reflect the #DB exception payload in guest dr6, three
                 * components need to be considered: the active low bits, the
                 * FIXED_1 bits, and the active high bits (e.g. DR6_BD, DR6_BS
                 * and DR6_BT).  DR6_ACTIVE_LOW contains the FIXED_1 and active
                 * low bits.  In the target guest dr6:
                 * - FIXED_1 bits should always be set.
                 * - Active low bits should be cleared if set in the payload.
                 * - Active high bits should be set if set in the payload.
                 *
                 * Note, the payload is compatible with the pending debug
                 * exceptions/exit qualification under VMX: active low bits
                 * there are active high in the payload, so they need to be
                 * flipped for DR6.
                 */
                vcpu->arch.dr6 |= DR6_ACTIVE_LOW;
                vcpu->arch.dr6 |= ex->payload;
                vcpu->arch.dr6 ^= ex->payload & DR6_ACTIVE_LOW;

                /*
                 * The #DB payload is defined as compatible with the 'pending
                 * debug exceptions' field under VMX, not DR6. While bit 12 is
                 * defined in the 'pending debug exceptions' field (enabled
                 * breakpoint), it is reserved and must be zero in DR6.
                 */
                vcpu->arch.dr6 &= ~BIT(12);
                break;
        case PF_VECTOR:
                vcpu->arch.cr2 = ex->payload;
                break;
        }

        ex->has_payload = false;
        ex->payload = 0;
}
EXPORT_SYMBOL_GPL(kvm_deliver_exception_payload);

static void kvm_queue_exception_vmexit(struct kvm_vcpu *vcpu, unsigned int vector,
                                       bool has_error_code, u32 error_code,
                                       bool has_payload, unsigned long payload)
{
        struct kvm_queued_exception *ex = &vcpu->arch.exception_vmexit;

        ex->vector = vector;
        ex->injected = false;
        ex->pending = true;
        ex->has_error_code = has_error_code;
        ex->error_code = error_code;
        ex->has_payload = has_payload;
        ex->payload = payload;
}

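/*
 * Common worker to queue, or re-inject, an exception for @vcpu.  Handles
 * morphing the exception into a VM-Exit for L1, delivering the payload,
 * and escalating to #DF or triple fault when exceptions pile up.
 */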
static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
                unsigned nr, bool has_error, u32 error_code,
                bool has_payload, unsigned long payload, bool reinject)
{
        u32 prev_nr;
        int class1, class2;

        kvm_make_request(KVM_REQ_EVENT, vcpu);

        /*
         * If the exception is destined for L2 and isn't being reinjected,
         * morph it to a VM-Exit if L1 wants to intercept the exception.  A
         * previously injected exception is not checked because it was checked
         * when it was originally queued, and re-checking is incorrect if _L1_
         * injected the exception, in which case it's exempt from interception.
         */
        if (!reinject && is_guest_mode(vcpu) &&
            kvm_x86_ops.nested_ops->is_exception_vmexit(vcpu, nr, error_code)) {
                kvm_queue_exception_vmexit(vcpu, nr, has_error, error_code,
                                           has_payload, payload);
                return;
        }

        if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) {
        queue:
                if (reinject) {
                        /*
                         * On VM-Entry, an exception can be pending if and only
                         * if event injection was blocked by nested_run_pending.
                         * In that case, however, vcpu_enter_guest() requests an
                         * immediate exit, and the guest shouldn't proceed far
                         * enough to need reinjection.
                         */
                        WARN_ON_ONCE(kvm_is_exception_pending(vcpu));
                        vcpu->arch.exception.injected = true;
                        if (WARN_ON_ONCE(has_payload)) {
                                /*
                                 * A reinjected event has already
                                 * delivered its payload.
                                 */
                                has_payload = false;
                                payload = 0;
                        }
                } else {
                        vcpu->arch.exception.pending = true;
                        vcpu->arch.exception.injected = false;
                }
                vcpu->arch.exception.has_error_code = has_error;
                vcpu->arch.exception.vector = nr;
                vcpu->arch.exception.error_code = error_code;
                vcpu->arch.exception.has_payload = has_payload;
                vcpu->arch.exception.payload = payload;
                if (!is_guest_mode(vcpu))
                        kvm_deliver_exception_payload(vcpu,
                                                      &vcpu->arch.exception);
                return;
        }

        /* Check how the new exception interacts with the one already queued. */
        prev_nr = vcpu->arch.exception.vector;
        if (prev_nr == DF_VECTOR) {
                /* triple fault -> shutdown */
                kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
                return;
        }
        class1 = exception_class(prev_nr);
        class2 = exception_class(nr);
        if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY) ||
            (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
                /*
                 * Synthesize #DF.  Clear the previously injected or pending
                 * exception so as not to incorrectly trigger shutdown.
                 */
                vcpu->arch.exception.injected = false;
                vcpu->arch.exception.pending = false;

                kvm_queue_exception_e(vcpu, DF_VECTOR, 0);
        } else {
                /*
                 * Replace the previous exception with the new one in the
                 * hope that instruction re-execution will regenerate the
                 * lost exception.
                 */
                goto queue;
        }
}

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
        kvm_multiple_exception(vcpu, nr, false, 0, false, 0, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);

void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
        kvm_multiple_exception(vcpu, nr, false, 0, false, 0, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception);

void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr,
                           unsigned long payload)
{
        kvm_multiple_exception(vcpu, nr, false, 0, true, payload, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_p);

static void kvm_queue_exception_e_p(struct kvm_vcpu *vcpu, unsigned nr,
                                    u32 error_code, unsigned long payload)
{
        kvm_multiple_exception(vcpu, nr, true, error_code,
                               true, payload, false);
}

int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
{
        if (err)
                kvm_inject_gp(vcpu, 0);
        else
                return kvm_skip_emulated_instruction(vcpu);

        return 1;
}
EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);

static int complete_emulated_insn_gp(struct kvm_vcpu *vcpu, int err)
{
        if (err) {
                kvm_inject_gp(vcpu, 0);
                return 1;
        }

        return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE | EMULTYPE_SKIP |
                                       EMULTYPE_COMPLETE_USER_EXIT);
}

void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
        ++vcpu->stat.pf_guest;

        /*
         * Async #PF in L2 is always forwarded to L1 as a VM-Exit regardless of
         * whether or not L1 wants to intercept "regular" #PF.
         */
        if (is_guest_mode(vcpu) && fault->async_page_fault)
                kvm_queue_exception_vmexit(vcpu, PF_VECTOR,
                                           true, fault->error_code,
                                           true, fault->address);
        else
                kvm_queue_exception_e_p(vcpu, PF_VECTOR, fault->error_code,
                                        fault->address);
}
EXPORT_SYMBOL_GPL(kvm_inject_page_fault);

void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
                                    struct x86_exception *fault)
{
        struct kvm_mmu *fault_mmu;

        WARN_ON_ONCE(fault->vector != PF_VECTOR);

        fault_mmu = fault->nested_page_fault ? vcpu->arch.mmu :
                                               vcpu->arch.walk_mmu;

        /*
         * Invalidate the TLB entry for the faulting address, if it exists,
         * else the access will fault indefinitely (and to emulate hardware).
         */
        if ((fault->error_code & PFERR_PRESENT_MASK) &&
            !(fault->error_code & PFERR_RSVD_MASK))
                kvm_mmu_invalidate_gva(vcpu, fault_mmu, fault->address,
                                       fault_mmu->root.hpa);

        fault_mmu->inject_page_fault(vcpu, fault);
}
EXPORT_SYMBOL_GPL(kvm_inject_emulated_page_fault);

void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
        atomic_inc(&vcpu->arch.nmi_queued);
        kvm_make_request(KVM_REQ_NMI, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
        kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
        kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);

/*
 * Check that the CPL is <= required_cpl; if so, return true.  Otherwise
 * queue a #GP and return false.
 */
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
{
        if (static_call(kvm_x86_get_cpl)(vcpu) <= required_cpl)
                return true;
        kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
        return false;
}
EXPORT_SYMBOL_GPL(kvm_require_cpl);

bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
{
        if ((dr != 4 && dr != 5) || !kvm_read_cr4_bits(vcpu, X86_CR4_DE))
                return true;

        kvm_queue_exception(vcpu, UD_VECTOR);
        return false;
}
EXPORT_SYMBOL_GPL(kvm_require_dr);

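/*
 * Reserved bits in a PAE PDPTE: bits 1-2 and 5-8 are architecturally
 * reserved, plus any GPA bits above the guest's MAXPHYADDR.
 */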
static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.reserved_gpa_bits | rsvd_bits(5, 8) | rsvd_bits(1, 2);
}

/*
 * Load the PAE PDPTRs.  Return 1 if they are all valid, 0 otherwise.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
        struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
        gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
        gpa_t real_gpa;
        int i;
        int ret;
        u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];

        /*
         * If the MMU is nested, CR3 holds an L2 GPA and needs to be translated
         * to an L1 GPA.
         */
        real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(pdpt_gfn),
                                     PFERR_USER_MASK | PFERR_WRITE_MASK, NULL);
        if (real_gpa == INVALID_GPA)
                return 0;

        /* Note the offset, PDPTRs are 32-byte aligned when using PAE paging. */
        ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(real_gpa), pdpte,
                                       cr3 & GENMASK(11, 5), sizeof(pdpte));
        if (ret < 0)
                return 0;

        for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
                if ((pdpte[i] & PT_PRESENT_MASK) &&
                    (pdpte[i] & pdptr_rsvd_bits(vcpu))) {
                        return 0;
                }
        }

        /*
         * Marking VCPU_EXREG_PDPTR dirty doesn't work for !tdp_enabled.
         * Shadow page roots need to be reconstructed instead.
         */
        if (!tdp_enabled && memcmp(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs)))
                kvm_mmu_free_roots(vcpu->kvm, mmu, KVM_MMU_ROOT_CURRENT);

        memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
        kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
        kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
        vcpu->arch.pdptrs_from_userspace = false;

        return 1;
}
EXPORT_SYMBOL_GPL(load_pdptrs);

void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0)
{
        if ((cr0 ^ old_cr0) & X86_CR0_PG) {
                kvm_clear_async_pf_completion_queue(vcpu);
                kvm_async_pf_hash_reset(vcpu);

                /*
                 * Clearing CR0.PG is defined to flush the TLB from the guest's
                 * perspective.
                 */
                if (!(cr0 & X86_CR0_PG))
                        kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
        }

        if ((cr0 ^ old_cr0) & KVM_MMU_CR0_ROLE_BITS)
                kvm_mmu_reset_context(vcpu);

        if (((cr0 ^ old_cr0) & X86_CR0_CD) &&
            kvm_arch_has_noncoherent_dma(vcpu->kvm) &&
            !kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
                kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL);
}
EXPORT_SYMBOL_GPL(kvm_post_set_cr0);

int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
        unsigned long old_cr0 = kvm_read_cr0(vcpu);

        cr0 |= X86_CR0_ET;

#ifdef CONFIG_X86_64
        if (cr0 & 0xffffffff00000000UL)
                return 1;
#endif

        cr0 &= ~CR0_RESERVED_BITS;

        if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
                return 1;

        if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
                return 1;

#ifdef CONFIG_X86_64
        if ((vcpu->arch.efer & EFER_LME) && !is_paging(vcpu) &&
            (cr0 & X86_CR0_PG)) {
                int cs_db, cs_l;

                if (!is_pae(vcpu))
                        return 1;
                static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
                if (cs_l)
                        return 1;
        }
#endif
        if (!(vcpu->arch.efer & EFER_LME) && (cr0 & X86_CR0_PG) &&
            is_pae(vcpu) && ((cr0 ^ old_cr0) & X86_CR0_PDPTR_BITS) &&
            !load_pdptrs(vcpu, kvm_read_cr3(vcpu)))
                return 1;

        if (!(cr0 & X86_CR0_PG) &&
            (is_64_bit_mode(vcpu) || kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)))
                return 1;

        static_call(kvm_x86_set_cr0)(vcpu, cr0);

        kvm_post_set_cr0(vcpu, old_cr0, cr0);

        return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);

void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
        (void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(kvm_lmsw);

void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
{
        if (vcpu->arch.guest_state_protected)
                return;

        if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) {
                if (vcpu->arch.xcr0 != host_xcr0)
                        xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);

                if (vcpu->arch.xsaves_enabled &&
                    vcpu->arch.ia32_xss != host_xss)
                        wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss);
        }

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
        if (static_cpu_has(X86_FEATURE_PKU) &&
            vcpu->arch.pkru != vcpu->arch.host_pkru &&
            ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
             kvm_read_cr4_bits(vcpu, X86_CR4_PKE)))
                write_pkru(vcpu->arch.pkru);
#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
}
EXPORT_SYMBOL_GPL(kvm_load_guest_xsave_state);

void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
{
        if (vcpu->arch.guest_state_protected)
                return;

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
        if (static_cpu_has(X86_FEATURE_PKU) &&
            ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
             kvm_read_cr4_bits(vcpu, X86_CR4_PKE))) {
                vcpu->arch.pkru = rdpkru();
                if (vcpu->arch.pkru != vcpu->arch.host_pkru)
                        write_pkru(vcpu->arch.host_pkru);
        }
#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */

        if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) {
                if (vcpu->arch.xcr0 != host_xcr0)
                        xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);

                if (vcpu->arch.xsaves_enabled &&
                    vcpu->arch.ia32_xss != host_xss)
                        wrmsrl(MSR_IA32_XSS, host_xss);
        }
}
EXPORT_SYMBOL_GPL(kvm_load_host_xsave_state);

#ifdef CONFIG_X86_64
static inline u64 kvm_guest_supported_xfd(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.guest_supported_xcr0 & XFEATURE_MASK_USER_DYNAMIC;
}
#endif

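/*
 * Validate and set XCR0.  The architectural dependencies enforced below:
 * x87 (FP) must always be enabled, SSE is required for YMM, YMM is required
 * for AVX-512, BNDREGS/BNDCSR must be set together, and the XTILE features
 * are all-or-nothing.
 */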
static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
        u64 xcr0 = xcr;
        u64 old_xcr0 = vcpu->arch.xcr0;
        u64 valid_bits;

        /* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now. */
        if (index != XCR_XFEATURE_ENABLED_MASK)
                return 1;
        if (!(xcr0 & XFEATURE_MASK_FP))
                return 1;
        if ((xcr0 & XFEATURE_MASK_YMM) && !(xcr0 & XFEATURE_MASK_SSE))
                return 1;

        /*
         * Do not allow the guest to set bits that we do not support
         * saving.  However, xcr0 bit 0 is always set, even if the
         * emulated CPU does not support XSAVE (see kvm_vcpu_reset()).
         */
        valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP;
        if (xcr0 & ~valid_bits)
                return 1;

        if ((!(xcr0 & XFEATURE_MASK_BNDREGS)) !=
            (!(xcr0 & XFEATURE_MASK_BNDCSR)))
                return 1;

        if (xcr0 & XFEATURE_MASK_AVX512) {
                if (!(xcr0 & XFEATURE_MASK_YMM))
                        return 1;
                if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
                        return 1;
        }

        if ((xcr0 & XFEATURE_MASK_XTILE) &&
            ((xcr0 & XFEATURE_MASK_XTILE) != XFEATURE_MASK_XTILE))
                return 1;

        vcpu->arch.xcr0 = xcr0;

        if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
                kvm_update_cpuid_runtime(vcpu);
        return 0;
}

int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu)
{
        /* Note, #UD due to CR4.OSXSAVE=0 has priority over the intercept. */
        if (static_call(kvm_x86_get_cpl)(vcpu) != 0 ||
            __kvm_set_xcr(vcpu, kvm_rcx_read(vcpu), kvm_read_edx_eax(vcpu))) {
                kvm_inject_gp(vcpu, 0);
                return 1;
        }

        return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_xsetbv);

bool __kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
        if (cr4 & cr4_reserved_bits)
                return false;

        if (cr4 & vcpu->arch.cr4_guest_rsvd_bits)
                return false;

        return true;
}
EXPORT_SYMBOL_GPL(__kvm_is_valid_cr4);

static bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
        return __kvm_is_valid_cr4(vcpu, cr4) &&
               static_call(kvm_x86_is_valid_cr4)(vcpu, cr4);
}

void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned long cr4)
{
        if ((cr4 ^ old_cr4) & KVM_MMU_CR4_ROLE_BITS)
                kvm_mmu_reset_context(vcpu);

        /*
         * If CR4.PCIDE is changed 0 -> 1, there is no need to flush the TLB
         * according to the SDM; however, stale prev_roots could be reused
         * incorrectly in the future after a MOV to CR3 with NOFLUSH=1, so we
         * free them all.  This is *not* a superset of KVM_REQ_TLB_FLUSH_GUEST
         * or KVM_REQ_TLB_FLUSH_CURRENT, because the hardware TLB is not flushed,
         * so fall through.
         */
        if (!tdp_enabled &&
            (cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE))
                kvm_mmu_unload(vcpu);

        /*
         * The TLB has to be flushed for all PCIDs if any of the following
         * (architecturally required) changes happen:
         * - CR4.PCIDE is changed from 1 to 0
         * - CR4.PGE is toggled
         *
         * This is a superset of KVM_REQ_TLB_FLUSH_CURRENT.
         */
        if (((cr4 ^ old_cr4) & X86_CR4_PGE) ||
            (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
                kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);

        /*
         * The TLB has to be flushed for the current PCID if any of the
         * following (architecturally required) changes happen:
         * - CR4.SMEP is changed from 0 to 1
         * - CR4.PAE is toggled
         */
        else if (((cr4 ^ old_cr4) & X86_CR4_PAE) ||
                 ((cr4 & X86_CR4_SMEP) && !(old_cr4 & X86_CR4_SMEP)))
                kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_post_set_cr4);

int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
        unsigned long old_cr4 = kvm_read_cr4(vcpu);

        if (!kvm_is_valid_cr4(vcpu, cr4))
                return 1;

        if (is_long_mode(vcpu)) {
                if (!(cr4 & X86_CR4_PAE))
                        return 1;
                if ((cr4 ^ old_cr4) & X86_CR4_LA57)
                        return 1;
        } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
                   && ((cr4 ^ old_cr4) & X86_CR4_PDPTR_BITS)
                   && !load_pdptrs(vcpu, kvm_read_cr3(vcpu)))
                return 1;

        if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) {
                if (!guest_cpuid_has(vcpu, X86_FEATURE_PCID))
                        return 1;

                /* PCID cannot be enabled when cr3[11:0] != 000H or EFER.LMA=0 */
                if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu))
                        return 1;
        }

        static_call(kvm_x86_set_cr4)(vcpu, cr4);

        kvm_post_set_cr4(vcpu, old_cr4, cr4);

        return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);

static void kvm_invalidate_pcid(struct kvm_vcpu *vcpu, unsigned long pcid)
{
        struct kvm_mmu *mmu = vcpu->arch.mmu;
        unsigned long roots_to_free = 0;
        int i;

        /*
         * MOV CR3 and INVPCID are usually not intercepted when using TDP, but
         * this is reachable when running EPT=1 and unrestricted_guest=0, and
         * also via the emulator.  KVM's TDP page tables are not in the scope of
         * the invalidation, but the guest's TLB entries need to be flushed as
         * the CPU may have cached entries in its TLB for the target PCID.
         */
        if (unlikely(tdp_enabled)) {
                kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
                return;
        }

        /*
         * If neither the current CR3 nor any of the prev_roots use the given
         * PCID, then nothing needs to be done here because a resync will
         * happen anyway before switching to any other CR3.
         */
        if (kvm_get_active_pcid(vcpu) == pcid) {
                kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
                kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
        }

        /*
         * If PCID is disabled, there is no need to free prev_roots even if the
         * PCIDs for them are also 0, because MOV to CR3 always flushes the TLB
         * with PCIDE=0.
         */
        if (!kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
                return;

        for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
                if (kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd) == pcid)
                        roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);

        kvm_mmu_free_roots(vcpu->kvm, mmu, roots_to_free);
}

int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
        bool skip_tlb_flush = false;
        unsigned long pcid = 0;
#ifdef CONFIG_X86_64
        bool pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);

        if (pcid_enabled) {
                skip_tlb_flush = cr3 & X86_CR3_PCID_NOFLUSH;
                cr3 &= ~X86_CR3_PCID_NOFLUSH;
                pcid = cr3 & X86_CR3_PCID_MASK;
        }
#endif

        /* PDPTRs are always reloaded for PAE paging. */
        if (cr3 == kvm_read_cr3(vcpu) && !is_pae_paging(vcpu))
                goto handle_tlb_flush;

        /*
         * Do not condition the GPA check on long mode, this helper is used to
         * stuff CR3, e.g. for RSM emulation, and there is no guarantee that
         * the current vCPU mode is accurate.
         */
        if (kvm_vcpu_is_illegal_gpa(vcpu, cr3))
                return 1;

        if (is_pae_paging(vcpu) && !load_pdptrs(vcpu, cr3))
                return 1;

        if (cr3 != kvm_read_cr3(vcpu))
                kvm_mmu_new_pgd(vcpu, cr3);

        vcpu->arch.cr3 = cr3;
        kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
        /* Do not call post_set_cr3, we do not get here for confidential guests. */

handle_tlb_flush:
        /*
         * A load of CR3 that flushes the TLB flushes only the current PCID,
         * even if PCID is disabled, in which case PCID=0 is flushed.  It's a
         * moot point in the end because _disabling_ PCID will flush all PCIDs,
         * and it's impossible to use a non-zero PCID when PCID is disabled,
         * i.e. only PCID=0 can be relevant.
         */
        if (!skip_tlb_flush)
                kvm_invalidate_pcid(vcpu, pcid);

        return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);

int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
        if (cr8 & CR8_RESERVED_BITS)
                return 1;
        if (lapic_in_kernel(vcpu))
                kvm_lapic_set_tpr(vcpu, cr8);
        else
                vcpu->arch.cr8 = cr8;
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr8);

unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
        if (lapic_in_kernel(vcpu))
                return kvm_lapic_get_cr8(vcpu);
        else
                return vcpu->arch.cr8;
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);

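/*
 * Resync the effective debug registers from the guest's values when the
 * guest, rather than host userspace, owns the hardware breakpoints.
 */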
static void kvm_update_dr0123(struct kvm_vcpu *vcpu)
{
        int i;

        if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
                for (i = 0; i < KVM_NR_DB_REGS; i++)
                        vcpu->arch.eff_db[i] = vcpu->arch.db[i];
        }
}

void kvm_update_dr7(struct kvm_vcpu *vcpu)
{
        unsigned long dr7;

        if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
                dr7 = vcpu->arch.guest_debug_dr7;
        else
                dr7 = vcpu->arch.dr7;
        static_call(kvm_x86_set_dr7)(vcpu, dr7);
        vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED;
        if (dr7 & DR7_BP_EN_MASK)
                vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED;
}
EXPORT_SYMBOL_GPL(kvm_update_dr7);

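/*
 * DR6 bits that read as 1: on top of DR6_FIXED_1, the bits for features the
 * guest doesn't have (RTM, bus-lock detect) are also fixed to 1.
 */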
static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
{
        u64 fixed = DR6_FIXED_1;

        if (!guest_cpuid_has(vcpu, X86_FEATURE_RTM))
                fixed |= DR6_RTM;

        if (!guest_cpuid_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT))
                fixed |= DR6_BUS_LOCK;
        return fixed;
}

int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
        size_t size = ARRAY_SIZE(vcpu->arch.db);

        switch (dr) {
        case 0 ... 3:
                vcpu->arch.db[array_index_nospec(dr, size)] = val;
                if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
                        vcpu->arch.eff_db[dr] = val;
                break;
        case 4:
        case 6:
                if (!kvm_dr6_valid(val))
                        return 1; /* #GP */
                vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu);
                break;
        case 5:
        default: /* 7 */
                if (!kvm_dr7_valid(val))
                        return 1; /* #GP */
                vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
                kvm_update_dr7(vcpu);
                break;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_dr);

void kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
        size_t size = ARRAY_SIZE(vcpu->arch.db);

        switch (dr) {
        case 0 ... 3:
                *val = vcpu->arch.db[array_index_nospec(dr, size)];
                break;
        case 4:
        case 6:
                *val = vcpu->arch.dr6;
                break;
        case 5:
        default: /* 7 */
                *val = vcpu->arch.dr7;
                break;
        }
}
EXPORT_SYMBOL_GPL(kvm_get_dr);

int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu)
{
        u32 ecx = kvm_rcx_read(vcpu);
        u64 data;

        if (kvm_pmu_rdpmc(vcpu, ecx, &data)) {
                kvm_inject_gp(vcpu, 0);
                return 1;
        }

        kvm_rax_write(vcpu, (u32)data);
        kvm_rdx_write(vcpu, data >> 32);
        return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_rdpmc);

/*
 * List of MSR numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * The three MSR lists (msrs_to_save, emulated_msrs, msr_based_features)
 * extract the supported MSRs from the related const lists.
 * msrs_to_save is selected from msrs_to_save_all to reflect the
 * capabilities of the host CPU.  This capabilities test skips MSRs that
 * are KVM-specific; those are put in emulated_msrs_all.  Filtering of
 * emulated_msrs may depend on host virtualization features rather than
 * host CPU features.
 */
1419
1420 static const u32 msrs_to_save_all[] = {
1421         MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
1422         MSR_STAR,
1423 #ifdef CONFIG_X86_64
1424         MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
1425 #endif
1426         MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
1427         MSR_IA32_FEAT_CTL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
1428         MSR_IA32_SPEC_CTRL,
1429         MSR_IA32_RTIT_CTL, MSR_IA32_RTIT_STATUS, MSR_IA32_RTIT_CR3_MATCH,
1430         MSR_IA32_RTIT_OUTPUT_BASE, MSR_IA32_RTIT_OUTPUT_MASK,
1431         MSR_IA32_RTIT_ADDR0_A, MSR_IA32_RTIT_ADDR0_B,
1432         MSR_IA32_RTIT_ADDR1_A, MSR_IA32_RTIT_ADDR1_B,
1433         MSR_IA32_RTIT_ADDR2_A, MSR_IA32_RTIT_ADDR2_B,
1434         MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B,
1435         MSR_IA32_UMWAIT_CONTROL,
1436
1437         MSR_ARCH_PERFMON_FIXED_CTR0, MSR_ARCH_PERFMON_FIXED_CTR1,
1438         MSR_ARCH_PERFMON_FIXED_CTR0 + 2,
1439         MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_STATUS,
1440         MSR_CORE_PERF_GLOBAL_CTRL, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
1441         MSR_ARCH_PERFMON_PERFCTR0, MSR_ARCH_PERFMON_PERFCTR1,
1442         MSR_ARCH_PERFMON_PERFCTR0 + 2, MSR_ARCH_PERFMON_PERFCTR0 + 3,
1443         MSR_ARCH_PERFMON_PERFCTR0 + 4, MSR_ARCH_PERFMON_PERFCTR0 + 5,
1444         MSR_ARCH_PERFMON_PERFCTR0 + 6, MSR_ARCH_PERFMON_PERFCTR0 + 7,
1445         MSR_ARCH_PERFMON_PERFCTR0 + 8, MSR_ARCH_PERFMON_PERFCTR0 + 9,
1446         MSR_ARCH_PERFMON_PERFCTR0 + 10, MSR_ARCH_PERFMON_PERFCTR0 + 11,
1447         MSR_ARCH_PERFMON_PERFCTR0 + 12, MSR_ARCH_PERFMON_PERFCTR0 + 13,
1448         MSR_ARCH_PERFMON_PERFCTR0 + 14, MSR_ARCH_PERFMON_PERFCTR0 + 15,
1449         MSR_ARCH_PERFMON_PERFCTR0 + 16, MSR_ARCH_PERFMON_PERFCTR0 + 17,
1450         MSR_ARCH_PERFMON_EVENTSEL0, MSR_ARCH_PERFMON_EVENTSEL1,
1451         MSR_ARCH_PERFMON_EVENTSEL0 + 2, MSR_ARCH_PERFMON_EVENTSEL0 + 3,
1452         MSR_ARCH_PERFMON_EVENTSEL0 + 4, MSR_ARCH_PERFMON_EVENTSEL0 + 5,
1453         MSR_ARCH_PERFMON_EVENTSEL0 + 6, MSR_ARCH_PERFMON_EVENTSEL0 + 7,
1454         MSR_ARCH_PERFMON_EVENTSEL0 + 8, MSR_ARCH_PERFMON_EVENTSEL0 + 9,
1455         MSR_ARCH_PERFMON_EVENTSEL0 + 10, MSR_ARCH_PERFMON_EVENTSEL0 + 11,
1456         MSR_ARCH_PERFMON_EVENTSEL0 + 12, MSR_ARCH_PERFMON_EVENTSEL0 + 13,
1457         MSR_ARCH_PERFMON_EVENTSEL0 + 14, MSR_ARCH_PERFMON_EVENTSEL0 + 15,
1458         MSR_ARCH_PERFMON_EVENTSEL0 + 16, MSR_ARCH_PERFMON_EVENTSEL0 + 17,
1459         MSR_IA32_PEBS_ENABLE, MSR_IA32_DS_AREA, MSR_PEBS_DATA_CFG,
1460
1461         MSR_K7_EVNTSEL0, MSR_K7_EVNTSEL1, MSR_K7_EVNTSEL2, MSR_K7_EVNTSEL3,
1462         MSR_K7_PERFCTR0, MSR_K7_PERFCTR1, MSR_K7_PERFCTR2, MSR_K7_PERFCTR3,
1463         MSR_F15H_PERF_CTL0, MSR_F15H_PERF_CTL1, MSR_F15H_PERF_CTL2,
1464         MSR_F15H_PERF_CTL3, MSR_F15H_PERF_CTL4, MSR_F15H_PERF_CTL5,
1465         MSR_F15H_PERF_CTR0, MSR_F15H_PERF_CTR1, MSR_F15H_PERF_CTR2,
1466         MSR_F15H_PERF_CTR3, MSR_F15H_PERF_CTR4, MSR_F15H_PERF_CTR5,
1467         MSR_IA32_XFD, MSR_IA32_XFD_ERR,
1468 };
1469
1470 static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_all)];
1471 static unsigned num_msrs_to_save;
1472
1473 static const u32 emulated_msrs_all[] = {
1474         MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
1475         MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
1476         HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
1477         HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC,
1478         HV_X64_MSR_TSC_FREQUENCY, HV_X64_MSR_APIC_FREQUENCY,
1479         HV_X64_MSR_CRASH_P0, HV_X64_MSR_CRASH_P1, HV_X64_MSR_CRASH_P2,
1480         HV_X64_MSR_CRASH_P3, HV_X64_MSR_CRASH_P4, HV_X64_MSR_CRASH_CTL,
1481         HV_X64_MSR_RESET,
1482         HV_X64_MSR_VP_INDEX,
1483         HV_X64_MSR_VP_RUNTIME,
1484         HV_X64_MSR_SCONTROL,
1485         HV_X64_MSR_STIMER0_CONFIG,
1486         HV_X64_MSR_VP_ASSIST_PAGE,
1487         HV_X64_MSR_REENLIGHTENMENT_CONTROL, HV_X64_MSR_TSC_EMULATION_CONTROL,
1488         HV_X64_MSR_TSC_EMULATION_STATUS,
1489         HV_X64_MSR_SYNDBG_OPTIONS,
1490         HV_X64_MSR_SYNDBG_CONTROL, HV_X64_MSR_SYNDBG_STATUS,
1491         HV_X64_MSR_SYNDBG_SEND_BUFFER, HV_X64_MSR_SYNDBG_RECV_BUFFER,
1492         HV_X64_MSR_SYNDBG_PENDING_BUFFER,
1493
1494         MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
1495         MSR_KVM_PV_EOI_EN, MSR_KVM_ASYNC_PF_INT, MSR_KVM_ASYNC_PF_ACK,
1496
1497         MSR_IA32_TSC_ADJUST,
1498         MSR_IA32_TSC_DEADLINE,
1499         MSR_IA32_ARCH_CAPABILITIES,
1500         MSR_IA32_PERF_CAPABILITIES,
1501         MSR_IA32_MISC_ENABLE,
1502         MSR_IA32_MCG_STATUS,
1503         MSR_IA32_MCG_CTL,
1504         MSR_IA32_MCG_EXT_CTL,
1505         MSR_IA32_SMBASE,
1506         MSR_SMI_COUNT,
1507         MSR_PLATFORM_INFO,
1508         MSR_MISC_FEATURES_ENABLES,
1509         MSR_AMD64_VIRT_SPEC_CTRL,
1510         MSR_AMD64_TSC_RATIO,
1511         MSR_IA32_POWER_CTL,
1512         MSR_IA32_UCODE_REV,
1513
1514         /*
1515          * The following list leaves out MSRs whose values are determined
1516          * by arch/x86/kvm/vmx/nested.c based on CPUID or other MSRs.
1517          * We always support the "true" VMX control MSRs, even if the host
1518          * processor does not, so these registers are kept here rather
1519          * than in msrs_to_save_all.
1520          */
1521         MSR_IA32_VMX_BASIC,
1522         MSR_IA32_VMX_TRUE_PINBASED_CTLS,
1523         MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
1524         MSR_IA32_VMX_TRUE_EXIT_CTLS,
1525         MSR_IA32_VMX_TRUE_ENTRY_CTLS,
1526         MSR_IA32_VMX_MISC,
1527         MSR_IA32_VMX_CR0_FIXED0,
1528         MSR_IA32_VMX_CR4_FIXED0,
1529         MSR_IA32_VMX_VMCS_ENUM,
1530         MSR_IA32_VMX_PROCBASED_CTLS2,
1531         MSR_IA32_VMX_EPT_VPID_CAP,
1532         MSR_IA32_VMX_VMFUNC,
1533
1534         MSR_K7_HWCR,
1535         MSR_KVM_POLL_CONTROL,
1536 };
1537
1538 static u32 emulated_msrs[ARRAY_SIZE(emulated_msrs_all)];
1539 static unsigned num_emulated_msrs;
1540
1541 /*
1542  * List of MSR numbers that are used to expose MSR-based features which
1543  * a hypervisor can use to validate requested CPU features.
1544  */
1545 static const u32 msr_based_features_all[] = {
1546         MSR_IA32_VMX_BASIC,
1547         MSR_IA32_VMX_TRUE_PINBASED_CTLS,
1548         MSR_IA32_VMX_PINBASED_CTLS,
1549         MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
1550         MSR_IA32_VMX_PROCBASED_CTLS,
1551         MSR_IA32_VMX_TRUE_EXIT_CTLS,
1552         MSR_IA32_VMX_EXIT_CTLS,
1553         MSR_IA32_VMX_TRUE_ENTRY_CTLS,
1554         MSR_IA32_VMX_ENTRY_CTLS,
1555         MSR_IA32_VMX_MISC,
1556         MSR_IA32_VMX_CR0_FIXED0,
1557         MSR_IA32_VMX_CR0_FIXED1,
1558         MSR_IA32_VMX_CR4_FIXED0,
1559         MSR_IA32_VMX_CR4_FIXED1,
1560         MSR_IA32_VMX_VMCS_ENUM,
1561         MSR_IA32_VMX_PROCBASED_CTLS2,
1562         MSR_IA32_VMX_EPT_VPID_CAP,
1563         MSR_IA32_VMX_VMFUNC,
1564
1565         MSR_F10H_DECFG,
1566         MSR_IA32_UCODE_REV,
1567         MSR_IA32_ARCH_CAPABILITIES,
1568         MSR_IA32_PERF_CAPABILITIES,
1569 };
1570
1571 static u32 msr_based_features[ARRAY_SIZE(msr_based_features_all)];
1572 static unsigned int num_msr_based_features;
1573
1574 /*
1575  * Some IA32_ARCH_CAPABILITIES bits have dependencies on MSRs that KVM
1576  * does not yet virtualize. These include:
1577  *   10 - MISC_PACKAGE_CTRLS
1578  *   11 - ENERGY_FILTERING_CTL
1579  *   12 - DOITM
1580  *   18 - FB_CLEAR_CTRL
1581  *   21 - XAPIC_DISABLE_STATUS
1582  *   23 - OVERCLOCKING_STATUS
1583  */
1584
1585 #define KVM_SUPPORTED_ARCH_CAP \
1586         (ARCH_CAP_RDCL_NO | ARCH_CAP_IBRS_ALL | ARCH_CAP_RSBA | \
1587          ARCH_CAP_SKIP_VMENTRY_L1DFLUSH | ARCH_CAP_SSB_NO | ARCH_CAP_MDS_NO | \
1588          ARCH_CAP_PSCHANGE_MC_NO | ARCH_CAP_TSX_CTRL_MSR | ARCH_CAP_TAA_NO | \
1589          ARCH_CAP_SBDR_SSDP_NO | ARCH_CAP_FBSDP_NO | ARCH_CAP_PSDP_NO | \
1590          ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO)
1591
1592 static u64 kvm_get_arch_capabilities(void)
1593 {
1594         u64 data = 0;
1595
1596         if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) {
1597                 rdmsrl(MSR_IA32_ARCH_CAPABILITIES, data);
1598                 data &= KVM_SUPPORTED_ARCH_CAP;
1599         }
1600
1601         /*
1602          * If nx_huge_pages is enabled, KVM's shadow paging will ensure that
1603          * the nested hypervisor runs with NX huge pages.  If it is not,
1604          * L1 is anyway vulnerable to ITLB_MULTIHIT exploits from other
1605          * L1 guests, so it need not worry about its own (L2) guests.
1606          */
1607         data |= ARCH_CAP_PSCHANGE_MC_NO;
1608
1609         /*
1610          * If we're doing cache flushes (either "always" or "cond")
1611          * we will do one whenever the guest does a vmlaunch/vmresume.
1612          * If an outer hypervisor is doing the cache flush for us
1613          * (VMENTER_L1D_FLUSH_NESTED_VM), we can safely pass that
1614          * capability to the guest too, and if EPT is disabled we're not
1615          * vulnerable.  Overall, only VMENTER_L1D_FLUSH_NEVER will
1616          * require a nested hypervisor to do a flush of its own.
1617          */
1618         if (l1tf_vmx_mitigation != VMENTER_L1D_FLUSH_NEVER)
1619                 data |= ARCH_CAP_SKIP_VMENTRY_L1DFLUSH;
1620
1621         if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
1622                 data |= ARCH_CAP_RDCL_NO;
1623         if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
1624                 data |= ARCH_CAP_SSB_NO;
1625         if (!boot_cpu_has_bug(X86_BUG_MDS))
1626                 data |= ARCH_CAP_MDS_NO;
1627
1628         if (!boot_cpu_has(X86_FEATURE_RTM)) {
1629                 /*
1630                  * If RTM=0 because the kernel has disabled TSX, the host might
1631                  * have TAA_NO or TSX_CTRL.  Clear TAA_NO (the guest sees RTM=0
1632                  * and therefore knows that there cannot be TAA) but keep
1633                  * TSX_CTRL: some buggy userspaces leave it set on tsx=on hosts,
1634                  * and we want to allow migrating those guests to tsx=off hosts.
1635                  */
1636                 data &= ~ARCH_CAP_TAA_NO;
1637         } else if (!boot_cpu_has_bug(X86_BUG_TAA)) {
1638                 data |= ARCH_CAP_TAA_NO;
1639         } else {
1640                 /*
1641                  * Nothing to do here; we emulate TSX_CTRL if present on the
1642                  * host so the guest can choose between disabling TSX or
1643                  * using VERW to clear CPU buffers.
1644                  */
1645         }
1646
1647         return data;
1648 }
1649
1650 static int kvm_get_msr_feature(struct kvm_msr_entry *msr)
1651 {
1652         switch (msr->index) {
1653         case MSR_IA32_ARCH_CAPABILITIES:
1654                 msr->data = kvm_get_arch_capabilities();
1655                 break;
1656         case MSR_IA32_UCODE_REV:
1657                 rdmsrl_safe(msr->index, &msr->data);
1658                 break;
1659         default:
1660                 return static_call(kvm_x86_get_msr_feature)(msr);
1661         }
1662         return 0;
1663 }
1664
1665 static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
1666 {
1667         struct kvm_msr_entry msr;
1668         int r;
1669
1670         msr.index = index;
1671         r = kvm_get_msr_feature(&msr);
1672
1673         if (r == KVM_MSR_RET_INVALID) {
1674                 /* Unconditionally clear the output for simplicity */
1675                 *data = 0;
1676                 if (kvm_msr_ignored_check(index, 0, false))
1677                         r = 0;
1678         }
1679
1680         if (r)
1681                 return r;
1682
1683         *data = msr.data;
1684
1685         return 0;
1686 }
1687
1688 static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
1689 {
1690         if (efer & EFER_FFXSR && !guest_cpuid_has(vcpu, X86_FEATURE_FXSR_OPT))
1691                 return false;
1692
1693         if (efer & EFER_SVME && !guest_cpuid_has(vcpu, X86_FEATURE_SVM))
1694                 return false;
1695
1696         if (efer & (EFER_LME | EFER_LMA) &&
1697             !guest_cpuid_has(vcpu, X86_FEATURE_LM))
1698                 return false;
1699
1700         if (efer & EFER_NX && !guest_cpuid_has(vcpu, X86_FEATURE_NX))
1701                 return false;
1702
1703         return true;
1704 }
1705
1706 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
1707 {
1708         if (efer & efer_reserved_bits)
1709                 return false;
1710
1711         return __kvm_valid_efer(vcpu, efer);
1712 }
1713 EXPORT_SYMBOL_GPL(kvm_valid_efer);
1714
1715 static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1716 {
1717         u64 old_efer = vcpu->arch.efer;
1718         u64 efer = msr_info->data;
1719         int r;
1720
1721         if (efer & efer_reserved_bits)
1722                 return 1;
1723
1724         if (!msr_info->host_initiated) {
1725                 if (!__kvm_valid_efer(vcpu, efer))
1726                         return 1;
1727
1728                 if (is_paging(vcpu) &&
1729                     (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
1730                         return 1;
1731         }
1732
1733         efer &= ~EFER_LMA;
1734         efer |= vcpu->arch.efer & EFER_LMA;
1735
1736         r = static_call(kvm_x86_set_efer)(vcpu, efer);
1737         if (r) {
1738                 WARN_ON(r > 0);
1739                 return r;
1740         }
1741
1742         if ((efer ^ old_efer) & KVM_MMU_EFER_ROLE_BITS)
1743                 kvm_mmu_reset_context(vcpu);
1744
1745         return 0;
1746 }
1747
1748 void kvm_enable_efer_bits(u64 mask)
1749 {
1750        efer_reserved_bits &= ~mask;
1751 }
1752 EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
1753
1754 bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type)
1755 {
1756         struct kvm_x86_msr_filter *msr_filter;
1757         struct msr_bitmap_range *ranges;
1758         struct kvm *kvm = vcpu->kvm;
1759         bool allowed;
1760         int idx;
1761         u32 i;
1762
1763         /* x2APIC MSRs do not support filtering. */
1764         if (index >= 0x800 && index <= 0x8ff)
1765                 return true;
1766
1767         idx = srcu_read_lock(&kvm->srcu);
1768
1769         msr_filter = srcu_dereference(kvm->arch.msr_filter, &kvm->srcu);
1770         if (!msr_filter) {
1771                 allowed = true;
1772                 goto out;
1773         }
1774
1775         allowed = msr_filter->default_allow;
1776         ranges = msr_filter->ranges;
1777
1778         for (i = 0; i < msr_filter->count; i++) {
1779                 u32 start = ranges[i].base;
1780                 u32 end = start + ranges[i].nmsrs;
1781                 u32 flags = ranges[i].flags;
1782                 unsigned long *bitmap = ranges[i].bitmap;
1783
1784                 if ((index >= start) && (index < end) && (flags & type)) {
1785                         allowed = !!test_bit(index - start, bitmap);
1786                         break;
1787                 }
1788         }
1789
1790 out:
1791         srcu_read_unlock(&kvm->srcu, idx);
1792
1793         return allowed;
1794 }
1795 EXPORT_SYMBOL_GPL(kvm_msr_allowed);
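
/*
 * Illustrative sketch of how a single filter range is evaluated by the
 * loop above (the numbers are made up for the example): a range with
 * base = 0xc0000080, nmsrs = 2 and KVM_MSR_FILTER_WRITE in its flags
 * covers MSRs 0xc0000080..0xc0000081, so a WRMSR to 0xc0000080 tests
 * bit 0 of that range's bitmap to decide whether the access is allowed.
 */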
1796
1797 /*
1798  * Write @data into the MSR specified by @index.  Select MSR specific fault
1799  * checks are bypassed if @host_initiated is %true.
1800  * Returns 0 on success, non-0 otherwise.
1801  * Assumes vcpu_load() was already called.
1802  */
1803 static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
1804                          bool host_initiated)
1805 {
1806         struct msr_data msr;
1807
1808         switch (index) {
1809         case MSR_FS_BASE:
1810         case MSR_GS_BASE:
1811         case MSR_KERNEL_GS_BASE:
1812         case MSR_CSTAR:
1813         case MSR_LSTAR:
1814                 if (is_noncanonical_address(data, vcpu))
1815                         return 1;
1816                 break;
1817         case MSR_IA32_SYSENTER_EIP:
1818         case MSR_IA32_SYSENTER_ESP:
1819                 /*
1820                  * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
1821                  * non-canonical address is written on Intel but not on
1822                  * AMD (which ignores the top 32-bits, because it does
1823                  * not implement 64-bit SYSENTER).
1824                  *
1825                  * 64-bit code should hence be able to write a non-canonical
1826                  * value on AMD.  Making the address canonical ensures that
1827                  * vmentry does not fail on Intel after writing a non-canonical
1828                  * value, and that something deterministic happens if the guest
1829                  * invokes 64-bit SYSENTER.
1830                  */
1831                 data = __canonical_address(data, vcpu_virt_addr_bits(vcpu));
1832                 break;
1833         case MSR_TSC_AUX:
1834                 if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX))
1835                         return 1;
1836
1837                 if (!host_initiated &&
1838                     !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) &&
1839                     !guest_cpuid_has(vcpu, X86_FEATURE_RDPID))
1840                         return 1;
1841
1842                 /*
1843                  * Per Intel's SDM, bits 63:32 are reserved, but AMD's APM has
1844                  * incomplete and conflicting architectural behavior.  Current
1845                  * AMD CPUs completely ignore bits 63:32, i.e. they aren't
1846                  * reserved and always read as zeros.  Enforce Intel's reserved
1847                  * bits check if and only if the guest CPU is Intel, and clear
1848                  * the bits in all other cases.  This ensures cross-vendor
1849                  * migration will provide consistent behavior for the guest.
1850                  */
1851                 if (guest_cpuid_is_intel(vcpu) && (data >> 32) != 0)
1852                         return 1;
1853
1854                 data = (u32)data;
1855                 break;
1856         }
1857
1858         msr.data = data;
1859         msr.index = index;
1860         msr.host_initiated = host_initiated;
1861
1862         return static_call(kvm_x86_set_msr)(vcpu, &msr);
1863 }
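
/*
 * Example of the SYSENTER canonicalization above, assuming 48 virtual
 * address bits: a guest write of 0x0000800000000000 to IA32_SYSENTER_EIP
 * is sign-extended from bit 47 and stored as 0xffff800000000000, so the
 * value is canonical and a later VM-Entry on Intel cannot fail because
 * of a non-canonical SYSENTER MSR.
 */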
1864
1865 static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu,
1866                                      u32 index, u64 data, bool host_initiated)
1867 {
1868         int ret = __kvm_set_msr(vcpu, index, data, host_initiated);
1869
1870         if (ret == KVM_MSR_RET_INVALID)
1871                 if (kvm_msr_ignored_check(index, data, true))
1872                         ret = 0;
1873
1874         return ret;
1875 }
1876
1877 /*
1878  * Read the MSR specified by @index into @data.  Select MSR specific fault
1879  * checks are bypassed if @host_initiated is %true.
1880  * Returns 0 on success, non-0 otherwise.
1881  * Assumes vcpu_load() was already called.
1882  */
1883 int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
1884                   bool host_initiated)
1885 {
1886         struct msr_data msr;
1887         int ret;
1888
1889         switch (index) {
1890         case MSR_TSC_AUX:
1891                 if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX))
1892                         return 1;
1893
1894                 if (!host_initiated &&
1895                     !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) &&
1896                     !guest_cpuid_has(vcpu, X86_FEATURE_RDPID))
1897                         return 1;
1898                 break;
1899         }
1900
1901         msr.index = index;
1902         msr.host_initiated = host_initiated;
1903
1904         ret = static_call(kvm_x86_get_msr)(vcpu, &msr);
1905         if (!ret)
1906                 *data = msr.data;
1907         return ret;
1908 }
1909
1910 static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu,
1911                                      u32 index, u64 *data, bool host_initiated)
1912 {
1913         int ret = __kvm_get_msr(vcpu, index, data, host_initiated);
1914
1915         if (ret == KVM_MSR_RET_INVALID) {
1916                 /* Unconditionally clear *data for simplicity */
1917                 *data = 0;
1918                 if (kvm_msr_ignored_check(index, 0, false))
1919                         ret = 0;
1920         }
1921
1922         return ret;
1923 }
1924
1925 static int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data)
1926 {
1927         if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ))
1928                 return KVM_MSR_RET_FILTERED;
1929         return kvm_get_msr_ignored_check(vcpu, index, data, false);
1930 }
1931
1932 static int kvm_set_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data)
1933 {
1934         if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_WRITE))
1935                 return KVM_MSR_RET_FILTERED;
1936         return kvm_set_msr_ignored_check(vcpu, index, data, false);
1937 }
1938
1939 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
1940 {
1941         return kvm_get_msr_ignored_check(vcpu, index, data, false);
1942 }
1943 EXPORT_SYMBOL_GPL(kvm_get_msr);
1944
1945 int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
1946 {
1947         return kvm_set_msr_ignored_check(vcpu, index, data, false);
1948 }
1949 EXPORT_SYMBOL_GPL(kvm_set_msr);
1950
1951 static void complete_userspace_rdmsr(struct kvm_vcpu *vcpu)
1952 {
1953         if (!vcpu->run->msr.error) {
1954                 kvm_rax_write(vcpu, (u32)vcpu->run->msr.data);
1955                 kvm_rdx_write(vcpu, vcpu->run->msr.data >> 32);
1956         }
1957 }
1958
1959 static int complete_emulated_msr_access(struct kvm_vcpu *vcpu)
1960 {
1961         return complete_emulated_insn_gp(vcpu, vcpu->run->msr.error);
1962 }
1963
1964 static int complete_emulated_rdmsr(struct kvm_vcpu *vcpu)
1965 {
1966         complete_userspace_rdmsr(vcpu);
1967         return complete_emulated_msr_access(vcpu);
1968 }
1969
1970 static int complete_fast_msr_access(struct kvm_vcpu *vcpu)
1971 {
1972         return static_call(kvm_x86_complete_emulated_msr)(vcpu, vcpu->run->msr.error);
1973 }
1974
1975 static int complete_fast_rdmsr(struct kvm_vcpu *vcpu)
1976 {
1977         complete_userspace_rdmsr(vcpu);
1978         return complete_fast_msr_access(vcpu);
1979 }
1980
1981 static u64 kvm_msr_reason(int r)
1982 {
1983         switch (r) {
1984         case KVM_MSR_RET_INVALID:
1985                 return KVM_MSR_EXIT_REASON_UNKNOWN;
1986         case KVM_MSR_RET_FILTERED:
1987                 return KVM_MSR_EXIT_REASON_FILTER;
1988         default:
1989                 return KVM_MSR_EXIT_REASON_INVAL;
1990         }
1991 }
1992
1993 static int kvm_msr_user_space(struct kvm_vcpu *vcpu, u32 index,
1994                               u32 exit_reason, u64 data,
1995                               int (*completion)(struct kvm_vcpu *vcpu),
1996                               int r)
1997 {
1998         u64 msr_reason = kvm_msr_reason(r);
1999
2000         /* Check if the user wanted to know about this MSR fault */
2001         if (!(vcpu->kvm->arch.user_space_msr_mask & msr_reason))
2002                 return 0;
2003
2004         vcpu->run->exit_reason = exit_reason;
2005         vcpu->run->msr.error = 0;
2006         memset(vcpu->run->msr.pad, 0, sizeof(vcpu->run->msr.pad));
2007         vcpu->run->msr.reason = msr_reason;
2008         vcpu->run->msr.index = index;
2009         vcpu->run->msr.data = data;
2010         vcpu->arch.complete_userspace_io = completion;
2011
2012         return 1;
2013 }
2014
2015 int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu)
2016 {
2017         u32 ecx = kvm_rcx_read(vcpu);
2018         u64 data;
2019         int r;
2020
2021         r = kvm_get_msr_with_filter(vcpu, ecx, &data);
2022
2023         if (!r) {
2024                 trace_kvm_msr_read(ecx, data);
2025
2026                 kvm_rax_write(vcpu, data & -1u);
2027                 kvm_rdx_write(vcpu, (data >> 32) & -1u);
2028         } else {
2029                 /* MSR read failed? See if we should ask user space */
2030                 if (kvm_msr_user_space(vcpu, ecx, KVM_EXIT_X86_RDMSR, 0,
2031                                        complete_fast_rdmsr, r))
2032                         return 0;
2033                 trace_kvm_msr_read_ex(ecx);
2034         }
2035
2036         return static_call(kvm_x86_complete_emulated_msr)(vcpu, r);
2037 }
2038 EXPORT_SYMBOL_GPL(kvm_emulate_rdmsr);
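
/*
 * The RDMSR result is returned in EDX:EAX by the two register writes
 * above.  For example, an MSR value of 0x1122334455667788 is returned
 * as RAX = 0x55667788 and RDX = 0x11223344, with the upper halves of
 * the 64-bit GPRs cleared as on real hardware.
 */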
2039
2040 int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu)
2041 {
2042         u32 ecx = kvm_rcx_read(vcpu);
2043         u64 data = kvm_read_edx_eax(vcpu);
2044         int r;
2045
2046         r = kvm_set_msr_with_filter(vcpu, ecx, data);
2047
2048         if (!r) {
2049                 trace_kvm_msr_write(ecx, data);
2050         } else {
2051                 /* MSR write failed? See if we should ask user space */
2052                 if (kvm_msr_user_space(vcpu, ecx, KVM_EXIT_X86_WRMSR, data,
2053                                        complete_fast_msr_access, r))
2054                         return 0;
2055                 /* Signal all other negative errors to userspace */
2056                 if (r < 0)
2057                         return r;
2058                 trace_kvm_msr_write_ex(ecx, data);
2059         }
2060
2061         return static_call(kvm_x86_complete_emulated_msr)(vcpu, r);
2062 }
2063 EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr);
2064
2065 int kvm_emulate_as_nop(struct kvm_vcpu *vcpu)
2066 {
2067         return kvm_skip_emulated_instruction(vcpu);
2068 }
2069 EXPORT_SYMBOL_GPL(kvm_emulate_as_nop);
2070
2071 int kvm_emulate_invd(struct kvm_vcpu *vcpu)
2072 {
2073         /* Treat an INVD instruction as a NOP and just skip it. */
2074         return kvm_emulate_as_nop(vcpu);
2075 }
2076 EXPORT_SYMBOL_GPL(kvm_emulate_invd);
2077
2078 int kvm_handle_invalid_op(struct kvm_vcpu *vcpu)
2079 {
2080         kvm_queue_exception(vcpu, UD_VECTOR);
2081         return 1;
2082 }
2083 EXPORT_SYMBOL_GPL(kvm_handle_invalid_op);
2084
2085
2086 static int kvm_emulate_monitor_mwait(struct kvm_vcpu *vcpu, const char *insn)
2087 {
2088         if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS) &&
2089             !guest_cpuid_has(vcpu, X86_FEATURE_MWAIT))
2090                 return kvm_handle_invalid_op(vcpu);
2091
2092         pr_warn_once("kvm: %s instruction emulated as NOP!\n", insn);
2093         return kvm_emulate_as_nop(vcpu);
2094 }
2095 int kvm_emulate_mwait(struct kvm_vcpu *vcpu)
2096 {
2097         return kvm_emulate_monitor_mwait(vcpu, "MWAIT");
2098 }
2099 EXPORT_SYMBOL_GPL(kvm_emulate_mwait);
2100
2101 int kvm_emulate_monitor(struct kvm_vcpu *vcpu)
2102 {
2103         return kvm_emulate_monitor_mwait(vcpu, "MONITOR");
2104 }
2105 EXPORT_SYMBOL_GPL(kvm_emulate_monitor);
2106
2107 static inline bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu)
2108 {
2109         xfer_to_guest_mode_prepare();
2110         return vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu) ||
2111                 xfer_to_guest_mode_work_pending();
2112 }
2113
2114 /*
2115  * The fast path for frequent and performance-sensitive WRMSR emulation,
2116  * i.e. sending an IPI.  Handling the IPI early in the VM-Exit flow reduces
2117  * the latency of virtual IPIs by avoiding the expensive bits of transitioning
2118  * from guest to host, e.g. reacquiring KVM's SRCU lock, in contrast to the
2119  * other cases, which must be handled after interrupts are enabled on the host.
2120  */
2121 static int handle_fastpath_set_x2apic_icr_irqoff(struct kvm_vcpu *vcpu, u64 data)
2122 {
2123         if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(vcpu->arch.apic))
2124                 return 1;
2125
2126         if (((data & APIC_SHORT_MASK) == APIC_DEST_NOSHORT) &&
2127             ((data & APIC_DEST_MASK) == APIC_DEST_PHYSICAL) &&
2128             ((data & APIC_MODE_MASK) == APIC_DM_FIXED) &&
2129             ((u32)(data >> 32) != X2APIC_BROADCAST))
2130                 return kvm_x2apic_icr_write(vcpu->arch.apic, data);
2131
2132         return 1;
2133 }
2134
2135 static int handle_fastpath_set_tscdeadline(struct kvm_vcpu *vcpu, u64 data)
2136 {
2137         if (!kvm_can_use_hv_timer(vcpu))
2138                 return 1;
2139
2140         kvm_set_lapic_tscdeadline_msr(vcpu, data);
2141         return 0;
2142 }
2143
2144 fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu)
2145 {
2146         u32 msr = kvm_rcx_read(vcpu);
2147         u64 data;
2148         fastpath_t ret = EXIT_FASTPATH_NONE;
2149
2150         switch (msr) {
2151         case APIC_BASE_MSR + (APIC_ICR >> 4):
2152                 data = kvm_read_edx_eax(vcpu);
2153                 if (!handle_fastpath_set_x2apic_icr_irqoff(vcpu, data)) {
2154                         kvm_skip_emulated_instruction(vcpu);
2155                         ret = EXIT_FASTPATH_EXIT_HANDLED;
2156                 }
2157                 break;
2158         case MSR_IA32_TSC_DEADLINE:
2159                 data = kvm_read_edx_eax(vcpu);
2160                 if (!handle_fastpath_set_tscdeadline(vcpu, data)) {
2161                         kvm_skip_emulated_instruction(vcpu);
2162                         ret = EXIT_FASTPATH_REENTER_GUEST;
2163                 }
2164                 break;
2165         default:
2166                 break;
2167         }
2168
2169         if (ret != EXIT_FASTPATH_NONE)
2170                 trace_kvm_msr_write(msr, data);
2171
2172         return ret;
2173 }
2174 EXPORT_SYMBOL_GPL(handle_fastpath_set_msr_irqoff);
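
/*
 * A rough map of the MSR indices handled by the fastpath above:
 * APIC_BASE_MSR + (APIC_ICR >> 4) is the x2APIC ICR (MSR 0x830), used
 * for virtual IPIs, and MSR_IA32_TSC_DEADLINE (MSR 0x6e0) programs the
 * TSC-deadline timer; every other WRMSR falls back to the full
 * emulation path.
 */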
2175
2176 /*
2177  * Adapt set_msr() and get_msr() to msr_io()'s calling convention
2178  */
2179 static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
2180 {
2181         return kvm_get_msr_ignored_check(vcpu, index, data, true);
2182 }
2183
2184 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
2185 {
2186         return kvm_set_msr_ignored_check(vcpu, index, *data, true);
2187 }
2188
2189 #ifdef CONFIG_X86_64
2190 struct pvclock_clock {
2191         int vclock_mode;
2192         u64 cycle_last;
2193         u64 mask;
2194         u32 mult;
2195         u32 shift;
2196         u64 base_cycles;
2197         u64 offset;
2198 };
2199
2200 struct pvclock_gtod_data {
2201         seqcount_t      seq;
2202
2203         struct pvclock_clock clock; /* extract of a clocksource struct */
2204         struct pvclock_clock raw_clock; /* extract of a clocksource struct */
2205
2206         ktime_t         offs_boot;
2207         u64             wall_time_sec;
2208 };
2209
2210 static struct pvclock_gtod_data pvclock_gtod_data;
2211
2212 static void update_pvclock_gtod(struct timekeeper *tk)
2213 {
2214         struct pvclock_gtod_data *vdata = &pvclock_gtod_data;
2215
2216         write_seqcount_begin(&vdata->seq);
2217
2218         /* copy pvclock gtod data */
2219         vdata->clock.vclock_mode        = tk->tkr_mono.clock->vdso_clock_mode;
2220         vdata->clock.cycle_last         = tk->tkr_mono.cycle_last;
2221         vdata->clock.mask               = tk->tkr_mono.mask;
2222         vdata->clock.mult               = tk->tkr_mono.mult;
2223         vdata->clock.shift              = tk->tkr_mono.shift;
2224         vdata->clock.base_cycles        = tk->tkr_mono.xtime_nsec;
2225         vdata->clock.offset             = tk->tkr_mono.base;
2226
2227         vdata->raw_clock.vclock_mode    = tk->tkr_raw.clock->vdso_clock_mode;
2228         vdata->raw_clock.cycle_last     = tk->tkr_raw.cycle_last;
2229         vdata->raw_clock.mask           = tk->tkr_raw.mask;
2230         vdata->raw_clock.mult           = tk->tkr_raw.mult;
2231         vdata->raw_clock.shift          = tk->tkr_raw.shift;
2232         vdata->raw_clock.base_cycles    = tk->tkr_raw.xtime_nsec;
2233         vdata->raw_clock.offset         = tk->tkr_raw.base;
2234
2235         vdata->wall_time_sec            = tk->xtime_sec;
2236
2237         vdata->offs_boot                = tk->offs_boot;
2238
2239         write_seqcount_end(&vdata->seq);
2240 }
2241
2242 static s64 get_kvmclock_base_ns(void)
2243 {
2244         /* Count up from boot time, but with the frequency of the raw clock.  */
2245         return ktime_to_ns(ktime_add(ktime_get_raw(), pvclock_gtod_data.offs_boot));
2246 }
2247 #else
2248 static s64 get_kvmclock_base_ns(void)
2249 {
2250         /* Master clock not used, so we can just use CLOCK_BOOTTIME.  */
2251         return ktime_get_boottime_ns();
2252 }
2253 #endif
2254
2255 static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock, int sec_hi_ofs)
2256 {
2257         int version;
2258         int r;
2259         struct pvclock_wall_clock wc;
2260         u32 wc_sec_hi;
2261         u64 wall_nsec;
2262
2263         if (!wall_clock)
2264                 return;
2265
2266         r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
2267         if (r)
2268                 return;
2269
2270         if (version & 1)
2271                 ++version;  /* first time write, random junk */
2272
2273         ++version;
2274
2275         if (kvm_write_guest(kvm, wall_clock, &version, sizeof(version)))
2276                 return;
2277
2278         /*
2279          * The guest calculates current wall clock time by adding
2280          * system time (updated by kvm_guest_time_update below) to the
2281          * wall clock specified here.  We do the reverse here.
2282          */
2283         wall_nsec = ktime_get_real_ns() - get_kvmclock_ns(kvm);
2284
2285         wc.nsec = do_div(wall_nsec, 1000000000);
2286         wc.sec = (u32)wall_nsec; /* overflow in 2106 guest time */
2287         wc.version = version;
2288
2289         kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));
2290
2291         if (sec_hi_ofs) {
2292                 wc_sec_hi = wall_nsec >> 32;
2293                 kvm_write_guest(kvm, wall_clock + sec_hi_ofs,
2294                                 &wc_sec_hi, sizeof(wc_sec_hi));
2295         }
2296
2297         version++;
2298         kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
2299 }
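
/*
 * Worked example of the arithmetic above (numbers chosen purely for
 * illustration): if ktime_get_real_ns() corresponds to 1700000000.25 s
 * and get_kvmclock_ns() to 100.25 s, then wall_nsec is 1699999900 s,
 * so wc.sec = 1699999900 and wc.nsec = 0, and the guest reconstructs
 * wall clock time as wc + kvmclock = 1699999900 + 100.25 s.
 */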
2300
2301 static void kvm_write_system_time(struct kvm_vcpu *vcpu, gpa_t system_time,
2302                                   bool old_msr, bool host_initiated)
2303 {
2304         struct kvm_arch *ka = &vcpu->kvm->arch;
2305
2306         if (vcpu->vcpu_id == 0 && !host_initiated) {
2307                 if (ka->boot_vcpu_runs_old_kvmclock != old_msr)
2308                         kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
2309
2310                 ka->boot_vcpu_runs_old_kvmclock = old_msr;
2311         }
2312
2313         vcpu->arch.time = system_time;
2314         kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
2315
2316         /* we verify if the enable bit is set... */
2317         if (system_time & 1) {
2318                 kvm_gfn_to_pfn_cache_init(vcpu->kvm, &vcpu->arch.pv_time, vcpu,
2319                                           KVM_HOST_USES_PFN, system_time & ~1ULL,
2320                                           sizeof(struct pvclock_vcpu_time_info));
2321         } else {
2322                 kvm_gfn_to_pfn_cache_destroy(vcpu->kvm, &vcpu->arch.pv_time);
2323         }
2324 }
2327
2328 static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
2329 {
2330         do_shl32_div32(dividend, divisor);
2331         return dividend;
2332 }
2333
2334 static void kvm_get_time_scale(uint64_t scaled_hz, uint64_t base_hz,
2335                                s8 *pshift, u32 *pmultiplier)
2336 {
2337         uint64_t scaled64;
2338         int32_t  shift = 0;
2339         uint64_t tps64;
2340         uint32_t tps32;
2341
2342         tps64 = base_hz;
2343         scaled64 = scaled_hz;
2344         while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) {
2345                 tps64 >>= 1;
2346                 shift--;
2347         }
2348
2349         tps32 = (uint32_t)tps64;
2350         while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) {
2351                 if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000)
2352                         scaled64 >>= 1;
2353                 else
2354                         tps32 <<= 1;
2355                 shift++;
2356         }
2357
2358         *pshift = shift;
2359         *pmultiplier = div_frac(scaled64, tps32);
2360 }
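
/*
 * A quick sanity check of the scale computation, assuming the usual
 * pvclock convention that a delta is scaled as
 * ((delta << shift) * mult) >> 32: for scaled_hz = 2 GHz and
 * base_hz = NSEC_PER_SEC the loops above yield shift = 2 and
 * mult = 0x80000000, so N elapsed nanoseconds become 2 * N guest TSC
 * cycles, as expected for a 2 GHz guest TSC.
 */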
2361
2362 #ifdef CONFIG_X86_64
2363 static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0);
2364 #endif
2365
2366 static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
2367 static unsigned long max_tsc_khz;
2368
2369 static u32 adjust_tsc_khz(u32 khz, s32 ppm)
2370 {
2371         u64 v = (u64)khz * (1000000 + ppm);
2372         do_div(v, 1000000);
2373         return v;
2374 }
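
/*
 * Example of the ppm adjustment above: adjust_tsc_khz(1000000, 250)
 * computes 1000000 * (1000000 + 250) / 1000000 = 1000250 kHz, i.e. a
 * 1 GHz rate widened by 250 parts per million.
 */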
2375
2376 static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier);
2377
2378 static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
2379 {
2380         u64 ratio;
2381
2382         /* Guest TSC same frequency as host TSC? */
2383         if (!scale) {
2384                 kvm_vcpu_write_tsc_multiplier(vcpu, kvm_caps.default_tsc_scaling_ratio);
2385                 return 0;
2386         }
2387
2388         /* TSC scaling supported? */
2389         if (!kvm_caps.has_tsc_control) {
2390                 if (user_tsc_khz > tsc_khz) {
2391                         vcpu->arch.tsc_catchup = 1;
2392                         vcpu->arch.tsc_always_catchup = 1;
2393                         return 0;
2394                 } else {
2395                         pr_warn_ratelimited("user requested TSC rate below hardware speed\n");
2396                         return -1;
2397                 }
2398         }
2399
2400         /* TSC scaling required  - calculate ratio */
2401         ratio = mul_u64_u32_div(1ULL << kvm_caps.tsc_scaling_ratio_frac_bits,
2402                                 user_tsc_khz, tsc_khz);
2403
2404         if (ratio == 0 || ratio >= kvm_caps.max_tsc_scaling_ratio) {
2405                 pr_warn_ratelimited("Invalid TSC scaling ratio - virtual-tsc-khz=%u\n",
2406                                     user_tsc_khz);
2407                 return -1;
2408         }
2409
2410         kvm_vcpu_write_tsc_multiplier(vcpu, ratio);
2411         return 0;
2412 }
2413
2414 static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz)
2415 {
2416         u32 thresh_lo, thresh_hi;
2417         int use_scaling = 0;
2418
2419         /* tsc_khz can be zero if TSC calibration fails */
2420         if (user_tsc_khz == 0) {
2421                 /* set tsc_scaling_ratio to a safe value */
2422                 kvm_vcpu_write_tsc_multiplier(vcpu, kvm_caps.default_tsc_scaling_ratio);
2423                 return -1;
2424         }
2425
2426         /* Compute a scale to convert nanoseconds in TSC cycles */
2427         kvm_get_time_scale(user_tsc_khz * 1000LL, NSEC_PER_SEC,
2428                            &vcpu->arch.virtual_tsc_shift,
2429                            &vcpu->arch.virtual_tsc_mult);
2430         vcpu->arch.virtual_tsc_khz = user_tsc_khz;
2431
2432         /*
2433          * Compute the variation in TSC rate which is acceptable
2434          * within the range of tolerance and decide if the
2435          * rate being applied is within those bounds of the hardware
2436          * rate.  If so, no scaling or compensation need be done.
2437          */
2438         thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm);
2439         thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm);
2440         if (user_tsc_khz < thresh_lo || user_tsc_khz > thresh_hi) {
2441                 pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", user_tsc_khz, thresh_lo, thresh_hi);
2442                 use_scaling = 1;
2443         }
2444         return set_tsc_khz(vcpu, user_tsc_khz, use_scaling);
2445 }
2446
2447 static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
2448 {
2449         u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec,
2450                                       vcpu->arch.virtual_tsc_mult,
2451                                       vcpu->arch.virtual_tsc_shift);
2452         tsc += vcpu->arch.this_tsc_write;
2453         return tsc;
2454 }
2455
2456 #ifdef CONFIG_X86_64
2457 static inline int gtod_is_based_on_tsc(int mode)
2458 {
2459         return mode == VDSO_CLOCKMODE_TSC || mode == VDSO_CLOCKMODE_HVCLOCK;
2460 }
2461 #endif
2462
2463 static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
2464 {
2465 #ifdef CONFIG_X86_64
2466         bool vcpus_matched;
2467         struct kvm_arch *ka = &vcpu->kvm->arch;
2468         struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
2469
2470         vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
2471                          atomic_read(&vcpu->kvm->online_vcpus));
2472
2473         /*
2474          * Once the masterclock is enabled, always perform the request in
2475          * order to update it.
2476          *
2477          * In order to enable the masterclock, the host clocksource must be TSC
2478          * and the vCPUs need to have matched TSCs.  When that happens,
2479          * perform the request to enable the masterclock.
2480          */
2481         if (ka->use_master_clock ||
2482             (gtod_is_based_on_tsc(gtod->clock.vclock_mode) && vcpus_matched))
2483                 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
2484
2485         trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc,
2486                             atomic_read(&vcpu->kvm->online_vcpus),
2487                             ka->use_master_clock, gtod->clock.vclock_mode);
2488 #endif
2489 }
2490
2491 /*
2492  * Multiply tsc by a fixed point number represented by ratio.
2493  *
2494  * The most significant 64-N bits (mult) of ratio represent the
2495  * integral part of the fixed point number; the remaining N bits
2496  * (frac) represent the fractional part, ie. ratio represents a fixed
2497  * point number (mult + frac * 2^(-N)).
2498  *
2499  * N equals to kvm_caps.tsc_scaling_ratio_frac_bits.
2500  */
2501 static inline u64 __scale_tsc(u64 ratio, u64 tsc)
2502 {
2503         return mul_u64_u64_shr(tsc, ratio, kvm_caps.tsc_scaling_ratio_frac_bits);
2504 }
2505
2506 u64 kvm_scale_tsc(u64 tsc, u64 ratio)
2507 {
2508         u64 _tsc = tsc;
2509
2510         if (ratio != kvm_caps.default_tsc_scaling_ratio)
2511                 _tsc = __scale_tsc(ratio, tsc);
2512
2513         return _tsc;
2514 }
2515 EXPORT_SYMBOL_GPL(kvm_scale_tsc);
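
/*
 * Illustration of the fixed-point format described above, assuming
 * tsc_scaling_ratio_frac_bits == 48 (the VMX layout): a ratio of
 * 1ULL << 47 encodes 0.5, so kvm_scale_tsc(tsc, 1ULL << 47) returns
 * tsc / 2, i.e. the guest TSC advances at half the host rate.
 */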
2516
2517 static u64 kvm_compute_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
2518 {
2519         u64 tsc;
2520
2521         tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio);
2522
2523         return target_tsc - tsc;
2524 }
2525
2526 u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
2527 {
2528         return vcpu->arch.l1_tsc_offset +
2529                 kvm_scale_tsc(host_tsc, vcpu->arch.l1_tsc_scaling_ratio);
2530 }
2531 EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
2532
2533 u64 kvm_calc_nested_tsc_offset(u64 l1_offset, u64 l2_offset, u64 l2_multiplier)
2534 {
2535         u64 nested_offset;
2536
2537         if (l2_multiplier == kvm_caps.default_tsc_scaling_ratio)
2538                 nested_offset = l1_offset;
2539         else
2540                 nested_offset = mul_s64_u64_shr((s64) l1_offset, l2_multiplier,
2541                                                 kvm_caps.tsc_scaling_ratio_frac_bits);
2542
2543         nested_offset += l2_offset;
2544         return nested_offset;
2545 }
2546 EXPORT_SYMBOL_GPL(kvm_calc_nested_tsc_offset);
2547
2548 u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier)
2549 {
2550         if (l2_multiplier != kvm_caps.default_tsc_scaling_ratio)
2551                 return mul_u64_u64_shr(l1_multiplier, l2_multiplier,
2552                                        kvm_caps.tsc_scaling_ratio_frac_bits);
2553
2554         return l1_multiplier;
2555 }
2556 EXPORT_SYMBOL_GPL(kvm_calc_nested_tsc_multiplier);
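
/*
 * Putting the two helpers above together, the L2 view of the TSC is
 * roughly:
 *
 *   l2_tsc = (host_tsc * l1_mult >> frac + l1_offset) * l2_mult >> frac
 *            + l2_offset
 *
 * with frac = kvm_caps.tsc_scaling_ratio_frac_bits, which is why the
 * combined offset scales l1_offset by l2_multiplier before adding
 * l2_offset, and the combined multiplier is the product of the two
 * ratios.
 */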
2557
2558 static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 l1_offset)
2559 {
2560         trace_kvm_write_tsc_offset(vcpu->vcpu_id,
2561                                    vcpu->arch.l1_tsc_offset,
2562                                    l1_offset);
2563
2564         vcpu->arch.l1_tsc_offset = l1_offset;
2565
2566         /*
2567          * If we are here because L1 chose not to trap WRMSR to TSC then
2568          * according to the spec this should set L1's TSC (as opposed to
2569          * setting L1's offset for L2).
2570          */
2571         if (is_guest_mode(vcpu))
2572                 vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(
2573                         l1_offset,
2574                         static_call(kvm_x86_get_l2_tsc_offset)(vcpu),
2575                         static_call(kvm_x86_get_l2_tsc_multiplier)(vcpu));
2576         else
2577                 vcpu->arch.tsc_offset = l1_offset;
2578
2579         static_call(kvm_x86_write_tsc_offset)(vcpu, vcpu->arch.tsc_offset);
2580 }
2581
2582 static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier)
2583 {
2584         vcpu->arch.l1_tsc_scaling_ratio = l1_multiplier;
2585
2586         /* Userspace is changing the multiplier while L2 is active */
2587         if (is_guest_mode(vcpu))
2588                 vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier(
2589                         l1_multiplier,
2590                         static_call(kvm_x86_get_l2_tsc_multiplier)(vcpu));
2591         else
2592                 vcpu->arch.tsc_scaling_ratio = l1_multiplier;
2593
2594         if (kvm_caps.has_tsc_control)
2595                 static_call(kvm_x86_write_tsc_multiplier)(
2596                         vcpu, vcpu->arch.tsc_scaling_ratio);
2597 }
2598
2599 static inline bool kvm_check_tsc_unstable(void)
2600 {
2601 #ifdef CONFIG_X86_64
2602         /*
2603          * TSC is marked unstable when we're running on Hyper-V, but the
2604          * 'TSC page' clocksource is still good.
2605          */
2606         if (pvclock_gtod_data.clock.vclock_mode == VDSO_CLOCKMODE_HVCLOCK)
2607                 return false;
2608 #endif
2609         return check_tsc_unstable();
2610 }
2611
2612 /*
2613  * Infers attempts to synchronize the guest's tsc from host writes. Sets the
2614  * offset for the vcpu and tracks the TSC matching generation that the vcpu
2615  * participates in.
2616  */
2617 static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc,
2618                                   u64 ns, bool matched)
2619 {
2620         struct kvm *kvm = vcpu->kvm;
2621
2622         lockdep_assert_held(&kvm->arch.tsc_write_lock);
2623
2624         /*
2625          * We also track the most recent recorded KHZ, write and time to
2626          * allow the matching interval to be extended at each write.
2627          */
2628         kvm->arch.last_tsc_nsec = ns;
2629         kvm->arch.last_tsc_write = tsc;
2630         kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
2631         kvm->arch.last_tsc_offset = offset;
2632
2633         vcpu->arch.last_guest_tsc = tsc;
2634
2635         kvm_vcpu_write_tsc_offset(vcpu, offset);
2636
2637         if (!matched) {
2638                 /*
2639                  * We split periods of matched TSC writes into generations.
2640                  * For each generation, we track the original measured
2641                  * nanosecond time, offset, and write, so if TSCs are in
2642                  * sync, we can match exact offset, and if not, we can match
2643                  * exact software computation in compute_guest_tsc()
2644                  *
2645                  * These values are tracked in kvm->arch.cur_xxx variables.
2646                  */
2647                 kvm->arch.cur_tsc_generation++;
2648                 kvm->arch.cur_tsc_nsec = ns;
2649                 kvm->arch.cur_tsc_write = tsc;
2650                 kvm->arch.cur_tsc_offset = offset;
2651                 kvm->arch.nr_vcpus_matched_tsc = 0;
2652         } else if (vcpu->arch.this_tsc_generation != kvm->arch.cur_tsc_generation) {
2653                 kvm->arch.nr_vcpus_matched_tsc++;
2654         }
2655
2656         /* Keep track of which generation this VCPU has synchronized to */
2657         vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
2658         vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
2659         vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
2660
2661         kvm_track_tsc_matching(vcpu);
2662 }
2663
2664 static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
2665 {
2666         struct kvm *kvm = vcpu->kvm;
2667         u64 offset, ns, elapsed;
2668         unsigned long flags;
2669         bool matched = false;
2670         bool synchronizing = false;
2671
2672         raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
2673         offset = kvm_compute_l1_tsc_offset(vcpu, data);
2674         ns = get_kvmclock_base_ns();
2675         elapsed = ns - kvm->arch.last_tsc_nsec;
2676
2677         if (vcpu->arch.virtual_tsc_khz) {
2678                 if (data == 0) {
2679                         /*
2680                          * detection of vcpu initialization -- need to sync
2681                          * with other vCPUs. This particularly helps to keep
2682                          * kvm_clock stable after CPU hotplug
2683                          */
2684                         synchronizing = true;
2685                 } else {
2686                         u64 tsc_exp = kvm->arch.last_tsc_write +
2687                                                 nsec_to_cycles(vcpu, elapsed);
2688                         u64 tsc_hz = vcpu->arch.virtual_tsc_khz * 1000LL;
2689                         /*
2690                          * Special case: TSC write with a small delta (1 second)
2691                          * of virtual cycle time against real time is
2692                          * interpreted as an attempt to synchronize the CPU.
2693                          */
2694                         synchronizing = data < tsc_exp + tsc_hz &&
2695                                         data + tsc_hz > tsc_exp;
2696                 }
2697         }
2698
2699         /*
2700          * For a reliable TSC, we can match TSC offsets, and for an unstable
2701          * TSC, we add elapsed time in this computation.  We could let the
2702          * compensation code attempt to catch up if we fall behind, but
2703          * it's better to try to match offsets from the beginning.
2704          */
2705         if (synchronizing &&
2706             vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
2707                 if (!kvm_check_tsc_unstable()) {
2708                         offset = kvm->arch.cur_tsc_offset;
2709                 } else {
2710                         u64 delta = nsec_to_cycles(vcpu, elapsed);
2711                         data += delta;
2712                         offset = kvm_compute_l1_tsc_offset(vcpu, data);
2713                 }
2714                 matched = true;
2715         }
2716
2717         __kvm_synchronize_tsc(vcpu, offset, data, ns, matched);
2718         raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
2719 }
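
/*
 * Concretely, with virtual_tsc_khz = 2000000 (a 2 GHz guest TSC), the
 * "synchronizing" heuristic above treats any non-zero TSC write that
 * lands within one second's worth of cycles (2,000,000,000) of the
 * extrapolated tsc_exp as an attempt to synchronize with the other
 * vCPUs rather than as an independent TSC write.
 */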
2720
2721 static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
2722                                            s64 adjustment)
2723 {
2724         u64 tsc_offset = vcpu->arch.l1_tsc_offset;
2725         kvm_vcpu_write_tsc_offset(vcpu, tsc_offset + adjustment);
2726 }
2727
2728 static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
2729 {
2730         if (vcpu->arch.l1_tsc_scaling_ratio != kvm_caps.default_tsc_scaling_ratio)
2731                 WARN_ON(adjustment < 0);
2732         adjustment = kvm_scale_tsc((u64) adjustment,
2733                                    vcpu->arch.l1_tsc_scaling_ratio);
2734         adjust_tsc_offset_guest(vcpu, adjustment);
2735 }
2736
2737 #ifdef CONFIG_X86_64
2738
2739 static u64 read_tsc(void)
2740 {
2741         u64 ret = (u64)rdtsc_ordered();
2742         u64 last = pvclock_gtod_data.clock.cycle_last;
2743
2744         if (likely(ret >= last))
2745                 return ret;
2746
2747         /*
2748          * GCC likes to generate cmov here, but this branch is extremely
2749          * predictable (it's just a function of time and the likely is
2750          * very likely) and there's a data dependence, so force GCC
2751          * to generate a branch instead.  I don't barrier() because
2752          * we don't actually need a barrier, and if this function
2753          * ever gets inlined it will generate worse code.
2754          */
2755         asm volatile ("");
2756         return last;
2757 }
2758
2759 static inline u64 vgettsc(struct pvclock_clock *clock, u64 *tsc_timestamp,
2760                           int *mode)
2761 {
2762         long v;
2763         u64 tsc_pg_val;
2764
2765         switch (clock->vclock_mode) {
2766         case VDSO_CLOCKMODE_HVCLOCK:
2767                 tsc_pg_val = hv_read_tsc_page_tsc(hv_get_tsc_page(),
2768                                                   tsc_timestamp);
2769                 if (tsc_pg_val != U64_MAX) {
2770                         /* TSC page valid */
2771                         *mode = VDSO_CLOCKMODE_HVCLOCK;
2772                         v = (tsc_pg_val - clock->cycle_last) &
2773                                 clock->mask;
2774                 } else {
2775                         /* TSC page invalid */
2776                         *mode = VDSO_CLOCKMODE_NONE;
2777                 }
2778                 break;
2779         case VDSO_CLOCKMODE_TSC:
2780                 *mode = VDSO_CLOCKMODE_TSC;
2781                 *tsc_timestamp = read_tsc();
2782                 v = (*tsc_timestamp - clock->cycle_last) &
2783                         clock->mask;
2784                 break;
2785         default:
2786                 *mode = VDSO_CLOCKMODE_NONE;
2787         }
2788
2789         if (*mode == VDSO_CLOCKMODE_NONE)
2790                 *tsc_timestamp = v = 0;
2791
2792         return v * clock->mult;
2793 }
2794
2795 static int do_monotonic_raw(s64 *t, u64 *tsc_timestamp)
2796 {
2797         struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
2798         unsigned long seq;
2799         int mode;
2800         u64 ns;
2801
2802         do {
2803                 seq = read_seqcount_begin(&gtod->seq);
2804                 ns = gtod->raw_clock.base_cycles;
2805                 ns += vgettsc(&gtod->raw_clock, tsc_timestamp, &mode);
2806                 ns >>= gtod->raw_clock.shift;
2807                 ns += ktime_to_ns(ktime_add(gtod->raw_clock.offset, gtod->offs_boot));
2808         } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
2809         *t = ns;
2810
2811         return mode;
2812 }
2813
2814 static int do_realtime(struct timespec64 *ts, u64 *tsc_timestamp)
2815 {
2816         struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
2817         unsigned long seq;
2818         int mode;
2819         u64 ns;
2820
2821         do {
2822                 seq = read_seqcount_begin(&gtod->seq);
2823                 ts->tv_sec = gtod->wall_time_sec;
2824                 ns = gtod->clock.base_cycles;
2825                 ns += vgettsc(&gtod->clock, tsc_timestamp, &mode);
2826                 ns >>= gtod->clock.shift;
2827         } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
2828
2829         ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
2830         ts->tv_nsec = ns;
2831
2832         return mode;
2833 }
2834
2835 /* returns true if host is using TSC based clocksource */
2836 static bool kvm_get_time_and_clockread(s64 *kernel_ns, u64 *tsc_timestamp)
2837 {
2838         /* checked again under seqlock below */
2839         if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode))
2840                 return false;
2841
2842         return gtod_is_based_on_tsc(do_monotonic_raw(kernel_ns,
2843                                                       tsc_timestamp));
2844 }
2845
2846 /* returns true if host is using TSC based clocksource */
2847 static bool kvm_get_walltime_and_clockread(struct timespec64 *ts,
2848                                            u64 *tsc_timestamp)
2849 {
2850         /* checked again under seqlock below */
2851         if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode))
2852                 return false;
2853
2854         return gtod_is_based_on_tsc(do_realtime(ts, tsc_timestamp));
2855 }
2856 #endif
2857
2858 /*
2859  *
2860  * Assuming a stable TSC across physical CPUs, and a stable TSC
2861  * across virtual CPUs, the following condition is possible.
2862  * Each numbered line represents an event visible to both
2863  * CPUs at the next numbered event.
2864  *
2865  * "timespecX" represents host monotonic time. "tscX" represents
2866  * RDTSC value.
2867  *
2868  *              VCPU0 on CPU0           |       VCPU1 on CPU1
2869  *
2870  * 1.  read timespec0,tsc0
2871  * 2.                                   | timespec1 = timespec0 + N
2872  *                                      | tsc1 = tsc0 + M
2873  * 3. transition to guest               | transition to guest
2874  * 4. ret0 = timespec0 + (rdtsc - tsc0) |
2875  * 5.                                   | ret1 = timespec1 + (rdtsc - tsc1)
2876  *                                      | ret1 = timespec0 + N + (rdtsc - (tsc0 + M))
2877  *
2878  * Since ret0 update is visible to VCPU1 at time 5, to obey monotonicity:
2879  *
2880  *      - ret0 < ret1
2881  *      - timespec0 + (rdtsc - tsc0) < timespec0 + N + (rdtsc - (tsc0 + M))
2882  *              ...
2883  *      - 0 < N - M => M < N
2884  *
2885  * That is, when timespec0 != timespec1, M < N. Unfortunately that is not
2886  * always the case (the difference between two distinct xtime instances
2887  * might be smaller than the difference between corresponding TSC reads,
2888  * when updating guest vcpus' pvclock areas).
2889  *
2890  * To avoid that problem, do not allow visibility of distinct
2891  * system_timestamp/tsc_timestamp values simultaneously: use a master
2892  * copy of host monotonic time values. Update that master copy
2893  * in lockstep.
2894  *
2895  * Rely on synchronization of host TSCs and guest TSCs for monotonicity.
2896  *
2897  */
2898
2899 static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
2900 {
2901 #ifdef CONFIG_X86_64
2902         struct kvm_arch *ka = &kvm->arch;
2903         int vclock_mode;
2904         bool host_tsc_clocksource, vcpus_matched;
2905
2906         lockdep_assert_held(&kvm->arch.tsc_write_lock);
2907         vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
2908                         atomic_read(&kvm->online_vcpus));
2909
2910         /*
2911          * If the host uses the TSC clock, then pass the TSC through as stable
2912          * to the guest.
2913          */
2914         host_tsc_clocksource = kvm_get_time_and_clockread(
2915                                         &ka->master_kernel_ns,
2916                                         &ka->master_cycle_now);
2917
2918         ka->use_master_clock = host_tsc_clocksource && vcpus_matched
2919                                 && !ka->backwards_tsc_observed
2920                                 && !ka->boot_vcpu_runs_old_kvmclock;
2921
2922         if (ka->use_master_clock)
2923                 atomic_set(&kvm_guest_has_master_clock, 1);
2924
2925         vclock_mode = pvclock_gtod_data.clock.vclock_mode;
2926         trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode,
2927                                         vcpus_matched);
2928 #endif
2929 }
2930
2931 static void kvm_make_mclock_inprogress_request(struct kvm *kvm)
2932 {
2933         kvm_make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
2934 }
2935
2936 static void __kvm_start_pvclock_update(struct kvm *kvm)
2937 {
2938         raw_spin_lock_irq(&kvm->arch.tsc_write_lock);
2939         write_seqcount_begin(&kvm->arch.pvclock_sc);
2940 }
2941
2942 static void kvm_start_pvclock_update(struct kvm *kvm)
2943 {
2944         kvm_make_mclock_inprogress_request(kvm);
2945
2946         /* no guest entries from this point */
2947         __kvm_start_pvclock_update(kvm);
2948 }
2949
2950 static void kvm_end_pvclock_update(struct kvm *kvm)
2951 {
2952         struct kvm_arch *ka = &kvm->arch;
2953         struct kvm_vcpu *vcpu;
2954         unsigned long i;
2955
2956         write_seqcount_end(&ka->pvclock_sc);
2957         raw_spin_unlock_irq(&ka->tsc_write_lock);
2958         kvm_for_each_vcpu(i, vcpu, kvm)
2959                 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
2960
2961         /* guest entries allowed */
2962         kvm_for_each_vcpu(i, vcpu, kvm)
2963                 kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);
2964 }
2965
2966 static void kvm_update_masterclock(struct kvm *kvm)
2967 {
2968         kvm_hv_request_tsc_page_update(kvm);
2969         kvm_start_pvclock_update(kvm);
2970         pvclock_update_vm_gtod_copy(kvm);
2971         kvm_end_pvclock_update(kvm);
2972 }
2973
2974 /* Called within read_seqcount_begin/retry for kvm->pvclock_sc.  */
2975 static void __get_kvmclock(struct kvm *kvm, struct kvm_clock_data *data)
2976 {
2977         struct kvm_arch *ka = &kvm->arch;
2978         struct pvclock_vcpu_time_info hv_clock;
2979
2980         /* both __this_cpu_read() and rdtsc() should be on the same cpu */
2981         get_cpu();
2982
2983         data->flags = 0;
2984         if (ka->use_master_clock && __this_cpu_read(cpu_tsc_khz)) {
2985 #ifdef CONFIG_X86_64
2986                 struct timespec64 ts;
2987
2988                 if (kvm_get_walltime_and_clockread(&ts, &data->host_tsc)) {
2989                         data->realtime = ts.tv_nsec + NSEC_PER_SEC * ts.tv_sec;
2990                         data->flags |= KVM_CLOCK_REALTIME | KVM_CLOCK_HOST_TSC;
2991                 } else
2992 #endif
2993                 data->host_tsc = rdtsc();
2994
2995                 data->flags |= KVM_CLOCK_TSC_STABLE;
2996                 hv_clock.tsc_timestamp = ka->master_cycle_now;
2997                 hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
2998                 kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL,
2999                                    &hv_clock.tsc_shift,
3000                                    &hv_clock.tsc_to_system_mul);
3001                 data->clock = __pvclock_read_cycles(&hv_clock, data->host_tsc);
3002         } else {
3003                 data->clock = get_kvmclock_base_ns() + ka->kvmclock_offset;
3004         }
3005
3006         put_cpu();
3007 }
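
/*
 * Illustrative sketch (not wired into KVM): the conversion that
 * __pvclock_read_cycles() performs with the parameters filled in above.
 * The standalone helper below is hypothetical; the arithmetic mirrors
 * the pvclock ABI: scale the TSC delta by 2^tsc_shift, then multiply by
 * tsc_to_system_mul and keep the top 32 bits of the product.
 */
static inline u64 example_pvclock_to_ns(const struct pvclock_vcpu_time_info *hv,
                                        u64 tsc)
{
        u64 delta = tsc - hv->tsc_timestamp;

        if (hv->tsc_shift < 0)
                delta >>= -hv->tsc_shift;
        else
                delta <<= hv->tsc_shift;

        /* 128-bit intermediate avoids overflowing delta * mul. */
        return hv->system_time + mul_u64_u32_shr(delta, hv->tsc_to_system_mul, 32);
}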
3008
3009 static void get_kvmclock(struct kvm *kvm, struct kvm_clock_data *data)
3010 {
3011         struct kvm_arch *ka = &kvm->arch;
3012         unsigned seq;
3013
3014         do {
3015                 seq = read_seqcount_begin(&ka->pvclock_sc);
3016                 __get_kvmclock(kvm, data);
3017         } while (read_seqcount_retry(&ka->pvclock_sc, seq));
3018 }
3019
3020 u64 get_kvmclock_ns(struct kvm *kvm)
3021 {
3022         struct kvm_clock_data data;
3023
3024         get_kvmclock(kvm, &data);
3025         return data.clock;
3026 }
3027
3028 static void kvm_setup_guest_pvclock(struct kvm_vcpu *v,
3029                                     struct gfn_to_pfn_cache *gpc,
3030                                     unsigned int offset)
3031 {
3032         struct kvm_vcpu_arch *vcpu = &v->arch;
3033         struct pvclock_vcpu_time_info *guest_hv_clock;
3034         unsigned long flags;
3035
3036         read_lock_irqsave(&gpc->lock, flags);
3037         while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc, gpc->gpa,
3038                                            offset + sizeof(*guest_hv_clock))) {
3039                 read_unlock_irqrestore(&gpc->lock, flags);
3040
3041                 if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc, gpc->gpa,
3042                                                  offset + sizeof(*guest_hv_clock)))
3043                         return;
3044
3045                 read_lock_irqsave(&gpc->lock, flags);
3046         }
3047
3048         guest_hv_clock = (void *)(gpc->khva + offset);
3049
3050         /*
3051          * This VCPU is paused, but it's legal for a guest to read another
3052          * VCPU's kvmclock, so we really have to follow the specification where
3053          * it says that version is odd if data is being modified, and even after
3054          * it is consistent.
3055          */
3056
3057         guest_hv_clock->version = vcpu->hv_clock.version = (guest_hv_clock->version + 1) | 1;
3058         smp_wmb();
3059
3060         /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
3061         vcpu->hv_clock.flags |= (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
3062
3063         if (vcpu->pvclock_set_guest_stopped_request) {
3064                 vcpu->hv_clock.flags |= PVCLOCK_GUEST_STOPPED;
3065                 vcpu->pvclock_set_guest_stopped_request = false;
3066         }
3067
3068         memcpy(guest_hv_clock, &vcpu->hv_clock, sizeof(*guest_hv_clock));
3069         smp_wmb();
3070
3071         guest_hv_clock->version = ++vcpu->hv_clock.version;
3072
3073         mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
3074         read_unlock_irqrestore(&gpc->lock, flags);
3075
3076         trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
3077 }
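
/*
 * Illustrative guest-side counterpart (a sketch, not KVM code): how a
 * pvclock-aware guest is expected to consume the even/odd version
 * protocol written above.  The helper name is hypothetical and reuses
 * the example_pvclock_to_ns() sketch from earlier in this file.
 */
static inline u64 example_guest_read_clock(volatile struct pvclock_vcpu_time_info *ti)
{
        u32 version;
        u64 ns;

        do {
                version = ti->version;  /* odd => writer in progress */
                rmb();
                ns = example_pvclock_to_ns((const struct pvclock_vcpu_time_info *)ti,
                                           rdtsc());
                rmb();
        } while ((version & 1) || version != ti->version);

        return ns;
}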
3078
3079 static int kvm_guest_time_update(struct kvm_vcpu *v)
3080 {
3081         unsigned long flags, tgt_tsc_khz;
3082         unsigned seq;
3083         struct kvm_vcpu_arch *vcpu = &v->arch;
3084         struct kvm_arch *ka = &v->kvm->arch;
3085         s64 kernel_ns;
3086         u64 tsc_timestamp, host_tsc;
3087         u8 pvclock_flags;
3088         bool use_master_clock;
3089
3090         kernel_ns = 0;
3091         host_tsc = 0;
3092
3093         /*
3094          * If the host uses the TSC clock, then pass through the TSC as stable
3095          * to the guest.
3096          */
3097         do {
3098                 seq = read_seqcount_begin(&ka->pvclock_sc);
3099                 use_master_clock = ka->use_master_clock;
3100                 if (use_master_clock) {
3101                         host_tsc = ka->master_cycle_now;
3102                         kernel_ns = ka->master_kernel_ns;
3103                 }
3104         } while (read_seqcount_retry(&ka->pvclock_sc, seq));
3105
3106         /* Keep irq disabled to prevent changes to the clock */
3107         local_irq_save(flags);
3108         tgt_tsc_khz = __this_cpu_read(cpu_tsc_khz);
3109         if (unlikely(tgt_tsc_khz == 0)) {
3110                 local_irq_restore(flags);
3111                 kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
3112                 return 1;
3113         }
3114         if (!use_master_clock) {
3115                 host_tsc = rdtsc();
3116                 kernel_ns = get_kvmclock_base_ns();
3117         }
3118
3119         tsc_timestamp = kvm_read_l1_tsc(v, host_tsc);
3120
3121         /*
3122          * We may have to catch up the TSC to match elapsed wall clock
3123          * time for two reasons, even if kvmclock is used.
3124          *   1) CPU could have been running below the maximum TSC rate
3125          *   2) Broken TSC compensation resets the base at each VCPU
3126          *      entry to avoid unknown leaps of TSC even when running
3127          *      again on the same CPU.  This may cause apparent elapsed
3128          *      time to disappear, and the guest to stand still or run
3129          *      very slowly.
3130          */
3131         if (vcpu->tsc_catchup) {
3132                 u64 tsc = compute_guest_tsc(v, kernel_ns);
3133                 if (tsc > tsc_timestamp) {
3134                         adjust_tsc_offset_guest(v, tsc - tsc_timestamp);
3135                         tsc_timestamp = tsc;
3136                 }
3137         }
3138
3139         local_irq_restore(flags);
3140
3141         /* With all the info we got, fill in the values */
3142
3143         if (kvm_caps.has_tsc_control)
3144                 tgt_tsc_khz = kvm_scale_tsc(tgt_tsc_khz,
3145                                             v->arch.l1_tsc_scaling_ratio);
3146
3147         if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) {
3148                 kvm_get_time_scale(NSEC_PER_SEC, tgt_tsc_khz * 1000LL,
3149                                    &vcpu->hv_clock.tsc_shift,
3150                                    &vcpu->hv_clock.tsc_to_system_mul);
3151                 vcpu->hw_tsc_khz = tgt_tsc_khz;
3152         }
3153
3154         vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
3155         vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
3156         vcpu->last_guest_tsc = tsc_timestamp;
3157
3158         /* If the host uses TSC clocksource, then it is stable */
3159         pvclock_flags = 0;
3160         if (use_master_clock)
3161                 pvclock_flags |= PVCLOCK_TSC_STABLE_BIT;
3162
3163         vcpu->hv_clock.flags = pvclock_flags;
3164
3165         if (vcpu->pv_time.active)
3166                 kvm_setup_guest_pvclock(v, &vcpu->pv_time, 0);
3167         if (vcpu->xen.vcpu_info_cache.active)
3168                 kvm_setup_guest_pvclock(v, &vcpu->xen.vcpu_info_cache,
3169                                         offsetof(struct compat_vcpu_info, time));
3170         if (vcpu->xen.vcpu_time_info_cache.active)
3171                 kvm_setup_guest_pvclock(v, &vcpu->xen.vcpu_time_info_cache, 0);
3172         kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
3173         return 0;
3174 }
3175
3176 /*
3177  * kvmclock updates which are isolated to a given vcpu, such as
3178  * vcpu->cpu migration, should not allow system_timestamp from
3179  * the rest of the vcpus to remain static. Otherwise ntp frequency
3180  * correction applies to one vcpu's system_timestamp but not
3181  * the others.
3182  *
3183  * So in those cases, request a kvmclock update for all vcpus.
3184  * We need to rate-limit these requests though, as they can
3185  * considerably slow guests that have a large number of vcpus.
3186  * The time for a remote vcpu to update its kvmclock is bound
3187  * by the delay we use to rate-limit the updates.
3188  */
3189
3190 #define KVMCLOCK_UPDATE_DELAY msecs_to_jiffies(100)
3191
3192 static void kvmclock_update_fn(struct work_struct *work)
3193 {
3194         unsigned long i;
3195         struct delayed_work *dwork = to_delayed_work(work);
3196         struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
3197                                            kvmclock_update_work);
3198         struct kvm *kvm = container_of(ka, struct kvm, arch);
3199         struct kvm_vcpu *vcpu;
3200
3201         kvm_for_each_vcpu(i, vcpu, kvm) {
3202                 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
3203                 kvm_vcpu_kick(vcpu);
3204         }
3205 }
3206
3207 static void kvm_gen_kvmclock_update(struct kvm_vcpu *v)
3208 {
3209         struct kvm *kvm = v->kvm;
3210
3211         kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
3212         schedule_delayed_work(&kvm->arch.kvmclock_update_work,
3213                                         KVMCLOCK_UPDATE_DELAY);
3214 }
3215
3216 #define KVMCLOCK_SYNC_PERIOD (300 * HZ)
3217
3218 static void kvmclock_sync_fn(struct work_struct *work)
3219 {
3220         struct delayed_work *dwork = to_delayed_work(work);
3221         struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
3222                                            kvmclock_sync_work);
3223         struct kvm *kvm = container_of(ka, struct kvm, arch);
3224
3225         if (!kvmclock_periodic_sync)
3226                 return;
3227
3228         schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0);
3229         schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
3230                                         KVMCLOCK_SYNC_PERIOD);
3231 }
3232
3233 /* These helpers are safe iff @msr is known to be an MCx bank MSR. */
3234 static bool is_mci_control_msr(u32 msr)
3235 {
3236         return (msr & 3) == 0;
3237 }
3238 static bool is_mci_status_msr(u32 msr)
3239 {
3240         return (msr & 3) == 1;
3241 }
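
/*
 * For reference, the architectural MCA layout behind the "& 3" checks
 * above: bank N occupies four consecutive MSRs starting at
 * MSR_IA32_MC0_CTL (0x400):
 *
 *      0x400 + 4*N + 0: IA32_MCi_CTL    -> (msr & 3) == 0
 *      0x400 + 4*N + 1: IA32_MCi_STATUS -> (msr & 3) == 1
 *      0x400 + 4*N + 2: IA32_MCi_ADDR
 *      0x400 + 4*N + 3: IA32_MCi_MISC
 */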
3242
3243 /*
3244  * On AMD, HWCR[McStatusWrEn] controls whether setting MCi_STATUS results in #GP.
3245  */
3246 static bool can_set_mci_status(struct kvm_vcpu *vcpu)
3247 {
3248         /* McStatusWrEn enabled? */
3249         if (guest_cpuid_is_amd_or_hygon(vcpu))
3250                 return !!(vcpu->arch.msr_hwcr & BIT_ULL(18));
3251
3252         return false;
3253 }
3254
3255 static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
3256 {
3257         u64 mcg_cap = vcpu->arch.mcg_cap;
3258         unsigned bank_num = mcg_cap & 0xff;
3259         u32 msr = msr_info->index;
3260         u64 data = msr_info->data;
3261         u32 offset, last_msr;
3262
3263         switch (msr) {
3264         case MSR_IA32_MCG_STATUS:
3265                 vcpu->arch.mcg_status = data;
3266                 break;
3267         case MSR_IA32_MCG_CTL:
3268                 if (!(mcg_cap & MCG_CTL_P) &&
3269                     (data || !msr_info->host_initiated))
3270                         return 1;
3271                 if (data != 0 && data != ~(u64)0)
3272                         return 1;
3273                 vcpu->arch.mcg_ctl = data;
3274                 break;
3275         case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1:
3276                 last_msr = MSR_IA32_MCx_CTL2(bank_num) - 1;
3277                 if (msr > last_msr)
3278                         return 1;
3279
3280                 if (!(mcg_cap & MCG_CMCI_P) && (data || !msr_info->host_initiated))
3281                         return 1;
3282                 /* An attempt to write a 1 to a reserved bit raises #GP */
3283                 if (data & ~(MCI_CTL2_CMCI_EN | MCI_CTL2_CMCI_THRESHOLD_MASK))
3284                         return 1;
3285                 offset = array_index_nospec(msr - MSR_IA32_MC0_CTL2,
3286                                             last_msr + 1 - MSR_IA32_MC0_CTL2);
3287                 vcpu->arch.mci_ctl2_banks[offset] = data;
3288                 break;
3289         case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
3290                 last_msr = MSR_IA32_MCx_CTL(bank_num) - 1;
3291                 if (msr > last_msr)
3292                         return 1;
3293
3294                 /*
3295                  * Only 0 or all 1s can be written to IA32_MCi_CTL, all other
3296                  * values are architecturally undefined.  But, some Linux
3297                  * kernels clear bit 10 in bank 4 to work around a BIOS/GART TLB
3298                  * issue on AMD K8s, allow bit 10 to be clear when setting all
3299                  * other bits in order to avoid an uncaught #GP in the guest.
3300                  *
3301                  * UNIXWARE clears bit 0 of MC1_CTL to ignore correctable,
3302                  * single-bit ECC data errors.
3303                  */
3304                 if (is_mci_control_msr(msr) &&
3305                     data != 0 && (data | (1 << 10) | 1) != ~(u64)0)
3306                         return 1;
3307
3308                 /*
3309                  * All CPUs allow writing 0 to MCi_STATUS MSRs to clear the MSR.
3310                  * AMD-based CPUs allow non-zero values, but if and only if
3311                  * HWCR[McStatusWrEn] is set.
3312                  */
3313                 if (!msr_info->host_initiated && is_mci_status_msr(msr) &&
3314                     data != 0 && !can_set_mci_status(vcpu))
3315                         return 1;
3316
3317                 offset = array_index_nospec(msr - MSR_IA32_MC0_CTL,
3318                                             last_msr + 1 - MSR_IA32_MC0_CTL);
3319                 vcpu->arch.mce_banks[offset] = data;
3320                 break;
3321         default:
3322                 return 1;
3323         }
3324         return 0;
3325 }
3326
3327 static inline bool kvm_pv_async_pf_enabled(struct kvm_vcpu *vcpu)
3328 {
3329         u64 mask = KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT;
3330
3331         return (vcpu->arch.apf.msr_en_val & mask) == mask;
3332 }
3333
3334 static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
3335 {
3336         gpa_t gpa = data & ~0x3f;
3337
3338         /* Bits 4:5 are reserved and should be zero */
3339         if (data & 0x30)
3340                 return 1;
3341
3342         if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_VMEXIT) &&
3343             (data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT))
3344                 return 1;
3345
3346         if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT) &&
3347             (data & KVM_ASYNC_PF_DELIVERY_AS_INT))
3348                 return 1;
3349
3350         if (!lapic_in_kernel(vcpu))
3351                 return data ? 1 : 0;
3352
3353         vcpu->arch.apf.msr_en_val = data;
3354
3355         if (!kvm_pv_async_pf_enabled(vcpu)) {
3356                 kvm_clear_async_pf_completion_queue(vcpu);
3357                 kvm_async_pf_hash_reset(vcpu);
3358                 return 0;
3359         }
3360
3361         if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
3362                                         sizeof(u64)))
3363                 return 1;
3364
3365         vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
3366         vcpu->arch.apf.delivery_as_pf_vmexit = data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;
3367
3368         kvm_async_pf_wakeup_all(vcpu);
3369
3370         return 0;
3371 }
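
/*
 * For reference, the MSR_KVM_ASYNC_PF_EN layout validated above (see
 * the KVM MSR documentation):
 *
 *      bit 0:     KVM_ASYNC_PF_ENABLED
 *      bit 1:     KVM_ASYNC_PF_SEND_ALWAYS
 *      bit 2:     KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT
 *      bit 3:     KVM_ASYNC_PF_DELIVERY_AS_INT
 *      bits 4:5:  reserved, must be zero
 *      bits 6:63: gpa of the 64-byte aligned data area (hence the ~0x3f)
 */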
3372
3373 static int kvm_pv_enable_async_pf_int(struct kvm_vcpu *vcpu, u64 data)
3374 {
3375         /* Bits 8-63 are reserved */
3376         if (data >> 8)
3377                 return 1;
3378
3379         if (!lapic_in_kernel(vcpu))
3380                 return 1;
3381
3382         vcpu->arch.apf.msr_int_val = data;
3383
3384         vcpu->arch.apf.vec = data & KVM_ASYNC_PF_VEC_MASK;
3385
3386         return 0;
3387 }
3388
3389 static void kvmclock_reset(struct kvm_vcpu *vcpu)
3390 {
3391         kvm_gfn_to_pfn_cache_destroy(vcpu->kvm, &vcpu->arch.pv_time);
3392         vcpu->arch.time = 0;
3393 }
3394
3395 static void kvm_vcpu_flush_tlb_all(struct kvm_vcpu *vcpu)
3396 {
3397         ++vcpu->stat.tlb_flush;
3398         static_call(kvm_x86_flush_tlb_all)(vcpu);
3399 }
3400
3401 static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
3402 {
3403         ++vcpu->stat.tlb_flush;
3404
3405         if (!tdp_enabled) {
3406                 /*
3407                  * A TLB flush on behalf of the guest is equivalent to
3408                  * INVPCID(all), toggling CR4.PGE, etc., which requires
3409                  * a forced sync of the shadow page tables.  Ensure all the
3410                  * roots are synced and the guest TLB in hardware is clean.
3411                  */
3412                 kvm_mmu_sync_roots(vcpu);
3413                 kvm_mmu_sync_prev_roots(vcpu);
3414         }
3415
3416         static_call(kvm_x86_flush_tlb_guest)(vcpu);
3417 }
3418
3419
3420 static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu)
3421 {
3422         ++vcpu->stat.tlb_flush;
3423         static_call(kvm_x86_flush_tlb_current)(vcpu);
3424 }
3425
3426 /*
3427  * Service "local" TLB flush requests, which are specific to the current MMU
3428  * context.  In addition to the generic event handling in vcpu_enter_guest(),
3429  * TLB flushes that are targeted at an MMU context also need to be serviced
3430  * prior to nested VM-Enter/VM-Exit.
3431  */
3432 void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu)
3433 {
3434         if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
3435                 kvm_vcpu_flush_tlb_current(vcpu);
3436
3437         if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu))
3438                 kvm_vcpu_flush_tlb_guest(vcpu);
3439 }
3440 EXPORT_SYMBOL_GPL(kvm_service_local_tlb_flush_requests);
3441
3442 static void record_steal_time(struct kvm_vcpu *vcpu)
3443 {
3444         struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache;
3445         struct kvm_steal_time __user *st;
3446         struct kvm_memslots *slots;
3447         gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS;
3448         u64 steal;
3449         u32 version;
3450
3451         if (kvm_xen_msr_enabled(vcpu->kvm)) {
3452                 kvm_xen_runstate_set_running(vcpu);
3453                 return;
3454         }
3455
3456         if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
3457                 return;
3458
3459         if (WARN_ON_ONCE(current->mm != vcpu->kvm->mm))
3460                 return;
3461
3462         slots = kvm_memslots(vcpu->kvm);
3463
3464         if (unlikely(slots->generation != ghc->generation ||
3465                      gpa != ghc->gpa ||
3466                      kvm_is_error_hva(ghc->hva) || !ghc->memslot)) {
3467                 /* We rely on the fact that it fits in a single page. */
3468                 BUILD_BUG_ON((sizeof(*st) - 1) & KVM_STEAL_VALID_BITS);
3469
3470                 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st)) ||
3471                     kvm_is_error_hva(ghc->hva) || !ghc->memslot)
3472                         return;
3473         }
3474
3475         st = (struct kvm_steal_time __user *)ghc->hva;
3476         /*
3477          * Doing a TLB flush here, on the guest's behalf, can avoid
3478          * expensive IPIs.
3479          */
3480         if (guest_pv_has(vcpu, KVM_FEATURE_PV_TLB_FLUSH)) {
3481                 u8 st_preempted = 0;
3482                 int err = -EFAULT;
3483
3484                 if (!user_access_begin(st, sizeof(*st)))
3485                         return;
3486
3487                 asm volatile("1: xchgb %0, %2\n"
3488                              "xor %1, %1\n"
3489                              "2:\n"
3490                              _ASM_EXTABLE_UA(1b, 2b)
3491                              : "+q" (st_preempted),
3492                                "+&r" (err),
3493                                "+m" (st->preempted));
3494                 if (err)
3495                         goto out;
3496
3497                 user_access_end();
3498
3499                 vcpu->arch.st.preempted = 0;
3500
3501                 trace_kvm_pv_tlb_flush(vcpu->vcpu_id,
3502                                        st_preempted & KVM_VCPU_FLUSH_TLB);
3503                 if (st_preempted & KVM_VCPU_FLUSH_TLB)
3504                         kvm_vcpu_flush_tlb_guest(vcpu);
3505
3506                 if (!user_access_begin(st, sizeof(*st)))
3507                         goto dirty;
3508         } else {
3509                 if (!user_access_begin(st, sizeof(*st)))
3510                         return;
3511
3512                 unsafe_put_user(0, &st->preempted, out);
3513                 vcpu->arch.st.preempted = 0;
3514         }
3515
3516         unsafe_get_user(version, &st->version, out);
3517         if (version & 1)
3518                 version += 1;  /* first time write, random junk */
3519
3520         version += 1;
3521         unsafe_put_user(version, &st->version, out);
3522
3523         smp_wmb();
3524
3525         unsafe_get_user(steal, &st->steal, out);
3526         steal += current->sched_info.run_delay -
3527                 vcpu->arch.st.last_steal;
3528         vcpu->arch.st.last_steal = current->sched_info.run_delay;
3529         unsafe_put_user(steal, &st->steal, out);
3530
3531         version += 1;
3532         unsafe_put_user(version, &st->version, out);
3533
3534  out:
3535         user_access_end();
3536  dirty:
3537         mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
3538 }
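
/*
 * Illustrative guest-side counterpart (a sketch): steal time uses the
 * same even/odd version protocol as pvclock, so a guest samples it
 * roughly as:
 *
 *      do {
 *              version = st->version;          (retry while odd)
 *              rmb();
 *              steal = st->steal;
 *              rmb();
 *      } while ((version & 1) || version != st->version);
 */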
3539
3540 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
3541 {
3542         bool pr = false;
3543         u32 msr = msr_info->index;
3544         u64 data = msr_info->data;
3545
3546         if (msr && msr == vcpu->kvm->arch.xen_hvm_config.msr)
3547                 return kvm_xen_write_hypercall_page(vcpu, data);
3548
3549         switch (msr) {
3550         case MSR_AMD64_NB_CFG:
3551         case MSR_IA32_UCODE_WRITE:
3552         case MSR_VM_HSAVE_PA:
3553         case MSR_AMD64_PATCH_LOADER:
3554         case MSR_AMD64_BU_CFG2:
3555         case MSR_AMD64_DC_CFG:
3556         case MSR_F15H_EX_CFG:
3557                 break;
3558
3559         case MSR_IA32_UCODE_REV:
3560                 if (msr_info->host_initiated)
3561                         vcpu->arch.microcode_version = data;
3562                 break;
3563         case MSR_IA32_ARCH_CAPABILITIES:
3564                 if (!msr_info->host_initiated)
3565                         return 1;
3566                 vcpu->arch.arch_capabilities = data;
3567                 break;
3568         case MSR_IA32_PERF_CAPABILITIES: {
3569                 struct kvm_msr_entry msr_ent = {.index = msr, .data = 0};
3570
3571                 if (!msr_info->host_initiated)
3572                         return 1;
3573                 if (kvm_get_msr_feature(&msr_ent))
3574                         return 1;
3575                 if (data & ~msr_ent.data)
3576                         return 1;
3577
3578                 vcpu->arch.perf_capabilities = data;
3579                 kvm_pmu_refresh(vcpu);
3580                 return 0;
3581         }
3582         case MSR_EFER:
3583                 return set_efer(vcpu, msr_info);
3584         case MSR_K7_HWCR:
3585                 data &= ~(u64)0x40;     /* ignore flush filter disable */
3586                 data &= ~(u64)0x100;    /* ignore ignne emulation enable */
3587                 data &= ~(u64)0x8;      /* ignore TLB cache disable */
3588
3589                 /* Handle McStatusWrEn */
3590                 if (data == BIT_ULL(18)) {
3591                         vcpu->arch.msr_hwcr = data;
3592                 } else if (data != 0) {
3593                         vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
3594                                     data);
3595                         return 1;
3596                 }
3597                 break;
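
                /*
                 * For reference, the HWCR bits handled above (AMD BKDG
                 * naming; an informational note, not new behavior):
                 *      bit 3  (0x008): TlbCacheDis  - ignored
                 *      bit 6  (0x040): FFDis        - ignored
                 *      bit 8  (0x100): IgnneEm      - ignored
                 *      bit 18:         McStatusWrEn - the only bit KVM implements
                 */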
3598         case MSR_FAM10H_MMIO_CONF_BASE:
3599                 if (data != 0) {
3600                         vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
3601                                     "0x%llx\n", data);
3602                         return 1;
3603                 }
3604                 break;
3605         case 0x200 ... MSR_IA32_MC0_CTL2 - 1:
3606         case MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) ... 0x2ff:
3607                 return kvm_mtrr_set_msr(vcpu, msr, data);
3608         case MSR_IA32_APICBASE:
3609                 return kvm_set_apic_base(vcpu, msr_info);
3610         case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
3611                 return kvm_x2apic_msr_write(vcpu, msr, data);
3612         case MSR_IA32_TSC_DEADLINE:
3613                 kvm_set_lapic_tscdeadline_msr(vcpu, data);
3614                 break;
3615         case MSR_IA32_TSC_ADJUST:
3616                 if (guest_cpuid_has(vcpu, X86_FEATURE_TSC_ADJUST)) {
3617                         if (!msr_info->host_initiated) {
3618                                 s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
3619                                 adjust_tsc_offset_guest(vcpu, adj);
3620                                 /* Before back to guest, tsc_timestamp must be adjusted
3621                                  * as well, otherwise guest's percpu pvclock time could jump.
3622                                  */
3623                                 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
3624                         }
3625                         vcpu->arch.ia32_tsc_adjust_msr = data;
3626                 }
3627                 break;
3628         case MSR_IA32_MISC_ENABLE: {
3629                 u64 old_val = vcpu->arch.ia32_misc_enable_msr;
3630
3631                 if (!msr_info->host_initiated) {
3632                         /* RO bits */
3633                         if ((old_val ^ data) & MSR_IA32_MISC_ENABLE_PMU_RO_MASK)
3634                                 return 1;
3635
3636                         /* R bits, i.e. writes are ignored, but don't fault. */
3637                         data = data & ~MSR_IA32_MISC_ENABLE_EMON;
3638                         data |= old_val & MSR_IA32_MISC_ENABLE_EMON;
3639                 }
3640
3641                 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT) &&
3642                     ((old_val ^ data) & MSR_IA32_MISC_ENABLE_MWAIT)) {
3643                         if (!guest_cpuid_has(vcpu, X86_FEATURE_XMM3))
3644                                 return 1;
3645                         vcpu->arch.ia32_misc_enable_msr = data;
3646                         kvm_update_cpuid_runtime(vcpu);
3647                 } else {
3648                         vcpu->arch.ia32_misc_enable_msr = data;
3649                 }
3650                 break;
3651         }
3652         case MSR_IA32_SMBASE:
3653                 if (!msr_info->host_initiated)
3654                         return 1;
3655                 vcpu->arch.smbase = data;
3656                 break;
3657         case MSR_IA32_POWER_CTL:
3658                 vcpu->arch.msr_ia32_power_ctl = data;
3659                 break;
3660         case MSR_IA32_TSC:
3661                 if (msr_info->host_initiated) {
3662                         kvm_synchronize_tsc(vcpu, data);
3663                 } else {
3664                         u64 adj = kvm_compute_l1_tsc_offset(vcpu, data) - vcpu->arch.l1_tsc_offset;
3665                         adjust_tsc_offset_guest(vcpu, adj);
3666                         vcpu->arch.ia32_tsc_adjust_msr += adj;
3667                 }
3668                 break;
3669         case MSR_IA32_XSS:
3670                 if (!msr_info->host_initiated &&
3671                     !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
3672                         return 1;
3673                 /*
3674                  * KVM supports exposing PT to the guest, but does not support
3675                  * IA32_XSS[bit 8]. Guests have to use RDMSR/WRMSR rather than
3676                  * XSAVES/XRSTORS to save/restore PT MSRs.
3677                  */
3678                 if (data & ~kvm_caps.supported_xss)
3679                         return 1;
3680                 vcpu->arch.ia32_xss = data;
3681                 kvm_update_cpuid_runtime(vcpu);
3682                 break;
3683         case MSR_SMI_COUNT:
3684                 if (!msr_info->host_initiated)
3685                         return 1;
3686                 vcpu->arch.smi_count = data;
3687                 break;
3688         case MSR_KVM_WALL_CLOCK_NEW:
3689                 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2))
3690                         return 1;
3691
3692                 vcpu->kvm->arch.wall_clock = data;
3693                 kvm_write_wall_clock(vcpu->kvm, data, 0);
3694                 break;
3695         case MSR_KVM_WALL_CLOCK:
3696                 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE))
3697                         return 1;
3698
3699                 vcpu->kvm->arch.wall_clock = data;
3700                 kvm_write_wall_clock(vcpu->kvm, data, 0);
3701                 break;
3702         case MSR_KVM_SYSTEM_TIME_NEW:
3703                 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2))
3704                         return 1;
3705
3706                 kvm_write_system_time(vcpu, data, false, msr_info->host_initiated);
3707                 break;
3708         case MSR_KVM_SYSTEM_TIME:
3709                 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE))
3710                         return 1;
3711
3712                 kvm_write_system_time(vcpu, data, true, msr_info->host_initiated);
3713                 break;
3714         case MSR_KVM_ASYNC_PF_EN:
3715                 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF))
3716                         return 1;
3717
3718                 if (kvm_pv_enable_async_pf(vcpu, data))
3719                         return 1;
3720                 break;
3721         case MSR_KVM_ASYNC_PF_INT:
3722                 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
3723                         return 1;
3724
3725                 if (kvm_pv_enable_async_pf_int(vcpu, data))
3726                         return 1;
3727                 break;
3728         case MSR_KVM_ASYNC_PF_ACK:
3729                 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
3730                         return 1;
3731                 if (data & 0x1) {
3732                         vcpu->arch.apf.pageready_pending = false;
3733                         kvm_check_async_pf_completion(vcpu);
3734                 }
3735                 break;
3736         case MSR_KVM_STEAL_TIME:
3737                 if (!guest_pv_has(vcpu, KVM_FEATURE_STEAL_TIME))
3738                         return 1;
3739
3740                 if (unlikely(!sched_info_on()))
3741                         return 1;
3742
3743                 if (data & KVM_STEAL_RESERVED_MASK)
3744                         return 1;
3745
3746                 vcpu->arch.st.msr_val = data;
3747
3748                 if (!(data & KVM_MSR_ENABLED))
3749                         break;
3750
3751                 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
3752
3753                 break;
3754         case MSR_KVM_PV_EOI_EN:
3755                 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_EOI))
3756                         return 1;
3757
3758                 if (kvm_lapic_set_pv_eoi(vcpu, data, sizeof(u8)))
3759                         return 1;
3760                 break;
3761
3762         case MSR_KVM_POLL_CONTROL:
3763                 if (!guest_pv_has(vcpu, KVM_FEATURE_POLL_CONTROL))
3764                         return 1;
3765
3766                 /* only enable bit supported */
3767                 if (data & (-1ULL << 1))
3768                         return 1;
3769
3770                 vcpu->arch.msr_kvm_poll_control = data;
3771                 break;
3772
3773         case MSR_IA32_MCG_CTL:
3774         case MSR_IA32_MCG_STATUS:
3775         case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
3776         case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1:
3777                 return set_msr_mce(vcpu, msr_info);
3778
3779         case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
3780         case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
3781                 pr = true;
3782                 fallthrough;
3783         case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
3784         case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
3785                 if (kvm_pmu_is_valid_msr(vcpu, msr))
3786                         return kvm_pmu_set_msr(vcpu, msr_info);
3787
3788                 if (pr || data != 0)
3789                         vcpu_unimpl(vcpu, "disabled perfctr wrmsr: "
3790                                     "0x%x data 0x%llx\n", msr, data);
3791                 break;
3792         case MSR_K7_CLK_CTL:
3793                 /*
3794                  * Ignore all writes to this no longer documented MSR.
3795                  * Writes are only relevant for old K7 processors,
3796                  * all pre-dating SVM, but a recommended workaround from
3797                  * AMD for these chips. It is possible to specify the
3798                  * affected processor models on the command line, hence
3799                  * the need to ignore the workaround.
3800                  */
3801                 break;
3802         case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
3803         case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
3804         case HV_X64_MSR_SYNDBG_OPTIONS:
3805         case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
3806         case HV_X64_MSR_CRASH_CTL:
3807         case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT:
3808         case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
3809         case HV_X64_MSR_TSC_EMULATION_CONTROL:
3810         case HV_X64_MSR_TSC_EMULATION_STATUS:
3811                 return kvm_hv_set_msr_common(vcpu, msr, data,
3812                                              msr_info->host_initiated);
3813         case MSR_IA32_BBL_CR_CTL3:
3814                 /* Drop writes to this legacy MSR -- see rdmsr
3815                  * counterpart for further detail.
3816                  */
3817                 if (report_ignored_msrs)
3818                         vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n",
3819                                 msr, data);
3820                 break;
3821         case MSR_AMD64_OSVW_ID_LENGTH:
3822                 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
3823                         return 1;
3824                 vcpu->arch.osvw.length = data;
3825                 break;
3826         case MSR_AMD64_OSVW_STATUS:
3827                 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
3828                         return 1;
3829                 vcpu->arch.osvw.status = data;
3830                 break;
3831         case MSR_PLATFORM_INFO:
3832                 if (!msr_info->host_initiated ||
3833                     (!(data & MSR_PLATFORM_INFO_CPUID_FAULT) &&
3834                      cpuid_fault_enabled(vcpu)))
3835                         return 1;
3836                 vcpu->arch.msr_platform_info = data;
3837                 break;
3838         case MSR_MISC_FEATURES_ENABLES:
3839                 if (data & ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT ||
3840                     (data & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
3841                      !supports_cpuid_fault(vcpu)))
3842                         return 1;
3843                 vcpu->arch.msr_misc_features_enables = data;
3844                 break;
3845 #ifdef CONFIG_X86_64
3846         case MSR_IA32_XFD:
3847                 if (!msr_info->host_initiated &&
3848                     !guest_cpuid_has(vcpu, X86_FEATURE_XFD))
3849                         return 1;
3850
3851                 if (data & ~kvm_guest_supported_xfd(vcpu))
3852                         return 1;
3853
3854                 fpu_update_guest_xfd(&vcpu->arch.guest_fpu, data);
3855                 break;
3856         case MSR_IA32_XFD_ERR:
3857                 if (!msr_info->host_initiated &&
3858                     !guest_cpuid_has(vcpu, X86_FEATURE_XFD))
3859                         return 1;
3860
3861                 if (data & ~kvm_guest_supported_xfd(vcpu))
3862                         return 1;
3863
3864                 vcpu->arch.guest_fpu.xfd_err = data;
3865                 break;
3866 #endif
3867         case MSR_IA32_PEBS_ENABLE:
3868         case MSR_IA32_DS_AREA:
3869         case MSR_PEBS_DATA_CFG:
3870         case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
3871                 if (kvm_pmu_is_valid_msr(vcpu, msr))
3872                         return kvm_pmu_set_msr(vcpu, msr_info);
3873                 /*
3874                  * Userspace is allowed to write '0' to MSRs that KVM reports
3875                  * as to-be-saved, even if an MSR isn't fully supported.
3876                  */
3877                 return !msr_info->host_initiated || data;
3878         default:
3879                 if (kvm_pmu_is_valid_msr(vcpu, msr))
3880                         return kvm_pmu_set_msr(vcpu, msr_info);
3881                 return KVM_MSR_RET_INVALID;
3882         }
3883         return 0;
3884 }
3885 EXPORT_SYMBOL_GPL(kvm_set_msr_common);
3886
3887 static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
3888 {
3889         u64 data;
3890         u64 mcg_cap = vcpu->arch.mcg_cap;
3891         unsigned bank_num = mcg_cap & 0xff;
3892         u32 offset, last_msr;
3893
3894         switch (msr) {
3895         case MSR_IA32_P5_MC_ADDR:
3896         case MSR_IA32_P5_MC_TYPE:
3897                 data = 0;
3898                 break;
3899         case MSR_IA32_MCG_CAP:
3900                 data = vcpu->arch.mcg_cap;
3901                 break;
3902         case MSR_IA32_MCG_CTL:
3903                 if (!(mcg_cap & MCG_CTL_P) && !host)
3904                         return 1;
3905                 data = vcpu->arch.mcg_ctl;
3906                 break;
3907         case MSR_IA32_MCG_STATUS:
3908                 data = vcpu->arch.mcg_status;
3909                 break;
3910         case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1:
3911                 last_msr = MSR_IA32_MCx_CTL2(bank_num) - 1;
3912                 if (msr > last_msr)
3913                         return 1;
3914
3915                 if (!(mcg_cap & MCG_CMCI_P) && !host)
3916                         return 1;
3917                 offset = array_index_nospec(msr - MSR_IA32_MC0_CTL2,
3918                                             last_msr + 1 - MSR_IA32_MC0_CTL2);
3919                 data = vcpu->arch.mci_ctl2_banks[offset];
3920                 break;
3921         case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
3922                 last_msr = MSR_IA32_MCx_CTL(bank_num) - 1;
3923                 if (msr > last_msr)
3924                         return 1;
3925
3926                 offset = array_index_nospec(msr - MSR_IA32_MC0_CTL,
3927                                             last_msr + 1 - MSR_IA32_MC0_CTL);
3928                 data = vcpu->arch.mce_banks[offset];
3929                 break;
3930         default:
3931                 return 1;
3932         }
3933         *pdata = data;
3934         return 0;
3935 }
3936
3937 int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
3938 {
3939         switch (msr_info->index) {
3940         case MSR_IA32_PLATFORM_ID:
3941         case MSR_IA32_EBL_CR_POWERON:
3942         case MSR_IA32_LASTBRANCHFROMIP:
3943         case MSR_IA32_LASTBRANCHTOIP:
3944         case MSR_IA32_LASTINTFROMIP:
3945         case MSR_IA32_LASTINTTOIP:
3946         case MSR_AMD64_SYSCFG:
3947         case MSR_K8_TSEG_ADDR:
3948         case MSR_K8_TSEG_MASK:
3949         case MSR_VM_HSAVE_PA:
3950         case MSR_K8_INT_PENDING_MSG:
3951         case MSR_AMD64_NB_CFG:
3952         case MSR_FAM10H_MMIO_CONF_BASE:
3953         case MSR_AMD64_BU_CFG2:
3954         case MSR_IA32_PERF_CTL:
3955         case MSR_AMD64_DC_CFG:
3956         case MSR_F15H_EX_CFG:
3957         /*
3958          * Intel Sandy Bridge CPUs must support the RAPL (running average power
3959          * limit) MSRs. Just return 0, as we do not want to expose the host
3960          * data here. Do not conditionalize this on CPUID, as KVM does not do
3961          * so for existing CPU-specific MSRs.
3962          */
3963         case MSR_RAPL_POWER_UNIT:
3964         case MSR_PP0_ENERGY_STATUS:     /* Power plane 0 (core) */
3965         case MSR_PP1_ENERGY_STATUS:     /* Power plane 1 (graphics uncore) */
3966         case MSR_PKG_ENERGY_STATUS:     /* Total package */
3967         case MSR_DRAM_ENERGY_STATUS:    /* DRAM controller */
3968                 msr_info->data = 0;
3969                 break;
3970         case MSR_IA32_PEBS_ENABLE:
3971         case MSR_IA32_DS_AREA:
3972         case MSR_PEBS_DATA_CFG:
3973         case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
3974                 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
3975                         return kvm_pmu_get_msr(vcpu, msr_info);
3976                 /*
3977                  * Userspace is allowed to read MSRs that KVM reports as
3978                  * to-be-saved, even if an MSR isn't fully supported.
3979                  */
3980                 if (!msr_info->host_initiated)
3981                         return 1;
3982                 msr_info->data = 0;
3983                 break;
3984         case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
3985         case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
3986         case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
3987         case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
3988                 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
3989                         return kvm_pmu_get_msr(vcpu, msr_info);
3990                 msr_info->data = 0;
3991                 break;
3992         case MSR_IA32_UCODE_REV:
3993                 msr_info->data = vcpu->arch.microcode_version;
3994                 break;
3995         case MSR_IA32_ARCH_CAPABILITIES:
3996                 if (!msr_info->host_initiated &&
3997                     !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES))
3998                         return 1;
3999                 msr_info->data = vcpu->arch.arch_capabilities;
4000                 break;
4001         case MSR_IA32_PERF_CAPABILITIES:
4002                 if (!msr_info->host_initiated &&
4003                     !guest_cpuid_has(vcpu, X86_FEATURE_PDCM))
4004                         return 1;
4005                 msr_info->data = vcpu->arch.perf_capabilities;
4006                 break;
4007         case MSR_IA32_POWER_CTL:
4008                 msr_info->data = vcpu->arch.msr_ia32_power_ctl;
4009                 break;
4010         case MSR_IA32_TSC: {
4011                 /*
4012                  * Intel SDM states that MSR_IA32_TSC read adds the TSC offset
4013                  * even when not intercepted. AMD manual doesn't explicitly
4014                  * state this but appears to behave the same.
4015                  *
4016                  * On userspace reads and writes, however, we unconditionally
4017                  * return L1's TSC value to ensure backwards-compatible
4018                  * behavior for migration.
4019                  */
4020                 u64 offset, ratio;
4021
4022                 if (msr_info->host_initiated) {
4023                         offset = vcpu->arch.l1_tsc_offset;
4024                         ratio = vcpu->arch.l1_tsc_scaling_ratio;
4025                 } else {
4026                         offset = vcpu->arch.tsc_offset;
4027                         ratio = vcpu->arch.tsc_scaling_ratio;
4028                 }
4029
4030                 msr_info->data = kvm_scale_tsc(rdtsc(), ratio) + offset;
4031                 break;
4032         }
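
        /*
         * I.e. the value returned above is derived as
         *
         *      guest_tsc = host_tsc * tsc_scaling_ratio + tsc_offset
         *
         * using the L1 pair for host-initiated (migration) accesses and
         * the current, possibly L2, pair for guest-initiated reads.
         */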
4033         case MSR_MTRRcap:
4034         case 0x200 ... MSR_IA32_MC0_CTL2 - 1:
4035         case MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) ... 0x2ff:
4036                 return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data);
4037         case 0xcd: /* fsb frequency */
4038                 msr_info->data = 3;
4039                 break;
4040                 /*
4041                  * MSR_EBC_FREQUENCY_ID
4042                  * Conservative value valid for even the basic CPU models.
4043                  * Models 0 and 1: 000 in bits 23:21 indicates a bus speed of
4044                  * 100MHz; model 2: 000 in bits 18:16 indicates 100MHz,
4045                  * and 266MHz for models 3 and 4. Set Core Clock
4046                  * Frequency to System Bus Frequency Ratio to 1 (bits
4047                  * 31:24) even though these are only valid for CPU
4048                  * models > 2, however guests may end up dividing or
4049                  * multiplying by zero otherwise.
4050                  */
4051         case MSR_EBC_FREQUENCY_ID:
4052                 msr_info->data = 1 << 24;
4053                 break;
4054         case MSR_IA32_APICBASE:
4055                 msr_info->data = kvm_get_apic_base(vcpu);
4056                 break;
4057         case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
4058                 return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data);
4059         case MSR_IA32_TSC_DEADLINE:
4060                 msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu);
4061                 break;
4062         case MSR_IA32_TSC_ADJUST:
4063                 msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr;
4064                 break;
4065         case MSR_IA32_MISC_ENABLE:
4066                 msr_info->data = vcpu->arch.ia32_misc_enable_msr;
4067                 break;
4068         case MSR_IA32_SMBASE:
4069                 if (!msr_info->host_initiated)
4070                         return 1;
4071                 msr_info->data = vcpu->arch.smbase;
4072                 break;
4073         case MSR_SMI_COUNT:
4074                 msr_info->data = vcpu->arch.smi_count;
4075                 break;
4076         case MSR_IA32_PERF_STATUS:
4077                 /* TSC increment by tick */
4078                 msr_info->data = 1000ULL;
4079                 /* CPU multiplier */
4080                 msr_info->data |= (((uint64_t)4ULL) << 40);
4081                 break;
4082         case MSR_EFER:
4083                 msr_info->data = vcpu->arch.efer;
4084                 break;
4085         case MSR_KVM_WALL_CLOCK:
4086                 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE))
4087                         return 1;
4088
4089                 msr_info->data = vcpu->kvm->arch.wall_clock;
4090                 break;
4091         case MSR_KVM_WALL_CLOCK_NEW:
4092                 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2))
4093                         return 1;
4094
4095                 msr_info->data = vcpu->kvm->arch.wall_clock;
4096                 break;
4097         case MSR_KVM_SYSTEM_TIME:
4098                 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE))
4099                         return 1;
4100
4101                 msr_info->data = vcpu->arch.time;
4102                 break;
4103         case MSR_KVM_SYSTEM_TIME_NEW:
4104                 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2))
4105                         return 1;
4106
4107                 msr_info->data = vcpu->arch.time;
4108                 break;
4109         case MSR_KVM_ASYNC_PF_EN:
4110                 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF))
4111                         return 1;
4112
4113                 msr_info->data = vcpu->arch.apf.msr_en_val;
4114                 break;
4115         case MSR_KVM_ASYNC_PF_INT:
4116                 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
4117                         return 1;
4118
4119                 msr_info->data = vcpu->arch.apf.msr_int_val;
4120                 break;
4121         case MSR_KVM_ASYNC_PF_ACK:
4122                 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
4123                         return 1;
4124
4125                 msr_info->data = 0;
4126                 break;
4127         case MSR_KVM_STEAL_TIME:
4128                 if (!guest_pv_has(vcpu, KVM_FEATURE_STEAL_TIME))
4129                         return 1;
4130
4131                 msr_info->data = vcpu->arch.st.msr_val;
4132                 break;
4133         case MSR_KVM_PV_EOI_EN:
4134                 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_EOI))
4135                         return 1;
4136
4137                 msr_info->data = vcpu->arch.pv_eoi.msr_val;
4138                 break;
4139         case MSR_KVM_POLL_CONTROL:
4140                 if (!guest_pv_has(vcpu, KVM_FEATURE_POLL_CONTROL))
4141                         return 1;
4142
4143                 msr_info->data = vcpu->arch.msr_kvm_poll_control;
4144                 break;
4145         case MSR_IA32_P5_MC_ADDR:
4146         case MSR_IA32_P5_MC_TYPE:
4147         case MSR_IA32_MCG_CAP:
4148         case MSR_IA32_MCG_CTL:
4149         case MSR_IA32_MCG_STATUS:
4150         case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
4151         case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1:
4152                 return get_msr_mce(vcpu, msr_info->index, &msr_info->data,
4153                                    msr_info->host_initiated);
4154         case MSR_IA32_XSS:
4155                 if (!msr_info->host_initiated &&
4156                     !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
4157                         return 1;
4158                 msr_info->data = vcpu->arch.ia32_xss;
4159                 break;
4160         case MSR_K7_CLK_CTL:
4161                 /*
4162                  * Provide expected ramp-up count for K7. All others
4163                  * are set to zero, indicating minimum divisors for
4164                  * every field.
4165                  *
4166                  * This prevents guest kernels on AMD host with CPU
4167                  * type 6, model 8 and higher from exploding due to
4168                  * the rdmsr failing.
4169                  */
4170                 msr_info->data = 0x20000000;
4171                 break;
4172         case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
4173         case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
4174         case HV_X64_MSR_SYNDBG_OPTIONS:
4175         case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
4176         case HV_X64_MSR_CRASH_CTL:
4177         case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT:
4178         case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
4179         case HV_X64_MSR_TSC_EMULATION_CONTROL:
4180         case HV_X64_MSR_TSC_EMULATION_STATUS:
4181                 return kvm_hv_get_msr_common(vcpu,
4182                                              msr_info->index, &msr_info->data,
4183                                              msr_info->host_initiated);
4184         case MSR_IA32_BBL_CR_CTL3:
4185                 /* This legacy MSR exists but isn't fully documented in current
4186                  * silicon.  It is however accessed by winxp in very narrow
4187                  * scenarios where it sets bit #19, itself documented as
4188                  * a "reserved" bit.  Best effort attempt to source coherent
4189                  * read data here should the balance of the register be
4190                  * interpreted by the guest:
4191                  *
4192                  * L2 cache control register 3: 64GB range, 256KB size,
4193                  * enabled, latency 0x1, configured
4194                  */
4195                 msr_info->data = 0xbe702111;
4196                 break;
4197         case MSR_AMD64_OSVW_ID_LENGTH:
4198                 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
4199                         return 1;
4200                 msr_info->data = vcpu->arch.osvw.length;
4201                 break;
4202         case MSR_AMD64_OSVW_STATUS:
4203                 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
4204                         return 1;
4205                 msr_info->data = vcpu->arch.osvw.status;
4206                 break;
4207         case MSR_PLATFORM_INFO:
4208                 if (!msr_info->host_initiated &&
4209                     !vcpu->kvm->arch.guest_can_read_msr_platform_info)
4210                         return 1;
4211                 msr_info->data = vcpu->arch.msr_platform_info;
4212                 break;
4213         case MSR_MISC_FEATURES_ENABLES:
4214                 msr_info->data = vcpu->arch.msr_misc_features_enables;
4215                 break;
4216         case MSR_K7_HWCR:
4217                 msr_info->data = vcpu->arch.msr_hwcr;
4218                 break;
4219 #ifdef CONFIG_X86_64
4220         case MSR_IA32_XFD:
4221                 if (!msr_info->host_initiated &&
4222                     !guest_cpuid_has(vcpu, X86_FEATURE_XFD))
4223                         return 1;
4224
4225                 msr_info->data = vcpu->arch.guest_fpu.fpstate->xfd;
4226                 break;
4227         case MSR_IA32_XFD_ERR:
4228                 if (!msr_info->host_initiated &&
4229                     !guest_cpuid_has(vcpu, X86_FEATURE_XFD))
4230                         return 1;
4231
4232                 msr_info->data = vcpu->arch.guest_fpu.xfd_err;
4233                 break;
4234 #endif
4235         default:
4236                 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
4237                         return kvm_pmu_get_msr(vcpu, msr_info);
4238                 return KVM_MSR_RET_INVALID;
4239         }
4240         return 0;
4241 }
4242 EXPORT_SYMBOL_GPL(kvm_get_msr_common);
4243
4244 /*
4245  * Read or write a bunch of msrs. All parameters are kernel addresses.
4246  *
4247  * @return number of msrs processed successfully.
4248  */
4249 static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
4250                     struct kvm_msr_entry *entries,
4251                     int (*do_msr)(struct kvm_vcpu *vcpu,
4252                                   unsigned index, u64 *data))
4253 {
4254         int i;
4255
4256         for (i = 0; i < msrs->nmsrs; ++i)
4257                 if (do_msr(vcpu, entries[i].index, &entries[i].data))
4258                         break;
4259
4260         return i;
4261 }
4262
4263 /*
4264  * Read or write a bunch of msrs. Parameters are user addresses.
4265  *
4266  * @return number of msrs processed successfully.
4267  */
4268 static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
4269                   int (*do_msr)(struct kvm_vcpu *vcpu,
4270                                 unsigned index, u64 *data),
4271                   int writeback)
4272 {
4273         struct kvm_msrs msrs;
4274         struct kvm_msr_entry *entries;
4275         int r, n;
4276         unsigned size;
4277
4278         r = -EFAULT;
4279         if (copy_from_user(&msrs, user_msrs, sizeof(msrs)))
4280                 goto out;
4281
4282         r = -E2BIG;
4283         if (msrs.nmsrs >= MAX_IO_MSRS)
4284                 goto out;
4285
4286         size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
4287         entries = memdup_user(user_msrs->entries, size);
4288         if (IS_ERR(entries)) {
4289                 r = PTR_ERR(entries);
4290                 goto out;
4291         }
4292
4293         r = n = __msr_io(vcpu, &msrs, entries, do_msr);
4294         if (r < 0)
4295                 goto out_free;
4296
4297         r = -EFAULT;
4298         if (writeback && copy_to_user(user_msrs->entries, entries, size))
4299                 goto out_free;
4300
4301         r = n;
4302
4303 out_free:
4304         kfree(entries);
4305 out:
4306         return r;
4307 }
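
/*
 * Illustrative userspace sketch (not part of this file; "vcpu_fd" is an
 * assumed open vCPU descriptor): msr_io() above is what backs KVM_GET_MSRS
 * and KVM_SET_MSRS, taking a counted array of kvm_msr_entry and returning
 * how many entries were processed.
 *
 *	struct {
 *		struct kvm_msrs hdr;
 *		struct kvm_msr_entry entries[1];
 *	} msrs = {
 *		.hdr.nmsrs = 1,
 *		.entries[0].index = 0xc0000080,	// MSR_EFER
 *	};
 *	int n = ioctl(vcpu_fd, KVM_GET_MSRS, &msrs);
 *	// on success n == 1 and msrs.entries[0].data holds the value
 */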
4308
4309 static inline bool kvm_can_mwait_in_guest(void)
4310 {
4311         return boot_cpu_has(X86_FEATURE_MWAIT) &&
4312                 !boot_cpu_has_bug(X86_BUG_MONITOR) &&
4313                 boot_cpu_has(X86_FEATURE_ARAT);
4314 }
4315
4316 static int kvm_ioctl_get_supported_hv_cpuid(struct kvm_vcpu *vcpu,
4317                                             struct kvm_cpuid2 __user *cpuid_arg)
4318 {
4319         struct kvm_cpuid2 cpuid;
4320         int r;
4321
4322         r = -EFAULT;
4323         if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
4324                 return r;
4325
4326         r = kvm_get_hv_cpuid(vcpu, &cpuid, cpuid_arg->entries);
4327         if (r)
4328                 return r;
4329
4330         r = -EFAULT;
4331         if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid)))
4332                 return r;
4333
4334         return 0;
4335 }
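
/*
 * Illustrative userspace sketch (assumed "vcpu_fd"): the caller sizes
 * kvm_cpuid2 itself; kvm_get_hv_cpuid() fails with -E2BIG when nent is
 * too small, so a generous fixed count is a common simplification.
 *
 *	struct kvm_cpuid2 *cpuid;
 *
 *	cpuid = calloc(1, sizeof(*cpuid) + 64 * sizeof(cpuid->entries[0]));
 *	cpuid->nent = 64;
 *	if (!ioctl(vcpu_fd, KVM_GET_SUPPORTED_HV_CPUID, cpuid))
 *		printf("%u Hyper-V leaves\n", cpuid->nent);
 */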
4336
4337 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
4338 {
4339         int r = 0;
4340
4341         switch (ext) {
4342         case KVM_CAP_IRQCHIP:
4343         case KVM_CAP_HLT:
4344         case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
4345         case KVM_CAP_SET_TSS_ADDR:
4346         case KVM_CAP_EXT_CPUID:
4347         case KVM_CAP_EXT_EMUL_CPUID:
4348         case KVM_CAP_CLOCKSOURCE:
4349         case KVM_CAP_PIT:
4350         case KVM_CAP_NOP_IO_DELAY:
4351         case KVM_CAP_MP_STATE:
4352         case KVM_CAP_SYNC_MMU:
4353         case KVM_CAP_USER_NMI:
4354         case KVM_CAP_REINJECT_CONTROL:
4355         case KVM_CAP_IRQ_INJECT_STATUS:
4356         case KVM_CAP_IOEVENTFD:
4357         case KVM_CAP_IOEVENTFD_NO_LENGTH:
4358         case KVM_CAP_PIT2:
4359         case KVM_CAP_PIT_STATE2:
4360         case KVM_CAP_SET_IDENTITY_MAP_ADDR:
4361         case KVM_CAP_VCPU_EVENTS:
4362         case KVM_CAP_HYPERV:
4363         case KVM_CAP_HYPERV_VAPIC:
4364         case KVM_CAP_HYPERV_SPIN:
4365         case KVM_CAP_HYPERV_SYNIC:
4366         case KVM_CAP_HYPERV_SYNIC2:
4367         case KVM_CAP_HYPERV_VP_INDEX:
4368         case KVM_CAP_HYPERV_EVENTFD:
4369         case KVM_CAP_HYPERV_TLBFLUSH:
4370         case KVM_CAP_HYPERV_SEND_IPI:
4371         case KVM_CAP_HYPERV_CPUID:
4372         case KVM_CAP_HYPERV_ENFORCE_CPUID:
4373         case KVM_CAP_SYS_HYPERV_CPUID:
4374         case KVM_CAP_PCI_SEGMENT:
4375         case KVM_CAP_DEBUGREGS:
4376         case KVM_CAP_X86_ROBUST_SINGLESTEP:
4377         case KVM_CAP_XSAVE:
4378         case KVM_CAP_ASYNC_PF:
4379         case KVM_CAP_ASYNC_PF_INT:
4380         case KVM_CAP_GET_TSC_KHZ:
4381         case KVM_CAP_KVMCLOCK_CTRL:
4382         case KVM_CAP_READONLY_MEM:
4383         case KVM_CAP_HYPERV_TIME:
4384         case KVM_CAP_IOAPIC_POLARITY_IGNORED:
4385         case KVM_CAP_TSC_DEADLINE_TIMER:
4386         case KVM_CAP_DISABLE_QUIRKS:
4387         case KVM_CAP_SET_BOOT_CPU_ID:
4388         case KVM_CAP_SPLIT_IRQCHIP:
4389         case KVM_CAP_IMMEDIATE_EXIT:
4390         case KVM_CAP_PMU_EVENT_FILTER:
4391         case KVM_CAP_GET_MSR_FEATURES:
4392         case KVM_CAP_MSR_PLATFORM_INFO:
4393         case KVM_CAP_EXCEPTION_PAYLOAD:
4394         case KVM_CAP_X86_TRIPLE_FAULT_EVENT:
4395         case KVM_CAP_SET_GUEST_DEBUG:
4396         case KVM_CAP_LAST_CPU:
4397         case KVM_CAP_X86_USER_SPACE_MSR:
4398         case KVM_CAP_X86_MSR_FILTER:
4399         case KVM_CAP_ENFORCE_PV_FEATURE_CPUID:
4400 #ifdef CONFIG_X86_SGX_KVM
4401         case KVM_CAP_SGX_ATTRIBUTE:
4402 #endif
4403         case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM:
4404         case KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM:
4405         case KVM_CAP_SREGS2:
4406         case KVM_CAP_EXIT_ON_EMULATION_FAILURE:
4407         case KVM_CAP_VCPU_ATTRIBUTES:
4408         case KVM_CAP_SYS_ATTRIBUTES:
4409         case KVM_CAP_VAPIC:
4410         case KVM_CAP_ENABLE_CAP:
4411         case KVM_CAP_VM_DISABLE_NX_HUGE_PAGES:
4412                 r = 1;
4413                 break;
4414         case KVM_CAP_EXIT_HYPERCALL:
4415                 r = KVM_EXIT_HYPERCALL_VALID_MASK;
4416                 break;
4417         case KVM_CAP_SET_GUEST_DEBUG2:
4418                 return KVM_GUESTDBG_VALID_MASK;
4419 #ifdef CONFIG_KVM_XEN
4420         case KVM_CAP_XEN_HVM:
4421                 r = KVM_XEN_HVM_CONFIG_HYPERCALL_MSR |
4422                     KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL |
4423                     KVM_XEN_HVM_CONFIG_SHARED_INFO |
4424                     KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL |
4425                     KVM_XEN_HVM_CONFIG_EVTCHN_SEND;
4426                 if (sched_info_on())
4427                         r |= KVM_XEN_HVM_CONFIG_RUNSTATE;
4428                 break;
4429 #endif
4430         case KVM_CAP_SYNC_REGS:
4431                 r = KVM_SYNC_X86_VALID_FIELDS;
4432                 break;
4433         case KVM_CAP_ADJUST_CLOCK:
4434                 r = KVM_CLOCK_VALID_FLAGS;
4435                 break;
4436         case KVM_CAP_X86_DISABLE_EXITS:
4437                 r |= KVM_X86_DISABLE_EXITS_HLT | KVM_X86_DISABLE_EXITS_PAUSE |
4438                      KVM_X86_DISABLE_EXITS_CSTATE;
4439                 if (kvm_can_mwait_in_guest())
4440                         r |= KVM_X86_DISABLE_EXITS_MWAIT;
4441                 break;
4442         case KVM_CAP_X86_SMM:
4443                 /* SMBASE is usually relocated above 1M on modern chipsets,
4444                  * and SMM handlers might indeed rely on 4G segment limits,
4445                  * so do not report SMM to be available if real mode is
4446                  * emulated via vm86 mode.  Still, do not go to great lengths
4447                  * to avoid userspace's usage of the feature, because it is a
4448                  * fringe case that is not enabled except via specific settings
4449                  * of the module parameters.
4450                  */
4451                 r = static_call(kvm_x86_has_emulated_msr)(kvm, MSR_IA32_SMBASE);
4452                 break;
4453         case KVM_CAP_NR_VCPUS:
4454                 r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
4455                 break;
4456         case KVM_CAP_MAX_VCPUS:
4457                 r = KVM_MAX_VCPUS;
4458                 break;
4459         case KVM_CAP_MAX_VCPU_ID:
4460                 r = KVM_MAX_VCPU_IDS;
4461                 break;
4462         case KVM_CAP_PV_MMU:    /* obsolete */
4463                 r = 0;
4464                 break;
4465         case KVM_CAP_MCE:
4466                 r = KVM_MAX_MCE_BANKS;
4467                 break;
4468         case KVM_CAP_XCRS:
4469                 r = boot_cpu_has(X86_FEATURE_XSAVE);
4470                 break;
4471         case KVM_CAP_TSC_CONTROL:
4472         case KVM_CAP_VM_TSC_CONTROL:
4473                 r = kvm_caps.has_tsc_control;
4474                 break;
4475         case KVM_CAP_X2APIC_API:
4476                 r = KVM_X2APIC_API_VALID_FLAGS;
4477                 break;
4478         case KVM_CAP_NESTED_STATE:
4479                 r = kvm_x86_ops.nested_ops->get_state ?
4480                         kvm_x86_ops.nested_ops->get_state(NULL, NULL, 0) : 0;
4481                 break;
4482         case KVM_CAP_HYPERV_DIRECT_TLBFLUSH:
4483                 r = kvm_x86_ops.enable_direct_tlbflush != NULL;
4484                 break;
4485         case KVM_CAP_HYPERV_ENLIGHTENED_VMCS:
4486                 r = kvm_x86_ops.nested_ops->enable_evmcs != NULL;
4487                 break;
4488         case KVM_CAP_SMALLER_MAXPHYADDR:
4489                 r = (int) allow_smaller_maxphyaddr;
4490                 break;
4491         case KVM_CAP_STEAL_TIME:
4492                 r = sched_info_on();
4493                 break;
4494         case KVM_CAP_X86_BUS_LOCK_EXIT:
4495                 if (kvm_caps.has_bus_lock_exit)
4496                         r = KVM_BUS_LOCK_DETECTION_OFF |
4497                             KVM_BUS_LOCK_DETECTION_EXIT;
4498                 else
4499                         r = 0;
4500                 break;
4501         case KVM_CAP_XSAVE2: {
4502                 u64 guest_perm = xstate_get_guest_group_perm();
4503
4504                 r = xstate_required_size(kvm_caps.supported_xcr0 & guest_perm, false);
4505                 if (r < sizeof(struct kvm_xsave))
4506                         r = sizeof(struct kvm_xsave);
4507                 break;
4508         }
4509         case KVM_CAP_PMU_CAPABILITY:
4510                 r = enable_pmu ? KVM_CAP_PMU_VALID_MASK : 0;
4511                 break;
4512         case KVM_CAP_DISABLE_QUIRKS2:
4513                 r = KVM_X86_VALID_QUIRKS;
4514                 break;
4515         case KVM_CAP_X86_NOTIFY_VMEXIT:
4516                 r = kvm_caps.has_notify_vmexit;
4517                 break;
4518         default:
4519                 break;
4520         }
4521         return r;
4522 }
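
/*
 * Illustrative userspace sketch (assumed "vm_fd" from KVM_CREATE_VM):
 * the values computed above are surfaced by KVM_CHECK_EXTENSION, so
 * non-boolean capabilities report a mask or count rather than 0/1.
 *
 *	int exits = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_X86_DISABLE_EXITS);
 *	bool can_keep_mwait = exits & KVM_X86_DISABLE_EXITS_MWAIT;
 */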
4523
4524 static inline void __user *kvm_get_attr_addr(struct kvm_device_attr *attr)
4525 {
4526         void __user *uaddr = (void __user *)(unsigned long)attr->addr;
4527
4528         if ((u64)(unsigned long)uaddr != attr->addr)
4529                 return ERR_PTR_USR(-EFAULT);
4530         return uaddr;
4531 }
4532
4533 static int kvm_x86_dev_get_attr(struct kvm_device_attr *attr)
4534 {
4535         u64 __user *uaddr = kvm_get_attr_addr(attr);
4536
4537         if (attr->group)
4538                 return -ENXIO;
4539
4540         if (IS_ERR(uaddr))
4541                 return PTR_ERR(uaddr);
4542
4543         switch (attr->attr) {
4544         case KVM_X86_XCOMP_GUEST_SUPP:
4545                 if (put_user(kvm_caps.supported_xcr0, uaddr))
4546                         return -EFAULT;
4547                 return 0;
4548         default:
4549                 return -ENXIO;
4551         }
4552 }
4553
4554 static int kvm_x86_dev_has_attr(struct kvm_device_attr *attr)
4555 {
4556         if (attr->group)
4557                 return -ENXIO;
4558
4559         switch (attr->attr) {
4560         case KVM_X86_XCOMP_GUEST_SUPP:
4561                 return 0;
4562         default:
4563                 return -ENXIO;
4564         }
4565 }
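
/*
 * Illustrative userspace sketch (assumed "kvm_fd" is an open /dev/kvm):
 * querying the system-scoped attribute served above, which reports the
 * XSTATE components KVM can expose to guests.
 *
 *	__u64 xcomp;
 *	struct kvm_device_attr attr = {
 *		.group = 0,
 *		.attr = KVM_X86_XCOMP_GUEST_SUPP,
 *		.addr = (__u64)(unsigned long)&xcomp,
 *	};
 *
 *	if (!ioctl(kvm_fd, KVM_GET_DEVICE_ATTR, &attr))
 *		printf("guest xcomp: %#llx\n", xcomp);
 */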
4566
4567 long kvm_arch_dev_ioctl(struct file *filp,
4568                         unsigned int ioctl, unsigned long arg)
4569 {
4570         void __user *argp = (void __user *)arg;
4571         long r;
4572
4573         switch (ioctl) {
4574         case KVM_GET_MSR_INDEX_LIST: {
4575                 struct kvm_msr_list __user *user_msr_list = argp;
4576                 struct kvm_msr_list msr_list;
4577                 unsigned n;
4578
4579                 r = -EFAULT;
4580                 if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list)))
4581                         goto out;
4582                 n = msr_list.nmsrs;
4583                 msr_list.nmsrs = num_msrs_to_save + num_emulated_msrs;
4584                 if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list)))
4585                         goto out;
4586                 r = -E2BIG;
4587                 if (n < msr_list.nmsrs)
4588                         goto out;
4589                 r = -EFAULT;
4590                 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
4591                                  num_msrs_to_save * sizeof(u32)))
4592                         goto out;
4593                 if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
4594                                  &emulated_msrs,
4595                                  num_emulated_msrs * sizeof(u32)))
4596                         goto out;
4597                 r = 0;
4598                 break;
4599         }
4600         case KVM_GET_SUPPORTED_CPUID:
4601         case KVM_GET_EMULATED_CPUID: {
4602                 struct kvm_cpuid2 __user *cpuid_arg = argp;
4603                 struct kvm_cpuid2 cpuid;
4604
4605                 r = -EFAULT;
4606                 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
4607                         goto out;
4608
4609                 r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries,
4610                                             ioctl);
4611                 if (r)
4612                         goto out;
4613
4614                 r = -EFAULT;
4615                 if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid)))
4616                         goto out;
4617                 r = 0;
4618                 break;
4619         }
4620         case KVM_X86_GET_MCE_CAP_SUPPORTED:
4621                 r = -EFAULT;
4622                 if (copy_to_user(argp, &kvm_caps.supported_mce_cap,
4623                                  sizeof(kvm_caps.supported_mce_cap)))
4624                         goto out;
4625                 r = 0;
4626                 break;
4627         case KVM_GET_MSR_FEATURE_INDEX_LIST: {
4628                 struct kvm_msr_list __user *user_msr_list = argp;
4629                 struct kvm_msr_list msr_list;
4630                 unsigned int n;
4631
4632                 r = -EFAULT;
4633                 if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list)))
4634                         goto out;
4635                 n = msr_list.nmsrs;
4636                 msr_list.nmsrs = num_msr_based_features;
4637                 if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list)))
4638                         goto out;
4639                 r = -E2BIG;
4640                 if (n < msr_list.nmsrs)
4641                         goto out;
4642                 r = -EFAULT;
4643                 if (copy_to_user(user_msr_list->indices, &msr_based_features,
4644                                  num_msr_based_features * sizeof(u32)))
4645                         goto out;
4646                 r = 0;
4647                 break;
4648         }
4649         case KVM_GET_MSRS:
4650                 r = msr_io(NULL, argp, do_get_msr_feature, 1);
4651                 break;
4652         case KVM_GET_SUPPORTED_HV_CPUID:
4653                 r = kvm_ioctl_get_supported_hv_cpuid(NULL, argp);
4654                 break;
4655         case KVM_GET_DEVICE_ATTR: {
4656                 struct kvm_device_attr attr;
4657                 r = -EFAULT;
4658                 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
4659                         break;
4660                 r = kvm_x86_dev_get_attr(&attr);
4661                 break;
4662         }
4663         case KVM_HAS_DEVICE_ATTR: {
4664                 struct kvm_device_attr attr;
4665                 r = -EFAULT;
4666                 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
4667                         break;
4668                 r = kvm_x86_dev_has_attr(&attr);
4669                 break;
4670         }
4671         default:
4672                 r = -EINVAL;
4673                 break;
4674         }
4675 out:
4676         return r;
4677 }
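
/*
 * Illustrative userspace sketch: KVM_GET_MSR_INDEX_LIST is a two-call
 * protocol; the first call with nmsrs == 0 fails with E2BIG but writes
 * the required count back into the header (assumed "kvm_fd").
 *
 *	struct kvm_msr_list hdr = { .nmsrs = 0 };
 *	struct kvm_msr_list *list;
 *
 *	ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, &hdr);	// fails, sets nmsrs
 *	list = malloc(sizeof(*list) + hdr.nmsrs * sizeof(__u32));
 *	list->nmsrs = hdr.nmsrs;
 *	if (!ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list))
 *		printf("%u saveable MSRs\n", list->nmsrs);
 */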
4678
4679 static void wbinvd_ipi(void *garbage)
4680 {
4681         wbinvd();
4682 }
4683
4684 static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
4685 {
4686         return kvm_arch_has_noncoherent_dma(vcpu->kvm);
4687 }
4688
4689 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
4690 {
4691         /* Deal with the possibility that the guest executes WBINVD */
4692         if (need_emulate_wbinvd(vcpu)) {
4693                 if (static_call(kvm_x86_has_wbinvd_exit)())
4694                         cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
4695                 else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
4696                         smp_call_function_single(vcpu->cpu,
4697                                         wbinvd_ipi, NULL, 1);
4698         }
4699
4700         static_call(kvm_x86_vcpu_load)(vcpu, cpu);
4701
4702         /* Save host pkru register if supported */
4703         vcpu->arch.host_pkru = read_pkru();
4704
4705         /* Apply any externally detected TSC adjustments (due to suspend) */
4706         if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
4707                 adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment);
4708                 vcpu->arch.tsc_offset_adjustment = 0;
4709                 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
4710         }
4711
4712         if (unlikely(vcpu->cpu != cpu) || kvm_check_tsc_unstable()) {
4713                 s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
4714                                 rdtsc() - vcpu->arch.last_host_tsc;
4715                 if (tsc_delta < 0)
4716                         mark_tsc_unstable("KVM discovered backwards TSC");
4717
4718                 if (kvm_check_tsc_unstable()) {
4719                         u64 offset = kvm_compute_l1_tsc_offset(vcpu,
4720                                                 vcpu->arch.last_guest_tsc);
4721                         kvm_vcpu_write_tsc_offset(vcpu, offset);
4722                         vcpu->arch.tsc_catchup = 1;
4723                 }
4724
4725                 if (kvm_lapic_hv_timer_in_use(vcpu))
4726                         kvm_lapic_restart_hv_timer(vcpu);
4727
4728                 /*
4729                  * On a host with synchronized TSC, there is no need to update
4730                  * kvmclock on vcpu->cpu migration
4731                  */
4732                 if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1)
4733                         kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
4734                 if (vcpu->cpu != cpu)
4735                         kvm_make_request(KVM_REQ_MIGRATE_TIMER, vcpu);
4736                 vcpu->cpu = cpu;
4737         }
4738
4739         kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
4740 }
4741
4742 static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
4743 {
4744         struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache;
4745         struct kvm_steal_time __user *st;
4746         struct kvm_memslots *slots;
4747         static const u8 preempted = KVM_VCPU_PREEMPTED;
4748         gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS;
4749
4750         /*
4751          * The vCPU can be marked preempted if and only if the VM-Exit was on
4752          * an instruction boundary and will not trigger guest emulation of any
4753          * kind (see vcpu_run).  Vendor specific code controls (conservatively)
4754          * when this is true, for example allowing the vCPU to be marked
4755          * preempted if and only if the VM-Exit was due to a host interrupt.
4756          */
4757         if (!vcpu->arch.at_instruction_boundary) {
4758                 vcpu->stat.preemption_other++;
4759                 return;
4760         }
4761
4762         vcpu->stat.preemption_reported++;
4763         if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
4764                 return;
4765
4766         if (vcpu->arch.st.preempted)
4767                 return;
4768
4769         /* This happens on process exit */
4770         if (unlikely(current->mm != vcpu->kvm->mm))
4771                 return;
4772
4773         slots = kvm_memslots(vcpu->kvm);
4774
4775         if (unlikely(slots->generation != ghc->generation ||
4776                      gpa != ghc->gpa ||
4777                      kvm_is_error_hva(ghc->hva) || !ghc->memslot))
4778                 return;
4779
4780         st = (struct kvm_steal_time __user *)ghc->hva;
4781         BUILD_BUG_ON(sizeof(st->preempted) != sizeof(preempted));
4782
4783         if (!copy_to_user_nofault(&st->preempted, &preempted, sizeof(preempted)))
4784                 vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;
4785
4786         mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
4787 }
4788
4789 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
4790 {
4791         int idx;
4792
4793         if (vcpu->preempted) {
4794                 if (!vcpu->arch.guest_state_protected)
4795                         vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu);
4796
4797                 /*
4798                  * Take the srcu lock as memslots will be accessed to check the gfn
4799                  * cache generation against the memslots generation.
4800                  */
4801                 idx = srcu_read_lock(&vcpu->kvm->srcu);
4802                 if (kvm_xen_msr_enabled(vcpu->kvm))
4803                         kvm_xen_runstate_set_preempted(vcpu);
4804                 else
4805                         kvm_steal_time_set_preempted(vcpu);
4806                 srcu_read_unlock(&vcpu->kvm->srcu, idx);
4807         }
4808
4809         static_call(kvm_x86_vcpu_put)(vcpu);
4810         vcpu->arch.last_host_tsc = rdtsc();
4811 }
4812
4813 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
4814                                     struct kvm_lapic_state *s)
4815 {
4816         static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
4817
4818         return kvm_apic_get_state(vcpu, s);
4819 }
4820
4821 static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
4822                                     struct kvm_lapic_state *s)
4823 {
4824         int r;
4825
4826         r = kvm_apic_set_state(vcpu, s);
4827         if (r)
4828                 return r;
4829         update_cr8_intercept(vcpu);
4830
4831         return 0;
4832 }
4833
4834 static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu)
4835 {
4836         /*
4837          * We can accept userspace's request for interrupt injection
4838          * as long as we have a place to store the interrupt number.
4839          * The actual injection will happen when the CPU is able to
4840          * deliver the interrupt.
4841          */
4842         if (kvm_cpu_has_extint(vcpu))
4843                 return false;
4844
4845         /* Acknowledging ExtINT does not happen if LINT0 is masked.  */
4846         return (!lapic_in_kernel(vcpu) ||
4847                 kvm_apic_accept_pic_intr(vcpu));
4848 }
4849
4850 static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
4851 {
4852         /*
4853          * Do not cause an interrupt window exit if an exception
4854          * is pending or an event needs reinjection; userspace
4855          * might want to inject the interrupt manually using KVM_SET_REGS
4856          * or KVM_SET_SREGS.  For that to work, we must be at an
4857          * instruction boundary and with no events half-injected.
4858          */
4859         return (kvm_arch_interrupt_allowed(vcpu) &&
4860                 kvm_cpu_accept_dm_intr(vcpu) &&
4861                 !kvm_event_needs_reinjection(vcpu) &&
4862                 !kvm_is_exception_pending(vcpu));
4863 }
4864
4865 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
4866                                     struct kvm_interrupt *irq)
4867 {
4868         if (irq->irq >= KVM_NR_INTERRUPTS)
4869                 return -EINVAL;
4870
4871         if (!irqchip_in_kernel(vcpu->kvm)) {
4872                 kvm_queue_interrupt(vcpu, irq->irq, false);
4873                 kvm_make_request(KVM_REQ_EVENT, vcpu);
4874                 return 0;
4875         }
4876
4877         /*
4878          * With in-kernel LAPIC, we only use this to inject EXTINT, so
4879          * fail for in-kernel 8259.
4880          */
4881         if (pic_in_kernel(vcpu->kvm))
4882                 return -ENXIO;
4883
4884         if (vcpu->arch.pending_external_vector != -1)
4885                 return -EEXIST;
4886
4887         vcpu->arch.pending_external_vector = irq->irq;
4888         kvm_make_request(KVM_REQ_EVENT, vcpu);
4889         return 0;
4890 }
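
/*
 * Illustrative userspace sketch (assumes the VM was created without an
 * in-kernel PIC, per the -ENXIO check above): KVM_INTERRUPT queues a
 * single external vector for the next VM entry.
 *
 *	struct kvm_interrupt irq = { .irq = 32 };
 *
 *	if (ioctl(vcpu_fd, KVM_INTERRUPT, &irq))
 *		perror("KVM_INTERRUPT");	// EEXIST: vector already pending
 */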
4891
4892 static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
4893 {
4894         kvm_inject_nmi(vcpu);
4895
4896         return 0;
4897 }
4898
4899 static int kvm_vcpu_ioctl_smi(struct kvm_vcpu *vcpu)
4900 {
4901         kvm_make_request(KVM_REQ_SMI, vcpu);
4902
4903         return 0;
4904 }
4905
4906 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
4907                                            struct kvm_tpr_access_ctl *tac)
4908 {
4909         if (tac->flags)
4910                 return -EINVAL;
4911         vcpu->arch.tpr_access_reporting = !!tac->enabled;
4912         return 0;
4913 }
4914
4915 static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
4916                                         u64 mcg_cap)
4917 {
4918         int r;
4919         unsigned bank_num = mcg_cap & 0xff, bank;
4920
4921         r = -EINVAL;
4922         if (!bank_num || bank_num > KVM_MAX_MCE_BANKS)
4923                 goto out;
4924         if (mcg_cap & ~(kvm_caps.supported_mce_cap | 0xff | 0xff0000))
4925                 goto out;
4926         r = 0;
4927         vcpu->arch.mcg_cap = mcg_cap;
4928         /* Init IA32_MCG_CTL to all 1s */
4929         if (mcg_cap & MCG_CTL_P)
4930                 vcpu->arch.mcg_ctl = ~(u64)0;
4931         /* Init IA32_MCi_CTL to all 1s, IA32_MCi_CTL2 to all 0s */
4932         for (bank = 0; bank < bank_num; bank++) {
4933                 vcpu->arch.mce_banks[bank*4] = ~(u64)0;
4934                 if (mcg_cap & MCG_CMCI_P)
4935                         vcpu->arch.mci_ctl2_banks[bank] = 0;
4936         }
4937
4938         kvm_apic_after_set_mcg_cap(vcpu);
4939
4940         static_call(kvm_x86_setup_mce)(vcpu);
4941 out:
4942         return r;
4943 }
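
/*
 * Illustrative userspace sketch (assumed fd names): mcg_cap is usually
 * derived from KVM_X86_GET_MCE_CAP_SUPPORTED, with the VMM choosing how
 * many banks (low byte) to expose.
 *
 *	__u64 mcg_cap;
 *
 *	ioctl(kvm_fd, KVM_X86_GET_MCE_CAP_SUPPORTED, &mcg_cap);
 *	mcg_cap = (mcg_cap & ~0xffULL) | 8;	// expose 8 MCE banks
 *	ioctl(vcpu_fd, KVM_X86_SETUP_MCE, &mcg_cap);
 */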
4944
4945 /*
4946  * Validate this is a UCNA (uncorrectable no action) error by checking the
4947  * MCG_STATUS and MCi_STATUS registers:
4948  * - none of the bits for Machine Check Exceptions are set
4949  * - both the VAL (valid) and UC (uncorrectable) bits are set
4950  * MCI_STATUS_PCC - Processor Context Corrupted
4951  * MCI_STATUS_S - Signaled as a Machine Check Exception
4952  * MCI_STATUS_AR - Software recoverable Action Required
4953  */
4954 static bool is_ucna(struct kvm_x86_mce *mce)
4955 {
4956         return  !mce->mcg_status &&
4957                 !(mce->status & (MCI_STATUS_PCC | MCI_STATUS_S | MCI_STATUS_AR)) &&
4958                 (mce->status & MCI_STATUS_VAL) &&
4959                 (mce->status & MCI_STATUS_UC);
4960 }
4961
4962 static int kvm_vcpu_x86_set_ucna(struct kvm_vcpu *vcpu, struct kvm_x86_mce *mce, u64* banks)
4963 {
4964         u64 mcg_cap = vcpu->arch.mcg_cap;
4965
4966         banks[1] = mce->status;
4967         banks[2] = mce->addr;
4968         banks[3] = mce->misc;
4969         vcpu->arch.mcg_status = mce->mcg_status;
4970
4971         if (!(mcg_cap & MCG_CMCI_P) ||
4972             !(vcpu->arch.mci_ctl2_banks[mce->bank] & MCI_CTL2_CMCI_EN))
4973                 return 0;
4974
4975         if (lapic_in_kernel(vcpu))
4976                 kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTCMCI);
4977
4978         return 0;
4979 }
4980
4981 static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
4982                                       struct kvm_x86_mce *mce)
4983 {
4984         u64 mcg_cap = vcpu->arch.mcg_cap;
4985         unsigned bank_num = mcg_cap & 0xff;
4986         u64 *banks = vcpu->arch.mce_banks;
4987
4988         if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
4989                 return -EINVAL;
4990
4991         banks += array_index_nospec(4 * mce->bank, 4 * bank_num);
4992
4993         if (is_ucna(mce))
4994                 return kvm_vcpu_x86_set_ucna(vcpu, mce, banks);
4995
4996         /*
4997          * if IA32_MCG_CTL is not all 1s, the uncorrected error
4998          * reporting is disabled
4999          */
5000         if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
5001             vcpu->arch.mcg_ctl != ~(u64)0)
5002                 return 0;
5003         /*
5004          * if IA32_MCi_CTL is not all 1s, the uncorrected error
5005          * reporting is disabled for the bank
5006          */
5007         if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
5008                 return 0;
5009         if (mce->status & MCI_STATUS_UC) {
5010                 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
5011                     !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
5012                         kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
5013                         return 0;
5014                 }
5015                 if (banks[1] & MCI_STATUS_VAL)
5016                         mce->status |= MCI_STATUS_OVER;
5017                 banks[2] = mce->addr;
5018                 banks[3] = mce->misc;
5019                 vcpu->arch.mcg_status = mce->mcg_status;
5020                 banks[1] = mce->status;
5021                 kvm_queue_exception(vcpu, MC_VECTOR);
5022         } else if (!(banks[1] & MCI_STATUS_VAL)
5023                    || !(banks[1] & MCI_STATUS_UC)) {
5024                 if (banks[1] & MCI_STATUS_VAL)
5025                         mce->status |= MCI_STATUS_OVER;
5026                 banks[2] = mce->addr;
5027                 banks[3] = mce->misc;
5028                 banks[1] = mce->status;
5029         } else
5030                 banks[1] |= MCI_STATUS_OVER;
5031         return 0;
5032 }
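
/*
 * Illustrative userspace sketch: per is_ucna() above, an event with only
 * VAL and UC set (no PCC/S/AR, mcg_status clear) takes the UCNA path and
 * is delivered via CMCI instead of #MC.  Bit values below are the
 * architectural MCi_STATUS positions; addr and bank are hypothetical.
 *
 *	struct kvm_x86_mce mce = {
 *		.status = (1ULL << 63) | (1ULL << 61),	// VAL | UC
 *		.addr = 0x1234000,
 *		.mcg_status = 0,
 *		.bank = 1,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_X86_SET_MCE, &mce);
 */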
5033
5034 static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
5035                                                struct kvm_vcpu_events *events)
5036 {
5037         struct kvm_queued_exception *ex;
5038
5039         process_nmi(vcpu);
5040
5041         if (kvm_check_request(KVM_REQ_SMI, vcpu))
5042                 process_smi(vcpu);
5043
5044         /*
5045          * KVM's ABI only allows for one exception to be migrated.  Luckily,
5046          * the only time there can be two queued exceptions is if there's a
5047          * non-exiting _injected_ exception, and a pending exiting exception.
5048          * In that case, ignore the VM-Exiting exception as it's an extension
5049          * of the injected exception.
5050          */
5051         if (vcpu->arch.exception_vmexit.pending &&
5052             !vcpu->arch.exception.pending &&
5053             !vcpu->arch.exception.injected)
5054                 ex = &vcpu->arch.exception_vmexit;
5055         else
5056                 ex = &vcpu->arch.exception;
5057
5058         /*
5059          * In guest mode, payload delivery should be deferred if the exception
5060          * will be intercepted by L1, e.g. KVM should not modify CR2 if L1
5061          * intercepts #PF, ditto for DR6 and #DBs.  If the per-VM capability,
5062          * KVM_CAP_EXCEPTION_PAYLOAD, is not set, userspace may or may not
5063          * propagate the payload and so it cannot be safely deferred.  Deliver
5064          * the payload if the capability hasn't been requested.
5065          */
5066         if (!vcpu->kvm->arch.exception_payload_enabled &&
5067             ex->pending && ex->has_payload)
5068                 kvm_deliver_exception_payload(vcpu, ex);
5069
5070         /*
5071          * The API doesn't provide the instruction length for software
5072          * exceptions, so don't report them. As long as the guest RIP
5073          * isn't advanced, we should expect to encounter the exception
5074          * again.
5075          */
5076         if (kvm_exception_is_soft(ex->vector)) {
5077                 events->exception.injected = 0;
5078                 events->exception.pending = 0;
5079         } else {
5080                 events->exception.injected = ex->injected;
5081                 events->exception.pending = ex->pending;
5082                 /*
5083                  * For ABI compatibility, deliberately conflate
5084                  * pending and injected exceptions when
5085                  * KVM_CAP_EXCEPTION_PAYLOAD isn't enabled.
5086                  */
5087                 if (!vcpu->kvm->arch.exception_payload_enabled)
5088                         events->exception.injected |= ex->pending;
5089         }
5090         events->exception.nr = ex->vector;
5091         events->exception.has_error_code = ex->has_error_code;
5092         events->exception.error_code = ex->error_code;
5093         events->exception_has_payload = ex->has_payload;
5094         events->exception_payload = ex->payload;
5095
5096         events->interrupt.injected =
5097                 vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft;
5098         events->interrupt.nr = vcpu->arch.interrupt.nr;
5099         events->interrupt.soft = 0;
5100         events->interrupt.shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu);
5101
5102         events->nmi.injected = vcpu->arch.nmi_injected;
5103         events->nmi.pending = vcpu->arch.nmi_pending != 0;
5104         events->nmi.masked = static_call(kvm_x86_get_nmi_mask)(vcpu);
5105         events->nmi.pad = 0;
5106
5107         events->sipi_vector = 0; /* never valid when reporting to user space */
5108
5109         events->smi.smm = is_smm(vcpu);
5110         events->smi.pending = vcpu->arch.smi_pending;
5111         events->smi.smm_inside_nmi =
5112                 !!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK);
5113         events->smi.latched_init = kvm_lapic_latched_init(vcpu);
5114
5115         events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
5116                          | KVM_VCPUEVENT_VALID_SHADOW
5117                          | KVM_VCPUEVENT_VALID_SMM);
5118         if (vcpu->kvm->arch.exception_payload_enabled)
5119                 events->flags |= KVM_VCPUEVENT_VALID_PAYLOAD;
5120         if (vcpu->kvm->arch.triple_fault_event) {
5121                 events->triple_fault.pending = kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu);
5122                 events->flags |= KVM_VCPUEVENT_VALID_TRIPLE_FAULT;
5123         }
5124
5125         memset(&events->reserved, 0, sizeof(events->reserved));
5126 }
5127
5128 static void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm);
5129
5130 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
5131                                               struct kvm_vcpu_events *events)
5132 {
5133         if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
5134                               | KVM_VCPUEVENT_VALID_SIPI_VECTOR
5135                               | KVM_VCPUEVENT_VALID_SHADOW
5136                               | KVM_VCPUEVENT_VALID_SMM
5137                               | KVM_VCPUEVENT_VALID_PAYLOAD
5138                               | KVM_VCPUEVENT_VALID_TRIPLE_FAULT))
5139                 return -EINVAL;
5140
5141         if (events->flags & KVM_VCPUEVENT_VALID_PAYLOAD) {
5142                 if (!vcpu->kvm->arch.exception_payload_enabled)
5143                         return -EINVAL;
5144                 if (events->exception.pending)
5145                         events->exception.injected = 0;
5146                 else
5147                         events->exception_has_payload = 0;
5148         } else {
5149                 events->exception.pending = 0;
5150                 events->exception_has_payload = 0;
5151         }
5152
5153         if ((events->exception.injected || events->exception.pending) &&
5154             (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR))
5155                 return -EINVAL;
5156
5157         /* INITs are latched while in SMM */
5158         if (events->flags & KVM_VCPUEVENT_VALID_SMM &&
5159             (events->smi.smm || events->smi.pending) &&
5160             vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED)
5161                 return -EINVAL;
5162
5163         process_nmi(vcpu);
5164
5165         /*
5166          * Flag that userspace is stuffing an exception, the next KVM_RUN will
5167          * morph the exception to a VM-Exit if appropriate.  Do this only for
5168          * pending exceptions, already-injected exceptions are not subject to
5169          * pending exceptions; already-injected exceptions are not subject to
5170          * interception.  Note, userspace that conflates pending and injected
5171          * pending exception, which in turn may cause a spurious VM-Exit.
5172          */
5173         vcpu->arch.exception_from_userspace = events->exception.pending;
5174
5175         vcpu->arch.exception_vmexit.pending = false;
5176
5177         vcpu->arch.exception.injected = events->exception.injected;
5178         vcpu->arch.exception.pending = events->exception.pending;
5179         vcpu->arch.exception.vector = events->exception.nr;
5180         vcpu->arch.exception.has_error_code = events->exception.has_error_code;
5181         vcpu->arch.exception.error_code = events->exception.error_code;
5182         vcpu->arch.exception.has_payload = events->exception_has_payload;
5183         vcpu->arch.exception.payload = events->exception_payload;
5184
5185         vcpu->arch.interrupt.injected = events->interrupt.injected;
5186         vcpu->arch.interrupt.nr = events->interrupt.nr;
5187         vcpu->arch.interrupt.soft = events->interrupt.soft;
5188         if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
5189                 static_call(kvm_x86_set_interrupt_shadow)(vcpu,
5190                                                 events->interrupt.shadow);
5191
5192         vcpu->arch.nmi_injected = events->nmi.injected;
5193         if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
5194                 vcpu->arch.nmi_pending = events->nmi.pending;
5195         static_call(kvm_x86_set_nmi_mask)(vcpu, events->nmi.masked);
5196
5197         if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR &&
5198             lapic_in_kernel(vcpu))
5199                 vcpu->arch.apic->sipi_vector = events->sipi_vector;
5200
5201         if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
5202                 if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) {
5203                         kvm_x86_ops.nested_ops->leave_nested(vcpu);
5204                         kvm_smm_changed(vcpu, events->smi.smm);
5205                 }
5206
5207                 vcpu->arch.smi_pending = events->smi.pending;
5208
5209                 if (events->smi.smm) {
5210                         if (events->smi.smm_inside_nmi)
5211                                 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
5212                         else
5213                                 vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK;
5214                 }
5215
5216                 if (lapic_in_kernel(vcpu)) {
5217                         if (events->smi.latched_init)
5218                                 set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
5219                         else
5220                                 clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
5221                 }
5222         }
5223
5224         if (events->flags & KVM_VCPUEVENT_VALID_TRIPLE_FAULT) {
5225                 if (!vcpu->kvm->arch.triple_fault_event)
5226                         return -EINVAL;
5227                 if (events->triple_fault.pending)
5228                         kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
5229                 else
5230                         kvm_clear_request(KVM_REQ_TRIPLE_FAULT, vcpu);
5231         }
5232
5233         kvm_make_request(KVM_REQ_EVENT, vcpu);
5234
5235         return 0;
5236 }
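
/*
 * Illustrative userspace sketch: migration-style save/restore of event
 * state is a plain GET/SET round trip (assumed source and destination
 * vCPU descriptors).
 *
 *	struct kvm_vcpu_events events;
 *
 *	ioctl(src_vcpu_fd, KVM_GET_VCPU_EVENTS, &events);
 *	// ...transport the struct to the destination...
 *	ioctl(dst_vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
 */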
5237
5238 static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
5239                                              struct kvm_debugregs *dbgregs)
5240 {
5241         unsigned long val;
5242
5243         memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
5244         kvm_get_dr(vcpu, 6, &val);
5245         dbgregs->dr6 = val;
5246         dbgregs->dr7 = vcpu->arch.dr7;
5247         dbgregs->flags = 0;
5248         memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
5249 }
5250
5251 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
5252                                             struct kvm_debugregs *dbgregs)
5253 {
5254         if (dbgregs->flags)
5255                 return -EINVAL;
5256
5257         if (!kvm_dr6_valid(dbgregs->dr6))
5258                 return -EINVAL;
5259         if (!kvm_dr7_valid(dbgregs->dr7))
5260                 return -EINVAL;
5261
5262         memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
5263         kvm_update_dr0123(vcpu);
5264         vcpu->arch.dr6 = dbgregs->dr6;
5265         vcpu->arch.dr7 = dbgregs->dr7;
5266         kvm_update_dr7(vcpu);
5267
5268         return 0;
5269 }
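
/*
 * Illustrative userspace sketch (hypothetical "watch_addr"): debug
 * registers follow the same save/modify/restore shape, with dr6/dr7
 * validated on the way back in.
 *
 *	struct kvm_debugregs dbg;
 *
 *	ioctl(vcpu_fd, KVM_GET_DEBUGREGS, &dbg);
 *	dbg.db[0] = watch_addr;
 *	dbg.dr7 |= 0x1;		// locally enable breakpoint 0
 *	if (ioctl(vcpu_fd, KVM_SET_DEBUGREGS, &dbg))
 *		perror("KVM_SET_DEBUGREGS");	// EINVAL: bad dr6/dr7 bits
 */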
5270
5271 static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
5272                                          struct kvm_xsave *guest_xsave)
5273 {
5274         if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
5275                 return;
5276
5277         fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu,
5278                                        guest_xsave->region,
5279                                        sizeof(guest_xsave->region),
5280                                        vcpu->arch.pkru);
5281 }
5282
5283 static void kvm_vcpu_ioctl_x86_get_xsave2(struct kvm_vcpu *vcpu,
5284                                           u8 *state, unsigned int size)
5285 {
5286         if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
5287                 return;
5288
5289         fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu,
5290                                        state, size, vcpu->arch.pkru);
5291 }
5292
5293 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
5294                                         struct kvm_xsave *guest_xsave)
5295 {
5296         if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
5297                 return 0;
5298
5299         return fpu_copy_uabi_to_guest_fpstate(&vcpu->arch.guest_fpu,
5300                                               guest_xsave->region,
5301                                               kvm_caps.supported_xcr0,
5302                                               &vcpu->arch.pkru);
5303 }
5304
5305 static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
5306                                         struct kvm_xcrs *guest_xcrs)
5307 {
5308         if (!boot_cpu_has(X86_FEATURE_XSAVE)) {
5309                 guest_xcrs->nr_xcrs = 0;
5310                 return;
5311         }
5312
5313         guest_xcrs->nr_xcrs = 1;
5314         guest_xcrs->flags = 0;
5315         guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK;
5316         guest_xcrs->xcrs[0].value = vcpu->arch.xcr0;
5317 }
5318
5319 static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
5320                                        struct kvm_xcrs *guest_xcrs)
5321 {
5322         int i, r = 0;
5323
5324         if (!boot_cpu_has(X86_FEATURE_XSAVE))
5325                 return -EINVAL;
5326
5327         if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags)
5328                 return -EINVAL;
5329
5330         for (i = 0; i < guest_xcrs->nr_xcrs; i++)
5331                 /* Only support XCR0 currently */
5332                 if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) {
5333                         r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK,
5334                                 guest_xcrs->xcrs[i].value);
5335                         break;
5336                 }
5337         if (r)
5338                 r = -EINVAL;
5339         return r;
5340 }
5341
5342 /*
5343  * kvm_set_guest_paused() indicates to the guest kernel that it has been
5344  * stopped by the hypervisor.  This function will be called from the host only.
5345  * -EINVAL is returned when the host attempts to set the flag for a guest that
5346  * does not support pv clocks.
5347  */
5348 static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
5349 {
5350         if (!vcpu->arch.pv_time.active)
5351                 return -EINVAL;
5352         vcpu->arch.pvclock_set_guest_stopped_request = true;
5353         kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
5354         return 0;
5355 }
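
/*
 * Illustrative userspace sketch: a VMM resuming a guest after a long
 * stop can use KVM_KVMCLOCK_CTRL, which lands here, to tell the guest's
 * watchdog not to treat the gap as a soft lockup (assumed vcpu_fd).
 *
 *	if (ioctl(vcpu_fd, KVM_KVMCLOCK_CTRL, 0))
 *		perror("KVM_KVMCLOCK_CTRL");	// EINVAL: no pv clock active
 */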
5356
5357 static int kvm_arch_tsc_has_attr(struct kvm_vcpu *vcpu,
5358                                  struct kvm_device_attr *attr)
5359 {
5360         int r;
5361
5362         switch (attr->attr) {
5363         case KVM_VCPU_TSC_OFFSET:
5364                 r = 0;
5365                 break;
5366         default:
5367                 r = -ENXIO;
5368         }
5369
5370         return r;
5371 }
5372
5373 static int kvm_arch_tsc_get_attr(struct kvm_vcpu *vcpu,
5374                                  struct kvm_device_attr *attr)
5375 {
5376         u64 __user *uaddr = kvm_get_attr_addr(attr);
5377         int r;
5378
5379         if (IS_ERR(uaddr))
5380                 return PTR_ERR(uaddr);
5381
5382         switch (attr->attr) {
5383         case KVM_VCPU_TSC_OFFSET:
5384                 r = -EFAULT;
5385                 if (put_user(vcpu->arch.l1_tsc_offset, uaddr))
5386                         break;
5387                 r = 0;
5388                 break;
5389         default:
5390                 r = -ENXIO;
5391         }
5392
5393         return r;
5394 }
5395
5396 static int kvm_arch_tsc_set_attr(struct kvm_vcpu *vcpu,
5397                                  struct kvm_device_attr *attr)
5398 {
5399         u64 __user *uaddr = kvm_get_attr_addr(attr);
5400         struct kvm *kvm = vcpu->kvm;
5401         int r;
5402
5403         if (IS_ERR(uaddr))
5404                 return PTR_ERR(uaddr);
5405
5406         switch (attr->attr) {
5407         case KVM_VCPU_TSC_OFFSET: {
5408                 u64 offset, tsc, ns;
5409                 unsigned long flags;
5410                 bool matched;
5411
5412                 r = -EFAULT;
5413                 if (get_user(offset, uaddr))
5414                         break;
5415
5416                 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
5417
5418                 matched = (vcpu->arch.virtual_tsc_khz &&
5419                            kvm->arch.last_tsc_khz == vcpu->arch.virtual_tsc_khz &&
5420                            kvm->arch.last_tsc_offset == offset);
5421
5422                 tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio) + offset;
5423                 ns = get_kvmclock_base_ns();
5424
5425                 __kvm_synchronize_tsc(vcpu, offset, tsc, ns, matched);
5426                 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
5427
5428                 r = 0;
5429                 break;
5430         }
5431         default:
5432                 r = -ENXIO;
5433         }
5434
5435         return r;
5436 }
5437
5438 static int kvm_vcpu_ioctl_device_attr(struct kvm_vcpu *vcpu,
5439                                       unsigned int ioctl,
5440                                       void __user *argp)
5441 {
5442         struct kvm_device_attr attr;
5443         int r;
5444
5445         if (copy_from_user(&attr, argp, sizeof(attr)))
5446                 return -EFAULT;
5447
5448         if (attr.group != KVM_VCPU_TSC_CTRL)
5449                 return -ENXIO;
5450
5451         switch (ioctl) {
5452         case KVM_HAS_DEVICE_ATTR:
5453                 r = kvm_arch_tsc_has_attr(vcpu, &attr);
5454                 break;
5455         case KVM_GET_DEVICE_ATTR:
5456                 r = kvm_arch_tsc_get_attr(vcpu, &attr);
5457                 break;
5458         case KVM_SET_DEVICE_ATTR:
5459                 r = kvm_arch_tsc_set_attr(vcpu, &attr);
5460                 break;
5461         }
5462
5463         return r;
5464 }
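
/*
 * Illustrative userspace sketch: reading the L1 TSC offset through the
 * vCPU device-attribute interface dispatched above (assumed vcpu_fd).
 *
 *	__u64 offset;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_VCPU_TSC_CTRL,
 *		.attr = KVM_VCPU_TSC_OFFSET,
 *		.addr = (__u64)(unsigned long)&offset,
 *	};
 *
 *	if (!ioctl(vcpu_fd, KVM_GET_DEVICE_ATTR, &attr))
 *		printf("TSC offset: %#llx\n", offset);
 */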
5465
5466 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
5467                                      struct kvm_enable_cap *cap)
5468 {
5469         int r;
5470         uint16_t vmcs_version;
5471         void __user *user_ptr;
5472
5473         if (cap->flags)
5474                 return -EINVAL;
5475
5476         switch (cap->cap) {
5477         case KVM_CAP_HYPERV_SYNIC2:
5478                 if (cap->args[0])
5479                         return -EINVAL;
5480                 fallthrough;
5481
5482         case KVM_CAP_HYPERV_SYNIC:
5483                 if (!irqchip_in_kernel(vcpu->kvm))
5484                         return -EINVAL;
5485                 return kvm_hv_activate_synic(vcpu, cap->cap ==
5486                                              KVM_CAP_HYPERV_SYNIC2);
5487         case KVM_CAP_HYPERV_ENLIGHTENED_VMCS:
5488                 if (!kvm_x86_ops.nested_ops->enable_evmcs)
5489                         return -ENOTTY;
5490                 r = kvm_x86_ops.nested_ops->enable_evmcs(vcpu, &vmcs_version);
5491                 if (!r) {
5492                         user_ptr = (void __user *)(uintptr_t)cap->args[0];
5493                         if (copy_to_user(user_ptr, &vmcs_version,
5494                                          sizeof(vmcs_version)))
5495                                 r = -EFAULT;
5496                 }
5497                 return r;
5498         case KVM_CAP_HYPERV_DIRECT_TLBFLUSH:
5499                 if (!kvm_x86_ops.enable_direct_tlbflush)
5500                         return -ENOTTY;
5501
5502                 return static_call(kvm_x86_enable_direct_tlbflush)(vcpu);
5503
5504         case KVM_CAP_HYPERV_ENFORCE_CPUID:
5505                 return kvm_hv_set_enforce_cpuid(vcpu, cap->args[0]);
5506
5507         case KVM_CAP_ENFORCE_PV_FEATURE_CPUID:
5508                 vcpu->arch.pv_cpuid.enforce = cap->args[0];
5509                 if (vcpu->arch.pv_cpuid.enforce)
5510                         kvm_update_pv_runtime(vcpu);
5511
5512                 return 0;
5513         default:
5514                 return -EINVAL;
5515         }
5516 }
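
/*
 * Illustrative userspace sketch: per-vCPU capabilities are switched on
 * through KVM_ENABLE_CAP, e.g. asking KVM to enforce the PV feature
 * CPUID bits (assumed vcpu_fd):
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_ENFORCE_PV_FEATURE_CPUID,
 *		.args[0] = 1,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 */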
5517
5518 long kvm_arch_vcpu_ioctl(struct file *filp,
5519                          unsigned int ioctl, unsigned long arg)
5520 {
5521         struct kvm_vcpu *vcpu = filp->private_data;
5522         void __user *argp = (void __user *)arg;
5523         int r;
5524         union {
5525                 struct kvm_sregs2 *sregs2;
5526                 struct kvm_lapic_state *lapic;
5527                 struct kvm_xsave *xsave;
5528                 struct kvm_xcrs *xcrs;
5529                 void *buffer;
5530         } u;
5531
5532         vcpu_load(vcpu);
5533
5534         u.buffer = NULL;
5535         switch (ioctl) {
5536         case KVM_GET_LAPIC: {
5537                 r = -EINVAL;
5538                 if (!lapic_in_kernel(vcpu))
5539                         goto out;
5540                 u.lapic = kzalloc(sizeof(struct kvm_lapic_state),
5541                                 GFP_KERNEL_ACCOUNT);
5542
5543                 r = -ENOMEM;
5544                 if (!u.lapic)
5545                         goto out;
5546                 r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic);
5547                 if (r)
5548                         goto out;
5549                 r = -EFAULT;
5550                 if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state)))
5551                         goto out;
5552                 r = 0;
5553                 break;
5554         }
5555         case KVM_SET_LAPIC: {
5556                 r = -EINVAL;
5557                 if (!lapic_in_kernel(vcpu))
5558                         goto out;
5559                 u.lapic = memdup_user(argp, sizeof(*u.lapic));
5560                 if (IS_ERR(u.lapic)) {
5561                         r = PTR_ERR(u.lapic);
5562                         goto out_nofree;
5563                 }
5564
5565                 r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic);
5566                 break;
5567         }
5568         case KVM_INTERRUPT: {
5569                 struct kvm_interrupt irq;
5570
5571                 r = -EFAULT;
5572                 if (copy_from_user(&irq, argp, sizeof(irq)))
5573                         goto out;
5574                 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
5575                 break;
5576         }
5577         case KVM_NMI: {
5578                 r = kvm_vcpu_ioctl_nmi(vcpu);
5579                 break;
5580         }
5581         case KVM_SMI: {
5582                 r = kvm_vcpu_ioctl_smi(vcpu);
5583                 break;
5584         }
5585         case KVM_SET_CPUID: {
5586                 struct kvm_cpuid __user *cpuid_arg = argp;
5587                 struct kvm_cpuid cpuid;
5588
5589                 r = -EFAULT;
5590                 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
5591                         goto out;
5592                 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
5593                 break;
5594         }
5595         case KVM_SET_CPUID2: {
5596                 struct kvm_cpuid2 __user *cpuid_arg = argp;
5597                 struct kvm_cpuid2 cpuid;
5598
5599                 r = -EFAULT;
5600                 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
5601                         goto out;
5602                 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
5603                                               cpuid_arg->entries);
5604                 break;
5605         }
5606         case KVM_GET_CPUID2: {
5607                 struct kvm_cpuid2 __user *cpuid_arg = argp;
5608                 struct kvm_cpuid2 cpuid;
5609
5610                 r = -EFAULT;
5611                 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
5612                         goto out;
5613                 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
5614                                               cpuid_arg->entries);
5615                 if (r)
5616                         goto out;
5617                 r = -EFAULT;
5618                 if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid)))
5619                         goto out;
5620                 r = 0;
5621                 break;
5622         }
5623         case KVM_GET_MSRS: {
5624                 int idx = srcu_read_lock(&vcpu->kvm->srcu);
5625                 r = msr_io(vcpu, argp, do_get_msr, 1);
5626                 srcu_read_unlock(&vcpu->kvm->srcu, idx);
5627                 break;
5628         }
5629         case KVM_SET_MSRS: {
5630                 int idx = srcu_read_lock(&vcpu->kvm->srcu);
5631                 r = msr_io(vcpu, argp, do_set_msr, 0);
5632                 srcu_read_unlock(&vcpu->kvm->srcu, idx);
5633                 break;
5634         }
5635         case KVM_TPR_ACCESS_REPORTING: {
5636                 struct kvm_tpr_access_ctl tac;
5637
5638                 r = -EFAULT;
5639                 if (copy_from_user(&tac, argp, sizeof(tac)))
5640                         goto out;
5641                 r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
5642                 if (r)
5643                         goto out;
5644                 r = -EFAULT;
5645                 if (copy_to_user(argp, &tac, sizeof(tac)))
5646                         goto out;
5647                 r = 0;
5648                 break;
5649         }
5650         case KVM_SET_VAPIC_ADDR: {
5651                 struct kvm_vapic_addr va;
5652                 int idx;
5653
5654                 r = -EINVAL;
5655                 if (!lapic_in_kernel(vcpu))
5656                         goto out;
5657                 r = -EFAULT;
5658                 if (copy_from_user(&va, argp, sizeof(va)))
5659                         goto out;
5660                 idx = srcu_read_lock(&vcpu->kvm->srcu);
5661                 r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
5662                 srcu_read_unlock(&vcpu->kvm->srcu, idx);
5663                 break;
5664         }
5665         case KVM_X86_SETUP_MCE: {
5666                 u64 mcg_cap;
5667
5668                 r = -EFAULT;
5669                 if (copy_from_user(&mcg_cap, argp, sizeof(mcg_cap)))
5670                         goto out;
5671                 r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
5672                 break;
5673         }
5674         case KVM_X86_SET_MCE: {
5675                 struct kvm_x86_mce mce;
5676
5677                 r = -EFAULT;
5678                 if (copy_from_user(&mce, argp, sizeof(mce)))
5679                         goto out;
5680                 r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
5681                 break;
5682         }
5683         case KVM_GET_VCPU_EVENTS: {
5684                 struct kvm_vcpu_events events;
5685
5686                 kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);
5687
5688                 r = -EFAULT;
5689                 if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events)))
5690                         break;
5691                 r = 0;
5692                 break;
5693         }
5694         case KVM_SET_VCPU_EVENTS: {
5695                 struct kvm_vcpu_events events;
5696
5697                 r = -EFAULT;
5698                 if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
5699                         break;
5700
5701                 r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
5702                 break;
5703         }
5704         case KVM_GET_DEBUGREGS: {
5705                 struct kvm_debugregs dbgregs;
5706
5707                 kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);
5708
5709                 r = -EFAULT;
5710                 if (copy_to_user(argp, &dbgregs,
5711                                  sizeof(struct kvm_debugregs)))
5712                         break;
5713                 r = 0;
5714                 break;
5715         }
5716         case KVM_SET_DEBUGREGS: {
5717                 struct kvm_debugregs dbgregs;
5718
5719                 r = -EFAULT;
5720                 if (copy_from_user(&dbgregs, argp,
5721                                    sizeof(struct kvm_debugregs)))
5722                         break;
5723
5724                 r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
5725                 break;
5726         }
5727         case KVM_GET_XSAVE: {
5728                 r = -EINVAL;
5729                 if (vcpu->arch.guest_fpu.uabi_size > sizeof(struct kvm_xsave))
5730                         break;
5731
5732                 u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL_ACCOUNT);
5733                 r = -ENOMEM;
5734                 if (!u.xsave)
5735                         break;
5736
5737                 kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave);
5738
5739                 r = -EFAULT;
5740                 if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave)))
5741                         break;
5742                 r = 0;
5743                 break;
5744         }
5745         case KVM_SET_XSAVE: {
5746                 int size = vcpu->arch.guest_fpu.uabi_size;
5747
5748                 u.xsave = memdup_user(argp, size);
5749                 if (IS_ERR(u.xsave)) {
5750                         r = PTR_ERR(u.xsave);
5751                         goto out_nofree;
5752                 }
5753
5754                 r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave);
5755                 break;
5756         }
5757
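             /*
              * KVM_GET_XSAVE2 exists for images larger than the legacy 4K
              * struct kvm_xsave, e.g. when dynamically enabled XSTATE
              * features such as AMX grow guest_fpu.uabi_size.
              */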
5758         case KVM_GET_XSAVE2: {
5759                 int size = vcpu->arch.guest_fpu.uabi_size;
5760
5761                 u.xsave = kzalloc(size, GFP_KERNEL_ACCOUNT);
5762                 r = -ENOMEM;
5763                 if (!u.xsave)
5764                         break;
5765
5766                 kvm_vcpu_ioctl_x86_get_xsave2(vcpu, u.buffer, size);
5767
5768                 r = -EFAULT;
5769                 if (copy_to_user(argp, u.xsave, size))
5770                         break;
5771
5772                 r = 0;
5773                 break;
5774         }
5775
5776         case KVM_GET_XCRS: {
5777                 u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL_ACCOUNT);
5778                 r = -ENOMEM;
5779                 if (!u.xcrs)
5780                         break;
5781
5782                 kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs);
5783
5784                 r = -EFAULT;
5785                 if (copy_to_user(argp, u.xcrs,
5786                                  sizeof(struct kvm_xcrs)))
5787                         break;
5788                 r = 0;
5789                 break;
5790         }
5791         case KVM_SET_XCRS: {
5792                 u.xcrs = memdup_user(argp, sizeof(*u.xcrs));
5793                 if (IS_ERR(u.xcrs)) {
5794                         r = PTR_ERR(u.xcrs);
5795                         goto out_nofree;
5796                 }
5797
5798                 r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs);
5799                 break;
5800         }
5801         case KVM_SET_TSC_KHZ: {
5802                 u32 user_tsc_khz;
5803
5804                 r = -EINVAL;
5805                 user_tsc_khz = (u32)arg;
5806
5807                 if (kvm_caps.has_tsc_control &&
5808                     user_tsc_khz >= kvm_caps.max_guest_tsc_khz)
5809                         goto out;
5810
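                     /* A value of 0 selects the host TSC frequency. */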
5811                 if (user_tsc_khz == 0)
5812                         user_tsc_khz = tsc_khz;
5813
5814                 if (!kvm_set_tsc_khz(vcpu, user_tsc_khz))
5815                         r = 0;
5816
5817                 goto out;
5818         }
5819         case KVM_GET_TSC_KHZ: {
5820                 r = vcpu->arch.virtual_tsc_khz;
5821                 goto out;
5822         }
5823         case KVM_KVMCLOCK_CTRL: {
5824                 r = kvm_set_guest_paused(vcpu);
5825                 goto out;
5826         }
5827         case KVM_ENABLE_CAP: {
5828                 struct kvm_enable_cap cap;
5829
5830                 r = -EFAULT;
5831                 if (copy_from_user(&cap, argp, sizeof(cap)))
5832                         goto out;
5833                 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
5834                 break;
5835         }
5836         case KVM_GET_NESTED_STATE: {
5837                 struct kvm_nested_state __user *user_kvm_nested_state = argp;
5838                 u32 user_data_size;
5839
5840                 r = -EINVAL;
5841                 if (!kvm_x86_ops.nested_ops->get_state)
5842                         break;
5843
5844                 BUILD_BUG_ON(sizeof(user_data_size) != sizeof(user_kvm_nested_state->size));
5845                 r = -EFAULT;
5846                 if (get_user(user_data_size, &user_kvm_nested_state->size))
5847                         break;
5848
5849                 r = kvm_x86_ops.nested_ops->get_state(vcpu, user_kvm_nested_state,
5850                                                      user_data_size);
5851                 if (r < 0)
5852                         break;
5853
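                     /*
                      * A positive return value is the buffer size required;
                      * if it exceeds what userspace provided, report the
                      * needed size and fail with -E2BIG.
                      */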
5854                 if (r > user_data_size) {
5855                         if (put_user(r, &user_kvm_nested_state->size))
5856                                 r = -EFAULT;
5857                         else
5858                                 r = -E2BIG;
5859                         break;
5860                 }
5861
5862                 r = 0;
5863                 break;
5864         }
5865         case KVM_SET_NESTED_STATE: {
5866                 struct kvm_nested_state __user *user_kvm_nested_state = argp;
5867                 struct kvm_nested_state kvm_state;
5868                 int idx;
5869
5870                 r = -EINVAL;
5871                 if (!kvm_x86_ops.nested_ops->set_state)
5872                         break;
5873
5874                 r = -EFAULT;
5875                 if (copy_from_user(&kvm_state, user_kvm_nested_state, sizeof(kvm_state)))
5876                         break;
5877
5878                 r = -EINVAL;
5879                 if (kvm_state.size < sizeof(kvm_state))
5880                         break;
5881
5882                 if (kvm_state.flags &
5883                     ~(KVM_STATE_NESTED_RUN_PENDING | KVM_STATE_NESTED_GUEST_MODE
5884                       | KVM_STATE_NESTED_EVMCS | KVM_STATE_NESTED_MTF_PENDING
5885                       | KVM_STATE_NESTED_GIF_SET))
5886                         break;
5887
5888                 /* nested_run_pending implies guest_mode.  */
5889                 if ((kvm_state.flags & KVM_STATE_NESTED_RUN_PENDING)
5890                     && !(kvm_state.flags & KVM_STATE_NESTED_GUEST_MODE))
5891                         break;
5892
5893                 idx = srcu_read_lock(&vcpu->kvm->srcu);
5894                 r = kvm_x86_ops.nested_ops->set_state(vcpu, user_kvm_nested_state, &kvm_state);
5895                 srcu_read_unlock(&vcpu->kvm->srcu, idx);
5896                 break;
5897         }
5898         case KVM_GET_SUPPORTED_HV_CPUID:
5899                 r = kvm_ioctl_get_supported_hv_cpuid(vcpu, argp);
5900                 break;
5901 #ifdef CONFIG_KVM_XEN
5902         case KVM_XEN_VCPU_GET_ATTR: {
5903                 struct kvm_xen_vcpu_attr xva;
5904
5905                 r = -EFAULT;
5906                 if (copy_from_user(&xva, argp, sizeof(xva)))
5907                         goto out;
5908                 r = kvm_xen_vcpu_get_attr(vcpu, &xva);
5909                 if (!r && copy_to_user(argp, &xva, sizeof(xva)))
5910                         r = -EFAULT;
5911                 break;
5912         }
5913         case KVM_XEN_VCPU_SET_ATTR: {
5914                 struct kvm_xen_vcpu_attr xva;
5915
5916                 r = -EFAULT;
5917                 if (copy_from_user(&xva, argp, sizeof(xva)))
5918                         goto out;
5919                 r = kvm_xen_vcpu_set_attr(vcpu, &xva);
5920                 break;
5921         }
5922 #endif
5923         case KVM_GET_SREGS2: {
5924                 u.sregs2 = kzalloc(sizeof(struct kvm_sregs2), GFP_KERNEL);
5925                 r = -ENOMEM;
5926                 if (!u.sregs2)
5927                         goto out;
5928                 __get_sregs2(vcpu, u.sregs2);
5929                 r = -EFAULT;
5930                 if (copy_to_user(argp, u.sregs2, sizeof(struct kvm_sregs2)))
5931                         goto out;
5932                 r = 0;
5933                 break;
5934         }
5935         case KVM_SET_SREGS2: {
5936                 u.sregs2 = memdup_user(argp, sizeof(struct kvm_sregs2));
5937                 if (IS_ERR(u.sregs2)) {
5938                         r = PTR_ERR(u.sregs2);
5939                         u.sregs2 = NULL;
5940                         goto out;
5941                 }
5942                 r = __set_sregs2(vcpu, u.sregs2);
5943                 break;
5944         }
5945         case KVM_HAS_DEVICE_ATTR:
5946         case KVM_GET_DEVICE_ATTR:
5947         case KVM_SET_DEVICE_ATTR:
5948                 r = kvm_vcpu_ioctl_device_attr(vcpu, ioctl, argp);
5949                 break;
5950         default:
5951                 r = -EINVAL;
5952         }
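     /*
      * out_nofree is used on memdup_user() failure, where 'u' holds an
      * ERR_PTR that must not be passed to kfree().
      */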
5953 out:
5954         kfree(u.buffer);
5955 out_nofree:
5956         vcpu_put(vcpu);
5957         return r;
5958 }
5959
5960 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
5961 {
5962         return VM_FAULT_SIGBUS;
5963 }
5964
5965 static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
5966 {
5967         int ret;
5968
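             /*
              * The TSS occupies three consecutive pages; reject a base
              * address whose third page would wrap past the 4GiB
              * guest-physical boundary.
              */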
5969         if (addr > (unsigned int)(-3 * PAGE_SIZE))
5970                 return -EINVAL;
5971         ret = static_call(kvm_x86_set_tss_addr)(kvm, addr);
5972         return ret;
5973 }
5974
5975 static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
5976                                               u64 ident_addr)
5977 {
5978         return static_call(kvm_x86_set_identity_map_addr)(kvm, ident_addr);
5979 }
5980
5981 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
5982                                          unsigned long kvm_nr_mmu_pages)
5983 {
5984         if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
5985                 return -EINVAL;
5986
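             /*
              * slots_lock serializes this against memslot updates, which can
              * likewise resize the MMU page pool.
              */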
5987         mutex_lock(&kvm->slots_lock);
5988
5989         kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
5990         kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
5991
5992         mutex_unlock(&kvm->slots_lock);
5993         return 0;
5994 }
5995
5996 static unsigned long kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
5997 {
5998         return kvm->arch.n_max_mmu_pages;
5999 }
6000
6001 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
6002 {
6003         struct kvm_pic *pic = kvm->arch.vpic;
6004         int r;
6005
6006         r = 0;
6007         switch (chip->chip_id) {
6008         case KVM_IRQCHIP_PIC_MASTER:
6009                 memcpy(&chip->chip.pic, &pic->pics[0],
6010                         sizeof(struct kvm_pic_state));
6011                 break;
6012         case KVM_IRQCHIP_PIC_SLAVE:
6013                 memcpy(&chip->chip.pic, &pic->pics[1],
6014                         sizeof(struct kvm_pic_state));
6015                 break;
6016         case KVM_IRQCHIP_IOAPIC:
6017                 kvm_get_ioapic(kvm, &chip->chip.ioapic);
6018                 break;
6019         default:
6020                 r = -EINVAL;
6021                 break;
6022         }
6023         return r;
6024 }
6025
6026 static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
6027 {
6028         struct kvm_pic *pic = kvm->arch.vpic;
6029         int r;
6030
6031         r = 0;
6032         switch (chip->chip_id) {
6033         case KVM_IRQCHIP_PIC_MASTER:
6034                 spin_lock(&pic->lock);
6035                 memcpy(&pic->pics[0], &chip->chip.pic,
6036                         sizeof(struct kvm_pic_state));
6037                 spin_unlock(&pic->lock);
6038                 break;
6039         case KVM_IRQCHIP_PIC_SLAVE:
6040                 spin_lock(&pic->lock);
6041                 memcpy(&pic->pics[1], &chip->chip.pic,
6042                         sizeof(struct kvm_pic_state));
6043                 spin_unlock(&pic->lock);
6044                 break;
6045         case KVM_IRQCHIP_IOAPIC:
6046                 kvm_set_ioapic(kvm, &chip->chip.ioapic);
6047                 break;
6048         default:
6049                 r = -EINVAL;
6050                 break;
6051         }
6052         kvm_pic_update_irq(pic);
6053         return r;
6054 }
6055
6056 static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
6057 {
6058         struct kvm_kpit_state *kps = &kvm->arch.vpit->pit_state;
6059
6060         BUILD_BUG_ON(sizeof(*ps) != sizeof(kps->channels));
6061
6062         mutex_lock(&kps->lock);
6063         memcpy(ps, &kps->channels, sizeof(*ps));
6064         mutex_unlock(&kps->lock);
6065         return 0;
6066 }
6067
6068 static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
6069 {
6070         int i;
6071         struct kvm_pit *pit = kvm->arch.vpit;
6072
6073         mutex_lock(&pit->pit_state.lock);
6074         memcpy(&pit->pit_state.channels, ps, sizeof(*ps));
6075         for (i = 0; i < 3; i++)
6076                 kvm_pit_load_count(pit, i, ps->channels[i].count, 0);
6077         mutex_unlock(&pit->pit_state.lock);
6078         return 0;
6079 }
6080
6081 static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
6082 {
6083         mutex_lock(&kvm->arch.vpit->pit_state.lock);
6084         memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
6085                 sizeof(ps->channels));
6086         ps->flags = kvm->arch.vpit->pit_state.flags;
6087         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
6088         memset(&ps->reserved, 0, sizeof(ps->reserved));
6089         return 0;
6090 }
6091
6092 static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
6093 {
6094         int start = 0;
6095         int i;
6096         u32 prev_legacy, cur_legacy;
6097         struct kvm_pit *pit = kvm->arch.vpit;
6098
6099         mutex_lock(&pit->pit_state.lock);
6100         prev_legacy = pit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
6101         cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
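             /* Newly enabling HPET legacy routing (re)starts PIT channel 0. */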
6102         if (!prev_legacy && cur_legacy)
6103                 start = 1;
6104         memcpy(&pit->pit_state.channels, &ps->channels,
6105                sizeof(pit->pit_state.channels));
6106         pit->pit_state.flags = ps->flags;
6107         for (i = 0; i < 3; i++)
6108                 kvm_pit_load_count(pit, i, pit->pit_state.channels[i].count,
6109                                    start && i == 0);
6110         mutex_unlock(&pit->pit_state.lock);
6111         return 0;
6112 }
6113
6114 static int kvm_vm_ioctl_reinject(struct kvm *kvm,
6115                                  struct kvm_reinject_control *control)
6116 {
6117         struct kvm_pit *pit = kvm->arch.vpit;
6118
6119         /* pit->pit_state.lock was overloaded to prevent userspace from getting
6120          * an inconsistent state after running multiple KVM_REINJECT_CONTROL
6121          * ioctls in parallel.  Use a separate lock if that ioctl isn't rare.
6122          */
6123         mutex_lock(&pit->pit_state.lock);
6124         kvm_pit_set_reinject(pit, control->pit_reinject);
6125         mutex_unlock(&pit->pit_state.lock);
6126
6127         return 0;
6128 }
6129
6130 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
6131 {
6132
6133         /*
6134          * Flush all CPUs' dirty log buffers to the dirty_bitmap.  Called
6135          * before reporting dirty_bitmap to userspace.  KVM flushes the buffers
6136          * on all VM-Exits, thus we only need to kick running vCPUs to force a
6137          * VM-Exit.
6138          */
6139         struct kvm_vcpu *vcpu;
6140         unsigned long i;
6141
6142         kvm_for_each_vcpu(i, vcpu, kvm)
6143                 kvm_vcpu_kick(vcpu);
6144 }
6145
6146 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
6147                         bool line_status)
6148 {
6149         if (!irqchip_in_kernel(kvm))
6150                 return -ENXIO;
6151
6152         irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
6153                                         irq_event->irq, irq_event->level,
6154                                         line_status);
6155         return 0;
6156 }
6157
6158 int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
6159                             struct kvm_enable_cap *cap)
6160 {
6161         int r;
6162
6163         if (cap->flags)
6164                 return -EINVAL;
6165
6166         switch (cap->cap) {
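             /*
              * Only KVM_CAP_DISABLE_QUIRKS2 validates the quirk bits; the
              * original KVM_CAP_DISABLE_QUIRKS accepts them unchecked.
              */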
6167         case KVM_CAP_DISABLE_QUIRKS2:
6168                 r = -EINVAL;
6169                 if (cap->args[0] & ~KVM_X86_VALID_QUIRKS)
6170                         break;
6171                 fallthrough;
6172         case KVM_CAP_DISABLE_QUIRKS:
6173                 kvm->arch.disabled_quirks = cap->args[0];
6174                 r = 0;
6175                 break;
6176         case KVM_CAP_SPLIT_IRQCHIP: {
6177                 mutex_lock(&kvm->lock);
6178                 r = -EINVAL;
6179                 if (cap->args[0] > MAX_NR_RESERVED_IOAPIC_PINS)
6180                         goto split_irqchip_unlock;
6181                 r = -EEXIST;
6182                 if (irqchip_in_kernel(kvm))
6183                         goto split_irqchip_unlock;
6184                 if (kvm->created_vcpus)
6185                         goto split_irqchip_unlock;
6186                 r = kvm_setup_empty_irq_routing(kvm);
6187                 if (r)
6188                         goto split_irqchip_unlock;
6189                 /* Pairs with irqchip_in_kernel. */
6190                 smp_wmb();
6191                 kvm->arch.irqchip_mode = KVM_IRQCHIP_SPLIT;
6192                 kvm->arch.nr_reserved_ioapic_pins = cap->args[0];
6193                 kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_ABSENT);
6194                 r = 0;
6195 split_irqchip_unlock:
6196                 mutex_unlock(&kvm->lock);
6197                 break;
6198         }
6199         case KVM_CAP_X2APIC_API:
6200                 r = -EINVAL;
6201                 if (cap->args[0] & ~KVM_X2APIC_API_VALID_FLAGS)
6202                         break;
6203
6204                 if (cap->args[0] & KVM_X2APIC_API_USE_32BIT_IDS)
6205                         kvm->arch.x2apic_format = true;
6206                 if (cap->args[0] & KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)
6207                         kvm->arch.x2apic_broadcast_quirk_disabled = true;
6208
6209                 r = 0;
6210                 break;
6211         case KVM_CAP_X86_DISABLE_EXITS:
6212                 r = -EINVAL;
6213                 if (cap->args[0] & ~KVM_X86_DISABLE_VALID_EXITS)
6214                         break;
6215
6216                 if ((cap->args[0] & KVM_X86_DISABLE_EXITS_MWAIT) &&
6217                         kvm_can_mwait_in_guest())
6218                         kvm->arch.mwait_in_guest = true;
6219                 if (cap->args[0] & KVM_X86_DISABLE_EXITS_HLT)
6220                         kvm->arch.hlt_in_guest = true;
6221                 if (cap->args[0] & KVM_X86_DISABLE_EXITS_PAUSE)
6222                         kvm->arch.pause_in_guest = true;
6223                 if (cap->args[0] & KVM_X86_DISABLE_EXITS_CSTATE)
6224                         kvm->arch.cstate_in_guest = true;
6225                 r = 0;
6226                 break;
6227         case KVM_CAP_MSR_PLATFORM_INFO:
6228                 kvm->arch.guest_can_read_msr_platform_info = cap->args[0];
6229                 r = 0;
6230                 break;
6231         case KVM_CAP_EXCEPTION_PAYLOAD:
6232                 kvm->arch.exception_payload_enabled = cap->args[0];
6233                 r = 0;
6234                 break;
6235         case KVM_CAP_X86_TRIPLE_FAULT_EVENT:
6236                 kvm->arch.triple_fault_event = cap->args[0];
6237                 r = 0;
6238                 break;
6239         case KVM_CAP_X86_USER_SPACE_MSR:
6240                 r = -EINVAL;
6241                 if (cap->args[0] & ~(KVM_MSR_EXIT_REASON_INVAL |
6242                                      KVM_MSR_EXIT_REASON_UNKNOWN |
6243                                      KVM_MSR_EXIT_REASON_FILTER))
6244                         break;
6245                 kvm->arch.user_space_msr_mask = cap->args[0];
6246                 r = 0;
6247                 break;
6248         case KVM_CAP_X86_BUS_LOCK_EXIT:
6249                 r = -EINVAL;
6250                 if (cap->args[0] & ~KVM_BUS_LOCK_DETECTION_VALID_MODE)
6251                         break;
6252
6253                 if ((cap->args[0] & KVM_BUS_LOCK_DETECTION_OFF) &&
6254                     (cap->args[0] & KVM_BUS_LOCK_DETECTION_EXIT))
6255                         break;
6256
6257                 if (kvm_caps.has_bus_lock_exit &&
6258                     cap->args[0] & KVM_BUS_LOCK_DETECTION_EXIT)
6259                         kvm->arch.bus_lock_detection_enabled = true;
6260                 r = 0;
6261                 break;
6262 #ifdef CONFIG_X86_SGX_KVM
6263         case KVM_CAP_SGX_ATTRIBUTE: {
6264                 unsigned long allowed_attributes = 0;
6265
6266                 r = sgx_set_attribute(&allowed_attributes, cap->args[0]);
6267                 if (r)
6268                         break;
6269
6270                 /* KVM only supports the PROVISIONKEY privileged attribute. */
6271                 if ((allowed_attributes & SGX_ATTR_PROVISIONKEY) &&
6272                     !(allowed_attributes & ~SGX_ATTR_PROVISIONKEY))
6273                         kvm->arch.sgx_provisioning_allowed = true;
6274                 else
6275                         r = -EINVAL;
6276                 break;
6277         }
6278 #endif
6279         case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM:
6280                 r = -EINVAL;
6281                 if (!kvm_x86_ops.vm_copy_enc_context_from)
6282                         break;
6283
6284                 r = static_call(kvm_x86_vm_copy_enc_context_from)(kvm, cap->args[0]);
6285                 break;
6286         case KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM:
6287                 r = -EINVAL;
6288                 if (!kvm_x86_ops.vm_move_enc_context_from)
6289                         break;
6290
6291                 r = static_call(kvm_x86_vm_move_enc_context_from)(kvm, cap->args[0]);
6292                 break;
6293         case KVM_CAP_EXIT_HYPERCALL:
6294                 if (cap->args[0] & ~KVM_EXIT_HYPERCALL_VALID_MASK) {
6295                         r = -EINVAL;
6296                         break;
6297                 }
6298                 kvm->arch.hypercall_exit_enabled = cap->args[0];
6299                 r = 0;
6300                 break;
6301         case KVM_CAP_EXIT_ON_EMULATION_FAILURE:
6302                 r = -EINVAL;
6303                 if (cap->args[0] & ~1)
6304                         break;
6305                 kvm->arch.exit_on_emulation_error = cap->args[0];
6306                 r = 0;
6307                 break;
6308         case KVM_CAP_PMU_CAPABILITY:
6309                 r = -EINVAL;
6310                 if (!enable_pmu || (cap->args[0] & ~KVM_CAP_PMU_VALID_MASK))
6311                         break;
6312
6313                 mutex_lock(&kvm->lock);
6314                 if (!kvm->created_vcpus) {
6315                         kvm->arch.enable_pmu = !(cap->args[0] & KVM_PMU_CAP_DISABLE);
6316                         r = 0;
6317                 }
6318                 mutex_unlock(&kvm->lock);
6319                 break;
6320         case KVM_CAP_MAX_VCPU_ID:
6321                 r = -EINVAL;
6322                 if (cap->args[0] > KVM_MAX_VCPU_IDS)
6323                         break;
6324
6325                 mutex_lock(&kvm->lock);
6326                 if (kvm->arch.max_vcpu_ids == cap->args[0]) {
6327                         r = 0;
6328                 } else if (!kvm->arch.max_vcpu_ids) {
6329                         kvm->arch.max_vcpu_ids = cap->args[0];
6330                         r = 0;
6331                 }
6332                 mutex_unlock(&kvm->lock);
6333                 break;
6334         case KVM_CAP_X86_NOTIFY_VMEXIT:
6335                 r = -EINVAL;
6336                 if ((u32)cap->args[0] & ~KVM_X86_NOTIFY_VMEXIT_VALID_BITS)
6337                         break;
6338                 if (!kvm_caps.has_notify_vmexit)
6339                         break;
6340                 if (!((u32)cap->args[0] & KVM_X86_NOTIFY_VMEXIT_ENABLED))
6341                         break;
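                     /* args[0]: bits 63:32 = notify window, bits 31:0 = flags. */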
6342                 mutex_lock(&kvm->lock);
6343                 if (!kvm->created_vcpus) {
6344                         kvm->arch.notify_window = cap->args[0] >> 32;
6345                         kvm->arch.notify_vmexit_flags = (u32)cap->args[0];
6346                         r = 0;
6347                 }
6348                 mutex_unlock(&kvm->lock);
6349                 break;
6350         case KVM_CAP_VM_DISABLE_NX_HUGE_PAGES:
6351                 r = -EINVAL;
6352
6353                 /*
6354                  * Since the risk of disabling NX hugepages is a guest crashing
6355                  * the system, ensure the userspace process has permission to
6356                  * reboot the system.
6357                  *
6358                  * Note that unlike the reboot() syscall, the process must have
6359                  * this capability in the root namespace because exposing
6360                  * /dev/kvm into a container does not limit the scope of the
6361                  * iTLB multihit bug to that container. In other words,
6362                  * this must use capable(), not ns_capable().
6363                  */
6364                 if (!capable(CAP_SYS_BOOT)) {
6365                         r = -EPERM;
6366                         break;
6367                 }
6368
6369                 if (cap->args[0])
6370                         break;
6371
6372                 mutex_lock(&kvm->lock);
6373                 if (!kvm->created_vcpus) {
6374                         kvm->arch.disable_nx_huge_pages = true;
6375                         r = 0;
6376                 }
6377                 mutex_unlock(&kvm->lock);
6378                 break;
6379         default:
6380                 r = -EINVAL;
6381                 break;
6382         }
6383         return r;
6384 }
6385
6386 static struct kvm_x86_msr_filter *kvm_alloc_msr_filter(bool default_allow)
6387 {
6388         struct kvm_x86_msr_filter *msr_filter;
6389
6390         msr_filter = kzalloc(sizeof(*msr_filter), GFP_KERNEL_ACCOUNT);
6391         if (!msr_filter)
6392                 return NULL;
6393
6394         msr_filter->default_allow = default_allow;
6395         return msr_filter;
6396 }
6397
6398 static void kvm_free_msr_filter(struct kvm_x86_msr_filter *msr_filter)
6399 {
6400         u32 i;
6401
6402         if (!msr_filter)
6403                 return;
6404
6405         for (i = 0; i < msr_filter->count; i++)
6406                 kfree(msr_filter->ranges[i].bitmap);
6407
6408         kfree(msr_filter);
6409 }
6410
6411 static int kvm_add_msr_filter(struct kvm_x86_msr_filter *msr_filter,
6412                               struct kvm_msr_filter_range *user_range)
6413 {
6414         unsigned long *bitmap = NULL;
6415         size_t bitmap_size;
6416
6417         if (!user_range->nmsrs)
6418                 return 0;
6419
6420         if (user_range->flags & ~(KVM_MSR_FILTER_READ | KVM_MSR_FILTER_WRITE))
6421                 return -EINVAL;
6422
6423         if (!user_range->flags)
6424                 return -EINVAL;
6425
6426         bitmap_size = BITS_TO_LONGS(user_range->nmsrs) * sizeof(long);
6427         if (!bitmap_size || bitmap_size > KVM_MSR_FILTER_MAX_BITMAP_SIZE)
6428                 return -EINVAL;
6429
6430         bitmap = memdup_user((u8 __user *)user_range->bitmap, bitmap_size);
6431         if (IS_ERR(bitmap))
6432                 return PTR_ERR(bitmap);
6433
6434         msr_filter->ranges[msr_filter->count] = (struct msr_bitmap_range) {
6435                 .flags = user_range->flags,
6436                 .base = user_range->base,
6437                 .nmsrs = user_range->nmsrs,
6438                 .bitmap = bitmap,
6439         };
6440
6441         msr_filter->count++;
6442         return 0;
6443 }
6444
6445 static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp)
6446 {
6447         struct kvm_msr_filter __user *user_msr_filter = argp;
6448         struct kvm_x86_msr_filter *new_filter, *old_filter;
6449         struct kvm_msr_filter filter;
6450         bool default_allow;
6451         bool empty = true;
6452         int r = 0;
6453         u32 i;
6454
6455         if (copy_from_user(&filter, user_msr_filter, sizeof(filter)))
6456                 return -EFAULT;
6457
6458         if (filter.flags & ~KVM_MSR_FILTER_DEFAULT_DENY)
6459                 return -EINVAL;
6460
6461         for (i = 0; i < ARRAY_SIZE(filter.ranges); i++)
6462                 empty &= !filter.ranges[i].nmsrs;
6463
6464         default_allow = !(filter.flags & KVM_MSR_FILTER_DEFAULT_DENY);
6465         if (empty && !default_allow)
6466                 return -EINVAL;
6467
6468         new_filter = kvm_alloc_msr_filter(default_allow);
6469         if (!new_filter)
6470                 return -ENOMEM;
6471
6472         for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) {
6473                 r = kvm_add_msr_filter(new_filter, &filter.ranges[i]);
6474                 if (r) {
6475                         kvm_free_msr_filter(new_filter);
6476                         return r;
6477                 }
6478         }
6479
6480         mutex_lock(&kvm->lock);
6481
6482         /* The per-VM filter is protected by kvm->lock... */
6483         old_filter = srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1);
6484
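             /*
              * Publish the new filter, then wait for all SRCU readers of the
              * old one to drain before freeing it.
              */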
6485         rcu_assign_pointer(kvm->arch.msr_filter, new_filter);
6486         synchronize_srcu(&kvm->srcu);
6487
6488         kvm_free_msr_filter(old_filter);
6489
6490         kvm_make_all_cpus_request(kvm, KVM_REQ_MSR_FILTER_CHANGED);
6491         mutex_unlock(&kvm->lock);
6492
6493         return 0;
6494 }
6495
6496 #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
6497 static int kvm_arch_suspend_notifier(struct kvm *kvm)
6498 {
6499         struct kvm_vcpu *vcpu;
6500         unsigned long i;
6501         int ret = 0;
6502
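             /*
              * Mark kvmclock as paused for every vCPU with an active pvclock
              * area, so guests don't report spurious soft lockups across
              * suspend.
              */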
6503         mutex_lock(&kvm->lock);
6504         kvm_for_each_vcpu(i, vcpu, kvm) {
6505                 if (!vcpu->arch.pv_time.active)
6506                         continue;
6507
6508                 ret = kvm_set_guest_paused(vcpu);
6509                 if (ret) {
6510                         kvm_err("Failed to pause guest VCPU%d: %d\n",
6511                                 vcpu->vcpu_id, ret);
6512                         break;
6513                 }
6514         }
6515         mutex_unlock(&kvm->lock);
6516
6517         return ret ? NOTIFY_BAD : NOTIFY_DONE;
6518 }
6519
6520 int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state)
6521 {
6522         switch (state) {
6523         case PM_HIBERNATION_PREPARE:
6524         case PM_SUSPEND_PREPARE:
6525                 return kvm_arch_suspend_notifier(kvm);
6526         }
6527
6528         return NOTIFY_DONE;
6529 }
6530 #endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */
6531
6532 static int kvm_vm_ioctl_get_clock(struct kvm *kvm, void __user *argp)
6533 {
6534         struct kvm_clock_data data = { 0 };
6535
6536         get_kvmclock(kvm, &data);
6537         if (copy_to_user(argp, &data, sizeof(data)))
6538                 return -EFAULT;
6539
6540         return 0;
6541 }
6542
6543 static int kvm_vm_ioctl_set_clock(struct kvm *kvm, void __user *argp)
6544 {
6545         struct kvm_arch *ka = &kvm->arch;
6546         struct kvm_clock_data data;
6547         u64 now_raw_ns;
6548
6549         if (copy_from_user(&data, argp, sizeof(data)))
6550                 return -EFAULT;
6551
6552         /*
6553          * Only KVM_CLOCK_REALTIME is used, but allow passing the
6554          * result of KVM_GET_CLOCK back to KVM_SET_CLOCK.
6555          */
6556         if (data.flags & ~KVM_CLOCK_VALID_FLAGS)
6557                 return -EINVAL;
6558
6559         kvm_hv_request_tsc_page_update(kvm);
6560         kvm_start_pvclock_update(kvm);
6561         pvclock_update_vm_gtod_copy(kvm);
6562
6563         /*
6564          * This pairs with kvm_guest_time_update(): when the masterclock is
6565          * in use, master_kernel_ns + kvmclock_offset is used to set the
6566          * unsigned 'system_time', so using get_kvmclock_ns() (which is
6567          * slightly ahead) here would risk an unsigned underflow of
6568          * 'system_time' when 'data.clock' is very small.
6569          */
6570         if (data.flags & KVM_CLOCK_REALTIME) {
6571                 u64 now_real_ns = ktime_get_real_ns();
6572
6573                 /*
6574                  * Avoid stepping the kvmclock backwards.
6575                  */
6576                 if (now_real_ns > data.realtime)
6577                         data.clock += now_real_ns - data.realtime;
6578         }
6579
6580         if (ka->use_master_clock)
6581                 now_raw_ns = ka->master_kernel_ns;
6582         else
6583                 now_raw_ns = get_kvmclock_base_ns();
6584         ka->kvmclock_offset = data.clock - now_raw_ns;
6585         kvm_end_pvclock_update(kvm);
6586         return 0;
6587 }
6588
6589 long kvm_arch_vm_ioctl(struct file *filp,
6590                        unsigned int ioctl, unsigned long arg)
6591 {
6592         struct kvm *kvm = filp->private_data;
6593         void __user *argp = (void __user *)arg;
6594         int r = -ENOTTY;
6595         /*
6596          * This union makes it completely explicit to gcc-3.x
6597          * that these variables' stack usage should be
6598          * combined, not added together.
6599          */
6600         union {
6601                 struct kvm_pit_state ps;
6602                 struct kvm_pit_state2 ps2;
6603                 struct kvm_pit_config pit_config;
6604         } u;
6605
6606         switch (ioctl) {
6607         case KVM_SET_TSS_ADDR:
6608                 r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
6609                 break;
6610         case KVM_SET_IDENTITY_MAP_ADDR: {
6611                 u64 ident_addr;
6612
6613                 mutex_lock(&kvm->lock);
6614                 r = -EINVAL;
6615                 if (kvm->created_vcpus)
6616                         goto set_identity_unlock;
6617                 r = -EFAULT;
6618                 if (copy_from_user(&ident_addr, argp, sizeof(ident_addr)))
6619                         goto set_identity_unlock;
6620                 r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
6621 set_identity_unlock:
6622                 mutex_unlock(&kvm->lock);
6623                 break;
6624         }
6625         case KVM_SET_NR_MMU_PAGES:
6626                 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
6627                 break;
6628         case KVM_GET_NR_MMU_PAGES:
6629                 r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
6630                 break;
6631         case KVM_CREATE_IRQCHIP: {
6632                 mutex_lock(&kvm->lock);
6633
6634                 r = -EEXIST;
6635                 if (irqchip_in_kernel(kvm))
6636                         goto create_irqchip_unlock;
6637
6638                 r = -EINVAL;
6639                 if (kvm->created_vcpus)
6640                         goto create_irqchip_unlock;
6641
6642                 r = kvm_pic_init(kvm);
6643                 if (r)
6644                         goto create_irqchip_unlock;
6645
6646                 r = kvm_ioapic_init(kvm);
6647                 if (r) {
6648                         kvm_pic_destroy(kvm);
6649                         goto create_irqchip_unlock;
6650                 }
6651
6652                 r = kvm_setup_default_irq_routing(kvm);
6653                 if (r) {
6654                         kvm_ioapic_destroy(kvm);
6655                         kvm_pic_destroy(kvm);
6656                         goto create_irqchip_unlock;
6657                 }
6658                 /* Write kvm->irq_routing before enabling irqchip_in_kernel. */
6659                 smp_wmb();
6660                 kvm->arch.irqchip_mode = KVM_IRQCHIP_KERNEL;
6661                 kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_ABSENT);
6662         create_irqchip_unlock:
6663                 mutex_unlock(&kvm->lock);
6664                 break;
6665         }
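             /*
              * KVM_CREATE_PIT implies the legacy speaker-dummy configuration
              * and shares the creation path with KVM_CREATE_PIT2.
              */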
6666         case KVM_CREATE_PIT:
6667                 u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
6668                 goto create_pit;
6669         case KVM_CREATE_PIT2:
6670                 r = -EFAULT;
6671                 if (copy_from_user(&u.pit_config, argp,
6672                                    sizeof(struct kvm_pit_config)))
6673                         goto out;
6674         create_pit:
6675                 mutex_lock(&kvm->lock);
6676                 r = -EEXIST;
6677                 if (kvm->arch.vpit)
6678                         goto create_pit_unlock;
6679                 r = -ENOMEM;
6680                 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
6681                 if (kvm->arch.vpit)
6682                         r = 0;
6683         create_pit_unlock:
6684                 mutex_unlock(&kvm->lock);
6685                 break;
6686         case KVM_GET_IRQCHIP: {
6687                 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
6688                 struct kvm_irqchip *chip;
6689
6690                 chip = memdup_user(argp, sizeof(*chip));
6691                 if (IS_ERR(chip)) {
6692                         r = PTR_ERR(chip);
6693                         goto out;
6694                 }
6695
6696                 r = -ENXIO;
6697                 if (!irqchip_kernel(kvm))
6698                         goto get_irqchip_out;
6699                 r = kvm_vm_ioctl_get_irqchip(kvm, chip);
6700                 if (r)
6701                         goto get_irqchip_out;
6702                 r = -EFAULT;
6703                 if (copy_to_user(argp, chip, sizeof(*chip)))
6704                         goto get_irqchip_out;
6705                 r = 0;
6706         get_irqchip_out:
6707                 kfree(chip);
6708                 break;
6709         }
6710         case KVM_SET_IRQCHIP: {
6711                 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
6712                 struct kvm_irqchip *chip;
6713
6714                 chip = memdup_user(argp, sizeof(*chip));
6715                 if (IS_ERR(chip)) {
6716                         r = PTR_ERR(chip);
6717                         goto out;
6718                 }
6719
6720                 r = -ENXIO;
6721                 if (!irqchip_kernel(kvm))
6722                         goto set_irqchip_out;
6723                 r = kvm_vm_ioctl_set_irqchip(kvm, chip);
6724         set_irqchip_out:
6725                 kfree(chip);
6726                 break;
6727         }
6728         case KVM_GET_PIT: {
6729                 r = -EFAULT;
6730                 if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
6731                         goto out;
6732                 r = -ENXIO;
6733                 if (!kvm->arch.vpit)
6734                         goto out;
6735                 r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
6736                 if (r)
6737                         goto out;
6738                 r = -EFAULT;
6739                 if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
6740                         goto out;
6741                 r = 0;
6742                 break;
6743         }
6744         case KVM_SET_PIT: {
6745                 r = -EFAULT;
6746                 if (copy_from_user(&u.ps, argp, sizeof(u.ps)))
6747                         goto out;
6748                 mutex_lock(&kvm->lock);
6749                 r = -ENXIO;
6750                 if (!kvm->arch.vpit)
6751                         goto set_pit_out;
6752                 r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
6753 set_pit_out:
6754                 mutex_unlock(&kvm->lock);
6755                 break;
6756         }
6757         case KVM_GET_PIT2: {
6758                 r = -ENXIO;
6759                 if (!kvm->arch.vpit)
6760                         goto out;
6761                 r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
6762                 if (r)
6763                         goto out;
6764                 r = -EFAULT;
6765                 if (copy_to_user(argp, &u.ps2, sizeof(u.ps2)))
6766                         goto out;
6767                 r = 0;
6768                 break;
6769         }
6770         case KVM_SET_PIT2: {
6771                 r = -EFAULT;
6772                 if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
6773                         goto out;
6774                 mutex_lock(&kvm->lock);
6775                 r = -ENXIO;
6776                 if (!kvm->arch.vpit)
6777                         goto set_pit2_out;
6778                 r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
6779 set_pit2_out:
6780                 mutex_unlock(&kvm->lock);
6781                 break;
6782         }
6783         case KVM_REINJECT_CONTROL: {
6784                 struct kvm_reinject_control control;
6785                 r = -EFAULT;
6786                 if (copy_from_user(&control, argp, sizeof(control)))
6787                         goto out;
6788                 r = -ENXIO;
6789                 if (!kvm->arch.vpit)
6790                         goto out;
6791                 r = kvm_vm_ioctl_reinject(kvm, &control);
6792                 break;
6793         }
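             /* The BSP vCPU id can only be set before any vCPU is created. */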
6794         case KVM_SET_BOOT_CPU_ID:
6795                 r = 0;
6796                 mutex_lock(&kvm->lock);
6797                 if (kvm->created_vcpus)
6798                         r = -EBUSY;
6799                 else
6800                         kvm->arch.bsp_vcpu_id = arg;
6801                 mutex_unlock(&kvm->lock);
6802                 break;
6803 #ifdef CONFIG_KVM_XEN
6804         case KVM_XEN_HVM_CONFIG: {
6805                 struct kvm_xen_hvm_config xhc;
6806                 r = -EFAULT;
6807                 if (copy_from_user(&xhc, argp, sizeof(xhc)))
6808                         goto out;
6809                 r = kvm_xen_hvm_config(kvm, &xhc);
6810                 break;
6811         }
6812         case KVM_XEN_HVM_GET_ATTR: {
6813                 struct kvm_xen_hvm_attr xha;
6814
6815                 r = -EFAULT;
6816                 if (copy_from_user(&xha, argp, sizeof(xha)))
6817                         goto out;
6818                 r = kvm_xen_hvm_get_attr(kvm, &xha);
6819                 if (!r && copy_to_user(argp, &xha, sizeof(xha)))
6820                         r = -EFAULT;
6821                 break;
6822         }
6823         case KVM_XEN_HVM_SET_ATTR: {
6824                 struct kvm_xen_hvm_attr xha;
6825
6826                 r = -EFAULT;
6827                 if (copy_from_user(&xha, argp, sizeof(xha)))
6828                         goto out;
6829                 r = kvm_xen_hvm_set_attr(kvm, &xha);
6830                 break;
6831         }
6832         case KVM_XEN_HVM_EVTCHN_SEND: {
6833                 struct kvm_irq_routing_xen_evtchn uxe;
6834
6835                 r = -EFAULT;
6836                 if (copy_from_user(&uxe, argp, sizeof(uxe)))
6837                         goto out;
6838                 r = kvm_xen_hvm_evtchn_send(kvm, &uxe);
6839                 break;
6840         }
6841 #endif
6842         case KVM_SET_CLOCK:
6843                 r = kvm_vm_ioctl_set_clock(kvm, argp);
6844                 break;
6845         case KVM_GET_CLOCK:
6846                 r = kvm_vm_ioctl_get_clock(kvm, argp);
6847                 break;
6848         case KVM_SET_TSC_KHZ: {
6849                 u32 user_tsc_khz;
6850
6851                 r = -EINVAL;
6852                 user_tsc_khz = (u32)arg;
6853
6854                 if (kvm_caps.has_tsc_control &&
6855                     user_tsc_khz >= kvm_caps.max_guest_tsc_khz)
6856                         goto out;
6857
6858                 if (user_tsc_khz == 0)
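                     /* As with the vCPU ioctl, 0 selects the host TSC frequency. */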
6859                         user_tsc_khz = tsc_khz;
6860
6861                 WRITE_ONCE(kvm->arch.default_tsc_khz, user_tsc_khz);
6862                 r = 0;
6863
6864                 goto out;
6865         }
6866         case KVM_GET_TSC_KHZ: {
6867                 r = READ_ONCE(kvm->arch.default_tsc_khz);
6868                 goto out;
6869         }
6870         case KVM_MEMORY_ENCRYPT_OP: {
6871                 r = -ENOTTY;
6872                 if (!kvm_x86_ops.mem_enc_ioctl)
6873                         goto out;
6874
6875                 r = static_call(kvm_x86_mem_enc_ioctl)(kvm, argp);
6876                 break;
6877         }
6878         case KVM_MEMORY_ENCRYPT_REG_REGION: {
6879                 struct kvm_enc_region region;
6880
6881                 r = -EFAULT;
6882                 if (copy_from_user(&region, argp, sizeof(region)))
6883                         goto out;
6884
6885                 r = -ENOTTY;
6886                 if (!kvm_x86_ops.mem_enc_register_region)
6887                         goto out;
6888
6889                 r = static_call(kvm_x86_mem_enc_register_region)(kvm, &region);
6890                 break;
6891         }
6892         case KVM_MEMORY_ENCRYPT_UNREG_REGION: {
6893                 struct kvm_enc_region region;
6894
6895                 r = -EFAULT;
6896                 if (copy_from_user(&region, argp, sizeof(region)))
6897                         goto out;
6898
6899                 r = -ENOTTY;
6900                 if (!kvm_x86_ops.mem_enc_unregister_region)
6901                         goto out;
6902
6903                 r = static_call(kvm_x86_mem_enc_unregister_region)(kvm, &region);
6904                 break;
6905         }
6906         case KVM_HYPERV_EVENTFD: {
6907                 struct kvm_hyperv_eventfd hvevfd;
6908
6909                 r = -EFAULT;
6910                 if (copy_from_user(&hvevfd, argp, sizeof(hvevfd)))
6911                         goto out;
6912                 r = kvm_vm_ioctl_hv_eventfd(kvm, &hvevfd);
6913                 break;
6914         }
6915         case KVM_SET_PMU_EVENT_FILTER:
6916                 r = kvm_vm_ioctl_set_pmu_event_filter(kvm, argp);
6917                 break;
6918         case KVM_X86_SET_MSR_FILTER:
6919                 r = kvm_vm_ioctl_set_msr_filter(kvm, argp);
6920                 break;
6921         default:
6922                 r = -ENOTTY;
6923         }
6924 out:
6925         return r;
6926 }
6927
6928 static void kvm_init_msr_list(void)
6929 {
6930         u32 dummy[2];
6931         unsigned i;
6932
6933         BUILD_BUG_ON_MSG(KVM_PMC_MAX_FIXED != 3,
6934                          "Please update the fixed PMCs in msrs_to_save_all[]");
6935
6936         num_msrs_to_save = 0;
6937         num_emulated_msrs = 0;
6938         num_msr_based_features = 0;
6939
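             /*
              * Probe each candidate MSR on the host; rdmsr_safe() fails for
              * MSRs the CPU doesn't implement.
              */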
6940         for (i = 0; i < ARRAY_SIZE(msrs_to_save_all); i++) {
6941                 if (rdmsr_safe(msrs_to_save_all[i], &dummy[0], &dummy[1]) < 0)
6942                         continue;
6943
6944                 /*
6945                  * Even MSRs that are valid in the host may not be exposed
6946                  * to the guests in some cases.
6947                  */
6948                 switch (msrs_to_save_all[i]) {
6949                 case MSR_IA32_BNDCFGS:
6950                         if (!kvm_mpx_supported())
6951                                 continue;
6952                         break;
6953                 case MSR_TSC_AUX:
6954                         if (!kvm_cpu_cap_has(X86_FEATURE_RDTSCP) &&
6955                             !kvm_cpu_cap_has(X86_FEATURE_RDPID))
6956                                 continue;
6957                         break;
6958                 case MSR_IA32_UMWAIT_CONTROL:
6959                         if (!kvm_cpu_cap_has(X86_FEATURE_WAITPKG))
6960                                 continue;
6961                         break;
6962                 case MSR_IA32_RTIT_CTL:
6963                 case MSR_IA32_RTIT_STATUS:
6964                         if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT))
6965                                 continue;
6966                         break;
6967                 case MSR_IA32_RTIT_CR3_MATCH:
6968                         if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) ||
6969                             !intel_pt_validate_hw_cap(PT_CAP_cr3_filtering))
6970                                 continue;
6971                         break;
6972                 case MSR_IA32_RTIT_OUTPUT_BASE:
6973                 case MSR_IA32_RTIT_OUTPUT_MASK:
6974                         if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) ||
6975                                 (!intel_pt_validate_hw_cap(PT_CAP_topa_output) &&
6976                                  !intel_pt_validate_hw_cap(PT_CAP_single_range_output)))
6977                                 continue;
6978                         break;
6979                 case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
6980                         if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) ||
6981                                 msrs_to_save_all[i] - MSR_IA32_RTIT_ADDR0_A >=
6982                                 intel_pt_validate_hw_cap(PT_CAP_num_address_ranges) * 2)
6983                                 continue;
6984                         break;
6985                 case MSR_ARCH_PERFMON_PERFCTR0 ... MSR_ARCH_PERFMON_PERFCTR0 + 17:
6986                         if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_PERFCTR0 >=
6987                             min(INTEL_PMC_MAX_GENERIC, kvm_pmu_cap.num_counters_gp))
6988                                 continue;
6989                         break;
6990                 case MSR_ARCH_PERFMON_EVENTSEL0 ... MSR_ARCH_PERFMON_EVENTSEL0 + 17:
6991                         if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_EVENTSEL0 >=
6992                             min(INTEL_PMC_MAX_GENERIC, kvm_pmu_cap.num_counters_gp))
6993                                 continue;
6994                         break;
6995                 case MSR_IA32_XFD:
6996                 case MSR_IA32_XFD_ERR:
6997                         if (!kvm_cpu_cap_has(X86_FEATURE_XFD))
6998                                 continue;
6999                         break;
7000                 default:
7001                         break;
7002                 }
7003
7004                 msrs_to_save[num_msrs_to_save++] = msrs_to_save_all[i];
7005         }
7006
7007         for (i = 0; i < ARRAY_SIZE(emulated_msrs_all); i++) {
7008                 if (!static_call(kvm_x86_has_emulated_msr)(NULL, emulated_msrs_all[i]))
7009                         continue;
7010
7011                 emulated_msrs[num_emulated_msrs++] = emulated_msrs_all[i];
7012         }
7013
7014         for (i = 0; i < ARRAY_SIZE(msr_based_features_all); i++) {
7015                 struct kvm_msr_entry msr;
7016
7017                 msr.index = msr_based_features_all[i];
7018                 if (kvm_get_msr_feature(&msr))
7019                         continue;
7020
7021                 msr_based_features[num_msr_based_features++] = msr_based_features_all[i];
7022         }
7023 }
7024
7025 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
7026                            const void *v)
7027 {
7028         int handled = 0;
7029         int n;
7030
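             /*
              * Handle at most 8 bytes per iteration, trying the in-kernel
              * local APIC before falling back to the MMIO bus.
              */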
7031         do {
7032                 n = min(len, 8);
7033                 if (!(lapic_in_kernel(vcpu) &&
7034                       !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v))
7035                     && kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, n, v))
7036                         break;
7037                 handled += n;
7038                 addr += n;
7039                 len -= n;
7040                 v += n;
7041         } while (len);
7042
7043         return handled;
7044 }
7045
7046 static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
7047 {
7048         int handled = 0;
7049         int n;
7050
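             /* Mirror of vcpu_mmio_write() for the read direction. */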
7051         do {
7052                 n = min(len, 8);
7053                 if (!(lapic_in_kernel(vcpu) &&
7054                       !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev,
7055                                          addr, n, v))
7056                     && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v))
7057                         break;
7058                 trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, v);
7059                 handled += n;
7060                 addr += n;
7061                 len -= n;
7062                 v += n;
7063         } while (len);
7064
7065         return handled;
7066 }
7067
7068 static void kvm_set_segment(struct kvm_vcpu *vcpu,
7069                         struct kvm_segment *var, int seg)
7070 {
7071         static_call(kvm_x86_set_segment)(vcpu, var, seg);
7072 }
7073
7074 void kvm_get_segment(struct kvm_vcpu *vcpu,
7075                      struct kvm_segment *var, int seg)
7076 {
7077         static_call(kvm_x86_get_segment)(vcpu, var, seg);
7078 }
7079
7080 gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access,
7081                            struct x86_exception *exception)
7082 {
7083         struct kvm_mmu *mmu = vcpu->arch.mmu;
7084         gpa_t t_gpa;
7085
7086         BUG_ON(!mmu_is_nested(vcpu));
7087
7088         /* NPT walks are always user-walks */
7089         access |= PFERR_USER_MASK;
7090         t_gpa  = mmu->gva_to_gpa(vcpu, mmu, gpa, access, exception);
7091
7092         return t_gpa;
7093 }
7094
7095 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
7096                               struct x86_exception *exception)
7097 {
7098         struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
7099
7100         u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
7101         return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
7102 }
7103 EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_read);
7104
7105 gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
7106                                struct x86_exception *exception)
7107 {
7108         struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
7109
7110         u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
7111         access |= PFERR_FETCH_MASK;
7112         return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
7113 }
7114
7115 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
7116                                struct x86_exception *exception)
7117 {
7118         struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
7119
7120         u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
7121         access |= PFERR_WRITE_MASK;
7122         return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
7123 }
7124 EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_write);
7125
7126 /* Used to access any guest's mapped memory without checking CPL. */
7127 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
7128                                 struct x86_exception *exception)
7129 {
7130         struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
7131
7132         return mmu->gva_to_gpa(vcpu, mmu, gva, 0, exception);
7133 }
7134
7135 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
7136                                       struct kvm_vcpu *vcpu, u64 access,
7137                                       struct x86_exception *exception)
7138 {
7139         struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
7140         void *data = val;
7141         int r = X86EMUL_CONTINUE;
7142
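             /*
              * Translate and copy one page at a time; a linear range may map
              * to discontiguous physical pages.
              */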
7143         while (bytes) {
7144                 gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access, exception);
7145                 unsigned offset = addr & (PAGE_SIZE-1);
7146                 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
7147                 int ret;
7148
7149                 if (gpa == INVALID_GPA)
7150                         return X86EMUL_PROPAGATE_FAULT;
7151                 ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, data,
7152                                                offset, toread);
7153                 if (ret < 0) {
7154                         r = X86EMUL_IO_NEEDED;
7155                         goto out;
7156                 }
7157
7158                 bytes -= toread;
7159                 data += toread;
7160                 addr += toread;
7161         }
7162 out:
7163         return r;
7164 }
7165
7166 /* used for instruction fetching */
7167 static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
7168                                 gva_t addr, void *val, unsigned int bytes,
7169                                 struct x86_exception *exception)
7170 {
7171         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
7172         struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
7173         u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
7174         unsigned offset;
7175         int ret;
7176
7177         /* Inline kvm_read_guest_virt_helper for speed.  */
7178         gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access|PFERR_FETCH_MASK,
7179                                     exception);
7180         if (unlikely(gpa == INVALID_GPA))
7181                 return X86EMUL_PROPAGATE_FAULT;
7182
7183         offset = addr & (PAGE_SIZE-1);
7184         if (WARN_ON(offset + bytes > PAGE_SIZE))
7185                 bytes = (unsigned)PAGE_SIZE - offset;
7186         ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, val,
7187                                        offset, bytes);
7188         if (unlikely(ret < 0))
7189                 return X86EMUL_IO_NEEDED;
7190
7191         return X86EMUL_CONTINUE;
7192 }
7193
7194 int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
7195                                gva_t addr, void *val, unsigned int bytes,
7196                                struct x86_exception *exception)
7197 {
7198         u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
7199
7200         /*
7201          * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
7202          * is returned, but our callers are not ready for that and they blindly
7203          * call kvm_inject_page_fault.  Ensure that they at least do not leak
7204          * uninitialized kernel stack memory into cr2 and error code.
7205          */
7206         memset(exception, 0, sizeof(*exception));
7207         return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
7208                                           exception);
7209 }
7210 EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
7211
7212 static int emulator_read_std(struct x86_emulate_ctxt *ctxt,
7213                              gva_t addr, void *val, unsigned int bytes,
7214                              struct x86_exception *exception, bool system)
7215 {
7216         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
7217         u64 access = 0;
7218
7219         if (system)
7220                 access |= PFERR_IMPLICIT_ACCESS;
7221         else if (static_call(kvm_x86_get_cpl)(vcpu) == 3)
7222                 access |= PFERR_USER_MASK;
7223
7224         return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception);
7225 }
7226
7227 static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt,
7228                 unsigned long addr, void *val, unsigned int bytes)
7229 {
7230         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
7231         int r = kvm_vcpu_read_guest(vcpu, addr, val, bytes);
7232
7233         return r < 0 ? X86EMUL_IO_NEEDED : X86EMUL_CONTINUE;
7234 }
7235
7236 static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
7237                                       struct kvm_vcpu *vcpu, u64 access,
7238                                       struct x86_exception *exception)
7239 {
7240         struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
7241         void *data = val;
7242         int r = X86EMUL_CONTINUE;
7243
7244         while (bytes) {
7245                 gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access, exception);
7246                 unsigned offset = addr & (PAGE_SIZE-1);
7247                 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
7248                 int ret;
7249
7250                 if (gpa == INVALID_GPA)
7251                         return X86EMUL_PROPAGATE_FAULT;
7252                 ret = kvm_vcpu_write_guest(vcpu, gpa, data, towrite);
7253                 if (ret < 0) {
7254                         r = X86EMUL_IO_NEEDED;
7255                         goto out;
7256                 }
7257
7258                 bytes -= towrite;
7259                 data += towrite;
7260                 addr += towrite;
7261         }
7262 out:
7263         return r;
7264 }
7265
7266 static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val,
7267                               unsigned int bytes, struct x86_exception *exception,
7268                               bool system)
7269 {
7270         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
7271         u64 access = PFERR_WRITE_MASK;
7272
7273         if (system)
7274                 access |= PFERR_IMPLICIT_ACCESS;
7275         else if (static_call(kvm_x86_get_cpl)(vcpu) == 3)
7276                 access |= PFERR_USER_MASK;
7277
7278         return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
7279                                            access, exception);
7280 }
7281
7282 int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
7283                                 unsigned int bytes, struct x86_exception *exception)
7284 {
7285         /* kvm_write_guest_virt_system can pull in tons of pages. */
7286         vcpu->arch.l1tf_flush_l1d = true;
7287
7288         return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
7289                                            PFERR_WRITE_MASK, exception);
7290 }
7291 EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);
7292
7293 static int kvm_can_emulate_insn(struct kvm_vcpu *vcpu, int emul_type,
7294                                 void *insn, int insn_len)
7295 {
7296         return static_call(kvm_x86_can_emulate_instruction)(vcpu, emul_type,
7297                                                             insn, insn_len);
7298 }
7299
7300 int handle_ud(struct kvm_vcpu *vcpu)
7301 {
7302         static const char kvm_emulate_prefix[] = { __KVM_EMULATE_PREFIX };
7303         int fep_flags = READ_ONCE(force_emulation_prefix);
7304         int emul_type = EMULTYPE_TRAP_UD;
7305         char sig[5]; /* ud2; .ascii "kvm" */
7306         struct x86_exception e;
7307
7308         if (unlikely(!kvm_can_emulate_insn(vcpu, emul_type, NULL, 0)))
7309                 return 1;
7310
7311         if (fep_flags &&
7312             kvm_read_guest_virt(vcpu, kvm_get_linear_rip(vcpu),
7313                                 sig, sizeof(sig), &e) == 0 &&
7314             memcmp(sig, kvm_emulate_prefix, sizeof(sig)) == 0) {
7315                 if (fep_flags & KVM_FEP_CLEAR_RFLAGS_RF)
7316                         kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) & ~X86_EFLAGS_RF);
7317                 kvm_rip_write(vcpu, kvm_rip_read(vcpu) + sizeof(sig));
7318                 emul_type = EMULTYPE_TRAP_UD_FORCED;
7319         }
7320
7321         return kvm_emulate_instruction(vcpu, emul_type);
7322 }
7323 EXPORT_SYMBOL_GPL(handle_ud);
7324
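/*
 * With the force_emulation_prefix module parameter enabled, a guest can
 * force emulation of an arbitrary instruction by prefixing it with the
 * five signature bytes checked above, i.e. "ud2" followed by the ASCII
 * bytes 'k', 'v', 'm'.  A guest-side sketch (the KVM selftests wrap this
 * in their KVM_FEP macro):
 *
 *	asm volatile("ud2; .byte 'k', 'v', 'm'; cpuid");
 *
 * handle_ud() skips the signature and emulates the instruction behind it.
 */
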
7325 static int vcpu_is_mmio_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
7326                             gpa_t gpa, bool write)
7327 {
7328         /* For APIC access vmexit */
7329         if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
7330                 return 1;
7331
7332         if (vcpu_match_mmio_gpa(vcpu, gpa)) {
7333                 trace_vcpu_match_mmio(gva, gpa, write, true);
7334                 return 1;
7335         }
7336
7337         return 0;
7338 }
7339
7340 static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
7341                                 gpa_t *gpa, struct x86_exception *exception,
7342                                 bool write)
7343 {
7344         struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
7345         u64 access = ((static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0)
7346                 | (write ? PFERR_WRITE_MASK : 0);
7347
7348         /*
7349          * Currently, PKRU is only applied to EPT-enabled guests, so
7350          * there are no protection keys in the EPT page tables for an
7351          * L1 guest or in the EPT shadow page tables for an L2 guest.
7352          */
7353         if (vcpu_match_mmio_gva(vcpu, gva) && (!is_paging(vcpu) ||
7354             !permission_fault(vcpu, vcpu->arch.walk_mmu,
7355                               vcpu->arch.mmio_access, 0, access))) {
7356                 *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
7357                                         (gva & (PAGE_SIZE - 1));
7358                 trace_vcpu_match_mmio(gva, *gpa, write, false);
7359                 return 1;
7360         }
7361
7362         *gpa = mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
7363
7364         if (*gpa == INVALID_GPA)
7365                 return -1;
7366
7367         return vcpu_is_mmio_gpa(vcpu, gva, *gpa, write);
7368 }
7369
7370 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
7371                         const void *val, int bytes)
7372 {
7373         int ret;
7374
7375         ret = kvm_vcpu_write_guest(vcpu, gpa, val, bytes);
7376         if (ret < 0)
7377                 return 0;
7378         kvm_page_track_write(vcpu, gpa, val, bytes);
7379         return 1;
7380 }
7381
7382 struct read_write_emulator_ops {
7383         int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val,
7384                                   int bytes);
7385         int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa,
7386                                   void *val, int bytes);
7387         int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
7388                                int bytes, void *val);
7389         int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
7390                                     void *val, int bytes);
7391         bool write;
7392 };
7393
7394 static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
7395 {
7396         if (vcpu->mmio_read_completed) {
7397                 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
7398                                vcpu->mmio_fragments[0].gpa, val);
7399                 vcpu->mmio_read_completed = 0;
7400                 return 1;
7401         }
7402
7403         return 0;
7404 }
7405
7406 static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
7407                         void *val, int bytes)
7408 {
7409         return !kvm_vcpu_read_guest(vcpu, gpa, val, bytes);
7410 }
7411
7412 static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
7413                          void *val, int bytes)
7414 {
7415         return emulator_write_phys(vcpu, gpa, val, bytes);
7416 }
7417
7418 static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val)
7419 {
7420         trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, val);
7421         return vcpu_mmio_write(vcpu, gpa, bytes, val);
7422 }
7423
7424 static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
7425                           void *val, int bytes)
7426 {
7427         trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, NULL);
7428         return X86EMUL_IO_NEEDED;
7429 }
7430
7431 static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
7432                            void *val, int bytes)
7433 {
7434         struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0];
7435
7436         memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len));
7437         return X86EMUL_CONTINUE;
7438 }
7439
7440 static const struct read_write_emulator_ops read_emulator = {
7441         .read_write_prepare = read_prepare,
7442         .read_write_emulate = read_emulate,
7443         .read_write_mmio = vcpu_mmio_read,
7444         .read_write_exit_mmio = read_exit_mmio,
7445 };
7446
7447 static const struct read_write_emulator_ops write_emulator = {
7448         .read_write_emulate = write_emulate,
7449         .read_write_mmio = write_mmio,
7450         .read_write_exit_mmio = write_exit_mmio,
7451         .write = true,
7452 };
7453
7454 static int emulator_read_write_onepage(unsigned long addr, void *val,
7455                                        unsigned int bytes,
7456                                        struct x86_exception *exception,
7457                                        struct kvm_vcpu *vcpu,
7458                                        const struct read_write_emulator_ops *ops)
7459 {
7460         gpa_t gpa;
7461         int handled, ret;
7462         bool write = ops->write;
7463         struct kvm_mmio_fragment *frag;
7464         struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
7465
7466         /*
7467          * If the exit was due to an NPF, we may already have a GPA.
7468          * If the GPA is present, use it to avoid the GVA-to-GPA table walk.
7469          * Note, this cannot be used on string operations, since a string
7470          * operation using REP will only have the initial GPA from when the
7471          * NPF occurred.
7472          */
7473         if (ctxt->gpa_available && emulator_can_use_gpa(ctxt) &&
7474             (addr & ~PAGE_MASK) == (ctxt->gpa_val & ~PAGE_MASK)) {
7475                 gpa = ctxt->gpa_val;
7476                 ret = vcpu_is_mmio_gpa(vcpu, addr, gpa, write);
7477         } else {
7478                 ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
7479                 if (ret < 0)
7480                         return X86EMUL_PROPAGATE_FAULT;
7481         }
7482
7483         if (!ret && ops->read_write_emulate(vcpu, gpa, val, bytes))
7484                 return X86EMUL_CONTINUE;
7485
7486         /*
7487          * Is this MMIO handled locally?
7488          */
7489         handled = ops->read_write_mmio(vcpu, gpa, bytes, val);
7490         if (handled == bytes)
7491                 return X86EMUL_CONTINUE;
7492
7493         gpa += handled;
7494         bytes -= handled;
7495         val += handled;
7496
7497         WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS);
7498         frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++];
7499         frag->gpa = gpa;
7500         frag->data = val;
7501         frag->len = bytes;
7502         return X86EMUL_CONTINUE;
7503 }
7504
7505 static int emulator_read_write(struct x86_emulate_ctxt *ctxt,
7506                         unsigned long addr,
7507                         void *val, unsigned int bytes,
7508                         struct x86_exception *exception,
7509                         const struct read_write_emulator_ops *ops)
7510 {
7511         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
7512         gpa_t gpa;
7513         int rc;
7514
7515         if (ops->read_write_prepare &&
7516                   ops->read_write_prepare(vcpu, val, bytes))
7517                 return X86EMUL_CONTINUE;
7518
7519         vcpu->mmio_nr_fragments = 0;
7520
7521         /* Crossing a page boundary? */
7522         if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
7523                 int now;
7524
7525                 now = -addr & ~PAGE_MASK;
7526                 rc = emulator_read_write_onepage(addr, val, now, exception,
7527                                                  vcpu, ops);
7528
7529                 if (rc != X86EMUL_CONTINUE)
7530                         return rc;
7531                 addr += now;
7532                 if (ctxt->mode != X86EMUL_MODE_PROT64)
7533                         addr = (u32)addr;
7534                 val += now;
7535                 bytes -= now;
7536         }
7537
7538         rc = emulator_read_write_onepage(addr, val, bytes, exception,
7539                                          vcpu, ops);
7540         if (rc != X86EMUL_CONTINUE)
7541                 return rc;
7542
7543         if (!vcpu->mmio_nr_fragments)
7544                 return rc;
7545
7546         gpa = vcpu->mmio_fragments[0].gpa;
7547
7548         vcpu->mmio_needed = 1;
7549         vcpu->mmio_cur_fragment = 0;
7550
7551         vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len);
7552         vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write;
7553         vcpu->run->exit_reason = KVM_EXIT_MMIO;
7554         vcpu->run->mmio.phys_addr = gpa;
7555
7556         return ops->read_write_exit_mmio(vcpu, gpa, val, bytes);
7557 }
7558
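/*
 * A rough sketch of how a userspace VMM services the KVM_EXIT_MMIO
 * produced above (handle_mmio_read()/handle_mmio_write() are stand-ins
 * for whatever device model userspace implements):
 *
 *	struct kvm_run *run;	// mmap()ed from the vCPU fd
 *
 *	if (run->exit_reason == KVM_EXIT_MMIO) {
 *		if (run->mmio.is_write)
 *			handle_mmio_write(run->mmio.phys_addr,
 *					  run->mmio.data, run->mmio.len);
 *		else
 *			handle_mmio_read(run->mmio.phys_addr,
 *					 run->mmio.data, run->mmio.len);
 *	}
 *
 * On the next KVM_RUN, complete_emulated_mmio() consumes the result and
 * feeds any remaining fragments back through this path.
 */
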
7559 static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
7560                                   unsigned long addr,
7561                                   void *val,
7562                                   unsigned int bytes,
7563                                   struct x86_exception *exception)
7564 {
7565         return emulator_read_write(ctxt, addr, val, bytes,
7566                                    exception, &read_emulator);
7567 }
7568
7569 static int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
7570                             unsigned long addr,
7571                             const void *val,
7572                             unsigned int bytes,
7573                             struct x86_exception *exception)
7574 {
7575         return emulator_read_write(ctxt, addr, (void *)val, bytes,
7576                                    exception, &write_emulator);
7577 }
7578
7579 #define emulator_try_cmpxchg_user(t, ptr, old, new) \
7580         (__try_cmpxchg_user((t __user *)(ptr), (t *)(old), *(t *)(new), efault ## t))
7581
7582 static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
7583                                      unsigned long addr,
7584                                      const void *old,
7585                                      const void *new,
7586                                      unsigned int bytes,
7587                                      struct x86_exception *exception)
7588 {
7589         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
7590         u64 page_line_mask;
7591         unsigned long hva;
7592         gpa_t gpa;
7593         int r;
7594
7595         /* A guest's cmpxchg8b has to be emulated atomically. */
7596         if (bytes > 8 || (bytes & (bytes - 1)))
7597                 goto emul_write;
7598
7599         gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
7600
7601         if (gpa == INVALID_GPA ||
7602             (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
7603                 goto emul_write;
7604
7605         /*
7606          * Emulate the atomic as a straight write to avoid #AC if SLD is
7607          * enabled in the host and the access splits a cache line.
7608          */
7609         if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
7610                 page_line_mask = ~(cache_line_size() - 1);
7611         else
7612                 page_line_mask = PAGE_MASK;
7613
7614         if (((gpa + bytes - 1) & page_line_mask) != (gpa & page_line_mask))
7615                 goto emul_write;
7616
7617         hva = kvm_vcpu_gfn_to_hva(vcpu, gpa_to_gfn(gpa));
7618         if (kvm_is_error_hva(hva))
7619                 goto emul_write;
7620
7621         hva += offset_in_page(gpa);
7622
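	/*
	 * Per the checks below: a return of 0 means the exchange was
	 * performed, a positive value means the comparison failed (the
	 * guest sees a failed CMPXCHG), and a negative value means the
	 * user access itself faulted.
	 */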
7623         switch (bytes) {
7624         case 1:
7625                 r = emulator_try_cmpxchg_user(u8, hva, old, new);
7626                 break;
7627         case 2:
7628                 r = emulator_try_cmpxchg_user(u16, hva, old, new);
7629                 break;
7630         case 4:
7631                 r = emulator_try_cmpxchg_user(u32, hva, old, new);
7632                 break;
7633         case 8:
7634                 r = emulator_try_cmpxchg_user(u64, hva, old, new);
7635                 break;
7636         default:
7637                 BUG();
7638         }
7639
7640         if (r < 0)
7641                 return X86EMUL_UNHANDLEABLE;
7642         if (r)
7643                 return X86EMUL_CMPXCHG_FAILED;
7644
7645         kvm_page_track_write(vcpu, gpa, new, bytes);
7646
7647         return X86EMUL_CONTINUE;
7648
7649 emul_write:
7650         printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
7651
7652         return emulator_write_emulated(ctxt, addr, new, bytes, exception);
7653 }
7654
7655 static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
7656                                unsigned short port, void *data,
7657                                unsigned int count, bool in)
7658 {
7659         unsigned i;
7660         int r;
7661
7662         WARN_ON_ONCE(vcpu->arch.pio.count);
7663         for (i = 0; i < count; i++) {
7664                 if (in)
7665                         r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, port, size, data);
7666                 else
7667                         r = kvm_io_bus_write(vcpu, KVM_PIO_BUS, port, size, data);
7668
7669                 if (r) {
7670                         if (i == 0)
7671                                 goto userspace_io;
7672
7673                         /*
7674                          * Userspace must have unregistered the device while PIO
7675                          * was running.  Drop writes, and return zeros for reads.
7676                          */
7677                         if (in)
7678                                 memset(data, 0, size * (count - i));
7679                         break;
7680                 }
7681
7682                 data += size;
7683         }
7684         return 1;
7685
7686 userspace_io:
7687         vcpu->arch.pio.port = port;
7688         vcpu->arch.pio.in = in;
7689         vcpu->arch.pio.count = count;
7690         vcpu->arch.pio.size = size;
7691
7692         if (in)
7693                 memset(vcpu->arch.pio_data, 0, size * count);
7694         else
7695                 memcpy(vcpu->arch.pio_data, data, size * count);
7696
7697         vcpu->run->exit_reason = KVM_EXIT_IO;
7698         vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
7699         vcpu->run->io.size = size;
7700         vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
7701         vcpu->run->io.count = count;
7702         vcpu->run->io.port = port;
7703         return 0;
7704 }
7705
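/*
 * A rough sketch of the matching userspace side of the KVM_EXIT_IO set up
 * above: the data lives in the shared PIO page, at io.data_offset from the
 * start of the kvm_run mapping (handle_pio_in()/handle_pio_out() are
 * stand-ins for userspace's device model):
 *
 *	u8 *data = (u8 *)run + run->io.data_offset;
 *	u32 i;
 *
 *	for (i = 0; i < run->io.count; i++, data += run->io.size) {
 *		if (run->io.direction == KVM_EXIT_IO_IN)
 *			handle_pio_in(run->io.port, data, run->io.size);
 *		else
 *			handle_pio_out(run->io.port, data, run->io.size);
 *	}
 */
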
7706 static int emulator_pio_in(struct kvm_vcpu *vcpu, int size,
7707                            unsigned short port, void *val, unsigned int count)
7708 {
7709         int r = emulator_pio_in_out(vcpu, size, port, val, count, true);
7710         if (r)
7711                 trace_kvm_pio(KVM_PIO_IN, port, size, count, val);
7712
7713         return r;
7714 }
7715
7716 static void complete_emulator_pio_in(struct kvm_vcpu *vcpu, void *val)
7717 {
7718         int size = vcpu->arch.pio.size;
7719         unsigned int count = vcpu->arch.pio.count;
7720         memcpy(val, vcpu->arch.pio_data, size * count);
7721         trace_kvm_pio(KVM_PIO_IN, vcpu->arch.pio.port, size, count, vcpu->arch.pio_data);
7722         vcpu->arch.pio.count = 0;
7723 }
7724
7725 static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
7726                                     int size, unsigned short port, void *val,
7727                                     unsigned int count)
7728 {
7729         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
7730         if (vcpu->arch.pio.count) {
7731                 /*
7732                  * Complete a previous iteration that required userspace I/O.
7733                  * Note, @count isn't guaranteed to match pio.count as userspace
7734                  * can modify ECX before rerunning the vCPU.  Ignore any such
7735                  * shenanigans as KVM doesn't support modifying the rep count,
7736                  * and the emulator ensures @count doesn't overflow the buffer.
7737                  */
7738                 complete_emulator_pio_in(vcpu, val);
7739                 return 1;
7740         }
7741
7742         return emulator_pio_in(vcpu, size, port, val, count);
7743 }
7744
7745 static int emulator_pio_out(struct kvm_vcpu *vcpu, int size,
7746                             unsigned short port, const void *val,
7747                             unsigned int count)
7748 {
7749         trace_kvm_pio(KVM_PIO_OUT, port, size, count, val);
7750         return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false);
7751 }
7752
7753 static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
7754                                      int size, unsigned short port,
7755                                      const void *val, unsigned int count)
7756 {
7757         return emulator_pio_out(emul_to_vcpu(ctxt), size, port, val, count);
7758 }
7759
7760 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
7761 {
7762         return static_call(kvm_x86_get_segment_base)(vcpu, seg);
7763 }
7764
7765 static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address)
7766 {
7767         kvm_mmu_invlpg(emul_to_vcpu(ctxt), address);
7768 }
7769
7770 static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
7771 {
7772         if (!need_emulate_wbinvd(vcpu))
7773                 return X86EMUL_CONTINUE;
7774
7775         if (static_call(kvm_x86_has_wbinvd_exit)()) {
7776                 int cpu = get_cpu();
7777
7778                 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
7779                 on_each_cpu_mask(vcpu->arch.wbinvd_dirty_mask,
7780                                 wbinvd_ipi, NULL, 1);
7781                 put_cpu();
7782                 cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
7783         } else
7784                 wbinvd();
7785         return X86EMUL_CONTINUE;
7786 }
7787
7788 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
7789 {
7790         kvm_emulate_wbinvd_noskip(vcpu);
7791         return kvm_skip_emulated_instruction(vcpu);
7792 }
7793 EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
7794
7797 static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
7798 {
7799         kvm_emulate_wbinvd_noskip(emul_to_vcpu(ctxt));
7800 }
7801
7802 static void emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
7803                             unsigned long *dest)
7804 {
7805         kvm_get_dr(emul_to_vcpu(ctxt), dr, dest);
7806 }
7807
7808 static int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
7809                            unsigned long value)
7810 {
7812         return kvm_set_dr(emul_to_vcpu(ctxt), dr, value);
7813 }
7814
7815 static u64 mk_cr_64(u64 curr_cr, u32 new_val)
7816 {
7817         return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
7818 }
7819
7820 static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr)
7821 {
7822         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
7823         unsigned long value;
7824
7825         switch (cr) {
7826         case 0:
7827                 value = kvm_read_cr0(vcpu);
7828                 break;
7829         case 2:
7830                 value = vcpu->arch.cr2;
7831                 break;
7832         case 3:
7833                 value = kvm_read_cr3(vcpu);
7834                 break;
7835         case 4:
7836                 value = kvm_read_cr4(vcpu);
7837                 break;
7838         case 8:
7839                 value = kvm_get_cr8(vcpu);
7840                 break;
7841         default:
7842                 kvm_err("%s: unexpected cr %u\n", __func__, cr);
7843                 return 0;
7844         }
7845
7846         return value;
7847 }
7848
7849 static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val)
7850 {
7851         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
7852         int res = 0;
7853
7854         switch (cr) {
7855         case 0:
7856                 res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
7857                 break;
7858         case 2:
7859                 vcpu->arch.cr2 = val;
7860                 break;
7861         case 3:
7862                 res = kvm_set_cr3(vcpu, val);
7863                 break;
7864         case 4:
7865                 res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
7866                 break;
7867         case 8:
7868                 res = kvm_set_cr8(vcpu, val);
7869                 break;
7870         default:
7871                 kvm_err("%s: unexpected cr %u\n", __func__, cr);
7872                 res = -1;
7873         }
7874
7875         return res;
7876 }
7877
7878 static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt)
7879 {
7880         return static_call(kvm_x86_get_cpl)(emul_to_vcpu(ctxt));
7881 }
7882
7883 static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
7884 {
7885         static_call(kvm_x86_get_gdt)(emul_to_vcpu(ctxt), dt);
7886 }
7887
7888 static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
7889 {
7890         static_call(kvm_x86_get_idt)(emul_to_vcpu(ctxt), dt);
7891 }
7892
7893 static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
7894 {
7895         static_call(kvm_x86_set_gdt)(emul_to_vcpu(ctxt), dt);
7896 }
7897
7898 static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
7899 {
7900         static_call(kvm_x86_set_idt)(emul_to_vcpu(ctxt), dt);
7901 }
7902
7903 static unsigned long emulator_get_cached_segment_base(
7904         struct x86_emulate_ctxt *ctxt, int seg)
7905 {
7906         return get_segment_base(emul_to_vcpu(ctxt), seg);
7907 }
7908
7909 static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
7910                                  struct desc_struct *desc, u32 *base3,
7911                                  int seg)
7912 {
7913         struct kvm_segment var;
7914
7915         kvm_get_segment(emul_to_vcpu(ctxt), &var, seg);
7916         *selector = var.selector;
7917
7918         if (var.unusable) {
7919                 memset(desc, 0, sizeof(*desc));
7920                 if (base3)
7921                         *base3 = 0;
7922                 return false;
7923         }
7924
7925         if (var.g)
7926                 var.limit >>= 12;
7927         set_desc_limit(desc, var.limit);
7928         set_desc_base(desc, (unsigned long)var.base);
7929 #ifdef CONFIG_X86_64
7930         if (base3)
7931                 *base3 = var.base >> 32;
7932 #endif
7933         desc->type = var.type;
7934         desc->s = var.s;
7935         desc->dpl = var.dpl;
7936         desc->p = var.present;
7937         desc->avl = var.avl;
7938         desc->l = var.l;
7939         desc->d = var.db;
7940         desc->g = var.g;
7941
7942         return true;
7943 }
7944
7945 static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector,
7946                                  struct desc_struct *desc, u32 base3,
7947                                  int seg)
7948 {
7949         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
7950         struct kvm_segment var;
7951
7952         var.selector = selector;
7953         var.base = get_desc_base(desc);
7954 #ifdef CONFIG_X86_64
7955         var.base |= ((u64)base3) << 32;
7956 #endif
7957         var.limit = get_desc_limit(desc);
7958         if (desc->g)
7959                 var.limit = (var.limit << 12) | 0xfff;
7960         var.type = desc->type;
7961         var.dpl = desc->dpl;
7962         var.db = desc->d;
7963         var.s = desc->s;
7964         var.l = desc->l;
7965         var.g = desc->g;
7966         var.avl = desc->avl;
7967         var.present = desc->p;
7968         var.unusable = !var.present;
7969         var.padding = 0;
7970
7971         kvm_set_segment(vcpu, &var, seg);
7973 }
7974
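/*
 * Note the granularity handling above: with desc->g set, the descriptor
 * limit is in 4 KiB units, so e.g. a stored limit of 0xfffff expands to
 * (0xfffff << 12) | 0xfff == 0xffffffff, i.e. the full 4 GiB segment.
 */
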
7975 static int emulator_get_msr_with_filter(struct x86_emulate_ctxt *ctxt,
7976                                         u32 msr_index, u64 *pdata)
7977 {
7978         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
7979         int r;
7980
7981         r = kvm_get_msr_with_filter(vcpu, msr_index, pdata);
7982         if (r < 0)
7983                 return X86EMUL_UNHANDLEABLE;
7984
7985         if (r) {
7986                 if (kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_RDMSR, 0,
7987                                        complete_emulated_rdmsr, r))
7988                         return X86EMUL_IO_NEEDED;
7989
7990                 trace_kvm_msr_read_ex(msr_index);
7991                 return X86EMUL_PROPAGATE_FAULT;
7992         }
7993
7994         trace_kvm_msr_read(msr_index, *pdata);
7995         return X86EMUL_CONTINUE;
7996 }
7997
7998 static int emulator_set_msr_with_filter(struct x86_emulate_ctxt *ctxt,
7999                                         u32 msr_index, u64 data)
8000 {
8001         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
8002         int r;
8003
8004         r = kvm_set_msr_with_filter(vcpu, msr_index, data);
8005         if (r < 0)
8006                 return X86EMUL_UNHANDLEABLE;
8007
8008         if (r) {
8009                 if (kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_WRMSR, data,
8010                                        complete_emulated_msr_access, r))
8011                         return X86EMUL_IO_NEEDED;
8012
8013                 trace_kvm_msr_write_ex(msr_index, data);
8014                 return X86EMUL_PROPAGATE_FAULT;
8015         }
8016
8017         trace_kvm_msr_write(msr_index, data);
8018         return X86EMUL_CONTINUE;
8019 }
8020
8021 static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
8022                             u32 msr_index, u64 *pdata)
8023 {
8024         return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata);
8025 }
8026
8027 static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
8028                             u32 msr_index, u64 data)
8029 {
8030         return kvm_set_msr(emul_to_vcpu(ctxt), msr_index, data);
8031 }
8032
8033 static u64 emulator_get_smbase(struct x86_emulate_ctxt *ctxt)
8034 {
8035         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
8036
8037         return vcpu->arch.smbase;
8038 }
8039
8040 static void emulator_set_smbase(struct x86_emulate_ctxt *ctxt, u64 smbase)
8041 {
8042         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
8043
8044         vcpu->arch.smbase = smbase;
8045 }
8046
8047 static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt,
8048                               u32 pmc)
8049 {
8050         if (kvm_pmu_is_valid_rdpmc_ecx(emul_to_vcpu(ctxt), pmc))
8051                 return 0;
8052         return -EINVAL;
8053 }
8054
8055 static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,
8056                              u32 pmc, u64 *pdata)
8057 {
8058         return kvm_pmu_rdpmc(emul_to_vcpu(ctxt), pmc, pdata);
8059 }
8060
8061 static void emulator_halt(struct x86_emulate_ctxt *ctxt)
8062 {
8063         emul_to_vcpu(ctxt)->arch.halt_request = 1;
8064 }
8065
8066 static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
8067                               struct x86_instruction_info *info,
8068                               enum x86_intercept_stage stage)
8069 {
8070         return static_call(kvm_x86_check_intercept)(emul_to_vcpu(ctxt), info, stage,
8071                                             &ctxt->exception);
8072 }
8073
8074 static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
8075                               u32 *eax, u32 *ebx, u32 *ecx, u32 *edx,
8076                               bool exact_only)
8077 {
8078         return kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx, exact_only);
8079 }
8080
8081 static bool emulator_guest_has_long_mode(struct x86_emulate_ctxt *ctxt)
8082 {
8083         return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_LM);
8084 }
8085
8086 static bool emulator_guest_has_movbe(struct x86_emulate_ctxt *ctxt)
8087 {
8088         return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_MOVBE);
8089 }
8090
8091 static bool emulator_guest_has_fxsr(struct x86_emulate_ctxt *ctxt)
8092 {
8093         return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_FXSR);
8094 }
8095
8096 static bool emulator_guest_has_rdpid(struct x86_emulate_ctxt *ctxt)
8097 {
8098         return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_RDPID);
8099 }
8100
8101 static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg)
8102 {
8103         return kvm_register_read_raw(emul_to_vcpu(ctxt), reg);
8104 }
8105
8106 static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val)
8107 {
8108         kvm_register_write_raw(emul_to_vcpu(ctxt), reg, val);
8109 }
8110
8111 static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked)
8112 {
8113         static_call(kvm_x86_set_nmi_mask)(emul_to_vcpu(ctxt), masked);
8114 }
8115
8116 static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt)
8117 {
8118         return emul_to_vcpu(ctxt)->arch.hflags;
8119 }
8120
8121 static void emulator_exiting_smm(struct x86_emulate_ctxt *ctxt)
8122 {
8123         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
8124
8125         kvm_smm_changed(vcpu, false);
8126 }
8127
8128 static int emulator_leave_smm(struct x86_emulate_ctxt *ctxt,
8129                                   const char *smstate)
8130 {
8131         return static_call(kvm_x86_leave_smm)(emul_to_vcpu(ctxt), smstate);
8132 }
8133
8134 static void emulator_triple_fault(struct x86_emulate_ctxt *ctxt)
8135 {
8136         kvm_make_request(KVM_REQ_TRIPLE_FAULT, emul_to_vcpu(ctxt));
8137 }
8138
8139 static int emulator_set_xcr(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr)
8140 {
8141         return __kvm_set_xcr(emul_to_vcpu(ctxt), index, xcr);
8142 }
8143
8144 static void emulator_vm_bugged(struct x86_emulate_ctxt *ctxt)
8145 {
8146         struct kvm *kvm = emul_to_vcpu(ctxt)->kvm;
8147
8148         if (!kvm->vm_bugged)
8149                 kvm_vm_bugged(kvm);
8150 }
8151
8152 static const struct x86_emulate_ops emulate_ops = {
8153         .vm_bugged           = emulator_vm_bugged,
8154         .read_gpr            = emulator_read_gpr,
8155         .write_gpr           = emulator_write_gpr,
8156         .read_std            = emulator_read_std,
8157         .write_std           = emulator_write_std,
8158         .read_phys           = kvm_read_guest_phys_system,
8159         .fetch               = kvm_fetch_guest_virt,
8160         .read_emulated       = emulator_read_emulated,
8161         .write_emulated      = emulator_write_emulated,
8162         .cmpxchg_emulated    = emulator_cmpxchg_emulated,
8163         .invlpg              = emulator_invlpg,
8164         .pio_in_emulated     = emulator_pio_in_emulated,
8165         .pio_out_emulated    = emulator_pio_out_emulated,
8166         .get_segment         = emulator_get_segment,
8167         .set_segment         = emulator_set_segment,
8168         .get_cached_segment_base = emulator_get_cached_segment_base,
8169         .get_gdt             = emulator_get_gdt,
8170         .get_idt             = emulator_get_idt,
8171         .set_gdt             = emulator_set_gdt,
8172         .set_idt             = emulator_set_idt,
8173         .get_cr              = emulator_get_cr,
8174         .set_cr              = emulator_set_cr,
8175         .cpl                 = emulator_get_cpl,
8176         .get_dr              = emulator_get_dr,
8177         .set_dr              = emulator_set_dr,
8178         .get_smbase          = emulator_get_smbase,
8179         .set_smbase          = emulator_set_smbase,
8180         .set_msr_with_filter = emulator_set_msr_with_filter,
8181         .get_msr_with_filter = emulator_get_msr_with_filter,
8182         .set_msr             = emulator_set_msr,
8183         .get_msr             = emulator_get_msr,
8184         .check_pmc           = emulator_check_pmc,
8185         .read_pmc            = emulator_read_pmc,
8186         .halt                = emulator_halt,
8187         .wbinvd              = emulator_wbinvd,
8188         .fix_hypercall       = emulator_fix_hypercall,
8189         .intercept           = emulator_intercept,
8190         .get_cpuid           = emulator_get_cpuid,
8191         .guest_has_long_mode = emulator_guest_has_long_mode,
8192         .guest_has_movbe     = emulator_guest_has_movbe,
8193         .guest_has_fxsr      = emulator_guest_has_fxsr,
8194         .guest_has_rdpid     = emulator_guest_has_rdpid,
8195         .set_nmi_mask        = emulator_set_nmi_mask,
8196         .get_hflags          = emulator_get_hflags,
8197         .exiting_smm         = emulator_exiting_smm,
8198         .leave_smm           = emulator_leave_smm,
8199         .triple_fault        = emulator_triple_fault,
8200         .set_xcr             = emulator_set_xcr,
8201 };
8202
8203 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
8204 {
8205         u32 int_shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu);
8206         /*
8207          * An "sti; sti" sequence only disables interrupts for the first
8208          * instruction.  So, if the last instruction, be it emulated or
8209          * not, left the system with the INT_STI flag enabled, it
8210          * means that the last instruction was an sti.  We should not
8211          * leave the flag on in this case.  The same goes for "mov ss".
8212          */
8213         if (int_shadow & mask)
8214                 mask = 0;
8215         if (unlikely(int_shadow || mask)) {
8216                 static_call(kvm_x86_set_interrupt_shadow)(vcpu, mask);
8217                 if (!mask)
8218                         kvm_make_request(KVM_REQ_EVENT, vcpu);
8219         }
8220 }
8221
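/*
 * The STI shadow modeled above is what makes the classic "sti; hlt"
 * idiom safe: interrupts are only recognized after the instruction
 * following STI, so no interrupt can be delivered between the two and
 * leave the vCPU halted after the event it was waiting for has already
 * been handled.
 */
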
8222 static void inject_emulated_exception(struct kvm_vcpu *vcpu)
8223 {
8224         struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
8225
8226         if (ctxt->exception.vector == PF_VECTOR)
8227                 kvm_inject_emulated_page_fault(vcpu, &ctxt->exception);
8228         else if (ctxt->exception.error_code_valid)
8229                 kvm_queue_exception_e(vcpu, ctxt->exception.vector,
8230                                       ctxt->exception.error_code);
8231         else
8232                 kvm_queue_exception(vcpu, ctxt->exception.vector);
8233 }
8234
8235 static struct x86_emulate_ctxt *alloc_emulate_ctxt(struct kvm_vcpu *vcpu)
8236 {
8237         struct x86_emulate_ctxt *ctxt;
8238
8239         ctxt = kmem_cache_zalloc(x86_emulator_cache, GFP_KERNEL_ACCOUNT);
8240         if (!ctxt) {
8241                 pr_err("kvm: failed to allocate vcpu's emulator\n");
8242                 return NULL;
8243         }
8244
8245         ctxt->vcpu = vcpu;
8246         ctxt->ops = &emulate_ops;
8247         vcpu->arch.emulate_ctxt = ctxt;
8248
8249         return ctxt;
8250 }
8251
8252 static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
8253 {
8254         struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
8255         int cs_db, cs_l;
8256
8257         static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
8258
8259         ctxt->gpa_available = false;
8260         ctxt->eflags = kvm_get_rflags(vcpu);
8261         ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
8262
8263         ctxt->eip = kvm_rip_read(vcpu);
8264         ctxt->mode = (!is_protmode(vcpu))               ? X86EMUL_MODE_REAL :
8265                      (ctxt->eflags & X86_EFLAGS_VM)     ? X86EMUL_MODE_VM86 :
8266                      (cs_l && is_long_mode(vcpu))       ? X86EMUL_MODE_PROT64 :
8267                      cs_db                              ? X86EMUL_MODE_PROT32 :
8268                                                           X86EMUL_MODE_PROT16;
8269         BUILD_BUG_ON(HF_GUEST_MASK != X86EMUL_GUEST_MASK);
8270         BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK);
8271         BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK);
8272
8273         ctxt->interruptibility = 0;
8274         ctxt->have_exception = false;
8275         ctxt->exception.vector = -1;
8276         ctxt->perm_ok = false;
8277
8278         init_decode_cache(ctxt);
8279         vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
8280 }
8281
8282 void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
8283 {
8284         struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
8285         int ret;
8286
8287         init_emulate_ctxt(vcpu);
8288
8289         ctxt->op_bytes = 2;
8290         ctxt->ad_bytes = 2;
8291         ctxt->_eip = ctxt->eip + inc_eip;
8292         ret = emulate_int_real(ctxt, irq);
8293
8294         if (ret != X86EMUL_CONTINUE) {
8295                 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
8296         } else {
8297                 ctxt->eip = ctxt->_eip;
8298                 kvm_rip_write(vcpu, ctxt->eip);
8299                 kvm_set_rflags(vcpu, ctxt->eflags);
8300         }
8301 }
8302 EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt);
8303
8304 static void prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data,
8305                                            u8 ndata, u8 *insn_bytes, u8 insn_size)
8306 {
8307         struct kvm_run *run = vcpu->run;
8308         u64 info[5];
8309         u8 info_start;
8310
8311         /*
8312          * Zero the whole array used to retrieve the exit info, as casting to
8313          * u32 for select entries will leave some chunks uninitialized.
8314          */
8315         memset(&info, 0, sizeof(info));
8316
8317         static_call(kvm_x86_get_exit_info)(vcpu, (u32 *)&info[0], &info[1],
8318                                            &info[2], (u32 *)&info[3],
8319                                            (u32 *)&info[4]);
8320
8321         run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
8322         run->emulation_failure.suberror = KVM_INTERNAL_ERROR_EMULATION;
8323
8324         /*
8325          * There's currently space for 13 entries, but 5 are used for the exit
8326          * reason and info.  Restrict to 4 to reduce the maintenance burden
8327          * when expanding kvm_run.emulation_failure in the future.
8328          */
8329         if (WARN_ON_ONCE(ndata > 4))
8330                 ndata = 4;
8331
8332         /* Always include the flags as a 'data' entry. */
8333         info_start = 1;
8334         run->emulation_failure.flags = 0;
8335
8336         if (insn_size) {
8337                 BUILD_BUG_ON((sizeof(run->emulation_failure.insn_size) +
8338                               sizeof(run->emulation_failure.insn_bytes) != 16));
8339                 info_start += 2;
8340                 run->emulation_failure.flags |=
8341                         KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES;
8342                 run->emulation_failure.insn_size = insn_size;
8343                 memset(run->emulation_failure.insn_bytes, 0x90,
8344                        sizeof(run->emulation_failure.insn_bytes));
8345                 memcpy(run->emulation_failure.insn_bytes, insn_bytes, insn_size);
8346         }
8347
8348         memcpy(&run->internal.data[info_start], info, sizeof(info));
8349         memcpy(&run->internal.data[info_start + ARRAY_SIZE(info)], data,
8350                ndata * sizeof(data[0]));
8351
8352         run->emulation_failure.ndata = info_start + ARRAY_SIZE(info) + ndata;
8353 }
8354
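/*
 * Userspace observes the result as a KVM_EXIT_INTERNAL_ERROR with
 * suberror KVM_INTERNAL_ERROR_EMULATION.  A minimal consumer sketch
 * (dump_insn() is a hypothetical pretty-printer):
 *
 *	if (run->exit_reason == KVM_EXIT_INTERNAL_ERROR &&
 *	    run->emulation_failure.suberror == KVM_INTERNAL_ERROR_EMULATION &&
 *	    (run->emulation_failure.flags &
 *	     KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES))
 *		dump_insn(run->emulation_failure.insn_bytes,
 *			  run->emulation_failure.insn_size);
 */
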
8355 static void prepare_emulation_ctxt_failure_exit(struct kvm_vcpu *vcpu)
8356 {
8357         struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
8358
8359         prepare_emulation_failure_exit(vcpu, NULL, 0, ctxt->fetch.data,
8360                                        ctxt->fetch.end - ctxt->fetch.data);
8361 }
8362
8363 void __kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data,
8364                                           u8 ndata)
8365 {
8366         prepare_emulation_failure_exit(vcpu, data, ndata, NULL, 0);
8367 }
8368 EXPORT_SYMBOL_GPL(__kvm_prepare_emulation_failure_exit);
8369
8370 void kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu)
8371 {
8372         __kvm_prepare_emulation_failure_exit(vcpu, NULL, 0);
8373 }
8374 EXPORT_SYMBOL_GPL(kvm_prepare_emulation_failure_exit);
8375
8376 static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type)
8377 {
8378         struct kvm *kvm = vcpu->kvm;
8379
8380         ++vcpu->stat.insn_emulation_fail;
8381         trace_kvm_emulate_insn_failed(vcpu);
8382
8383         if (emulation_type & EMULTYPE_VMWARE_GP) {
8384                 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
8385                 return 1;
8386         }
8387
8388         if (kvm->arch.exit_on_emulation_error ||
8389             (emulation_type & EMULTYPE_SKIP)) {
8390                 prepare_emulation_ctxt_failure_exit(vcpu);
8391                 return 0;
8392         }
8393
8394         kvm_queue_exception(vcpu, UD_VECTOR);
8395
8396         if (!is_guest_mode(vcpu) && static_call(kvm_x86_get_cpl)(vcpu) == 0) {
8397                 prepare_emulation_ctxt_failure_exit(vcpu);
8398                 return 0;
8399         }
8400
8401         return 1;
8402 }
8403
8404 static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
8405                                   bool write_fault_to_shadow_pgtable,
8406                                   int emulation_type)
8407 {
8408         gpa_t gpa = cr2_or_gpa;
8409         kvm_pfn_t pfn;
8410
8411         if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF))
8412                 return false;
8413
8414         if (WARN_ON_ONCE(is_guest_mode(vcpu)) ||
8415             WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF)))
8416                 return false;
8417
8418         if (!vcpu->arch.mmu->root_role.direct) {
8419                 /*
8420                  * Write permission should be allowed since only
8421                  * write accesses need to be emulated.
8422                  */
8423                 gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);
8424
8425                 /*
8426                  * If the mapping is invalid in the guest, let the CPU
8427                  * retry it to generate the fault.
8428                  */
8429                 if (gpa == INVALID_GPA)
8430                         return true;
8431         }
8432
8433         /*
8434          * Do not retry the unhandleable instruction if it faults on
8435          * read-only host memory; otherwise it will enter an infinite loop:
8436          * retry instruction -> write #PF -> emulation failure -> retry
8437          * instruction -> ...
8438          */
8439         pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa));
8440
8441         /*
8442          * If the instruction failed on an error pfn, it cannot be fixed;
8443          * report the error to userspace.
8444          */
8445         if (is_error_noslot_pfn(pfn))
8446                 return false;
8447
8448         kvm_release_pfn_clean(pfn);
8449
8450         /* The instructions are well-emulated on direct mmu. */
8451         if (vcpu->arch.mmu->root_role.direct) {
8452                 unsigned int indirect_shadow_pages;
8453
8454                 write_lock(&vcpu->kvm->mmu_lock);
8455                 indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
8456                 write_unlock(&vcpu->kvm->mmu_lock);
8457
8458                 if (indirect_shadow_pages)
8459                         kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
8460
8461                 return true;
8462         }
8463
8464         /*
8465          * If emulation was due to an access to a shadowed page table
8466          * and it failed, try to unshadow the page and re-enter the
8467          * guest to let the CPU execute the instruction.
8468          */
8469         kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
8470
8471         /*
8472          * If the access faults on its own page table, it cannot
8473          * be fixed by unprotecting the shadow page; it should
8474          * be reported to userspace.
8475          */
8476         return !write_fault_to_shadow_pgtable;
8477 }
8478
8479 static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
8480                               gpa_t cr2_or_gpa,  int emulation_type)
8481 {
8482         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
8483         unsigned long last_retry_eip, last_retry_addr, gpa = cr2_or_gpa;
8484
8485         last_retry_eip = vcpu->arch.last_retry_eip;
8486         last_retry_addr = vcpu->arch.last_retry_addr;
8487
8488         /*
8489          * If the emulation is caused by a #PF and the faulting instruction
8490          * does not write page tables, the VM-EXIT was caused by shadow
8491          * page protection; we can zap the shadow page and retry this
8492          * instruction directly.
8493          *
8494          * Note: if the guest uses a non-page-table-modifying instruction
8495          * on the PDE that points to the instruction, then we will unmap
8496          * the instruction and enter an infinite loop.  So, we cache the
8497          * last retried eip and the last faulting address; if we see the
8498          * same eip and address again, we can break out of the potential
8499          * infinite loop.
8500          */
8501         vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
8502
8503         if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF))
8504                 return false;
8505
8506         if (WARN_ON_ONCE(is_guest_mode(vcpu)) ||
8507             WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF)))
8508                 return false;
8509
8510         if (x86_page_table_writing_insn(ctxt))
8511                 return false;
8512
8513         if (ctxt->eip == last_retry_eip && last_retry_addr == cr2_or_gpa)
8514                 return false;
8515
8516         vcpu->arch.last_retry_eip = ctxt->eip;
8517         vcpu->arch.last_retry_addr = cr2_or_gpa;
8518
8519         if (!vcpu->arch.mmu->root_role.direct)
8520                 gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);
8521
8522         kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
8523
8524         return true;
8525 }
8526
8527 static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
8528 static int complete_emulated_pio(struct kvm_vcpu *vcpu);
8529
8530 static void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm)
8531 {
8532         trace_kvm_smm_transition(vcpu->vcpu_id, vcpu->arch.smbase, entering_smm);
8533
8534         if (entering_smm) {
8535                 vcpu->arch.hflags |= HF_SMM_MASK;
8536         } else {
8537                 vcpu->arch.hflags &= ~(HF_SMM_MASK | HF_SMM_INSIDE_NMI_MASK);
8538
8539                 /* Process a latched INIT or SMI, if any.  */
8540                 kvm_make_request(KVM_REQ_EVENT, vcpu);
8541
8542                 /*
8543                  * Even if KVM_SET_SREGS2 loaded PDPTRs out of band,
8544                  * on SMM exit we still need to reload them from
8545                  * guest memory.
8546                  */
8547                 vcpu->arch.pdptrs_from_userspace = false;
8548         }
8549
8550         kvm_mmu_reset_context(vcpu);
8551 }
8552
8553 static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
8554                                 unsigned long *db)
8555 {
8556         u32 dr6 = 0;
8557         int i;
8558         u32 enable, rwlen;
8559
8560         enable = dr7;
8561         rwlen = dr7 >> 16;
8562         for (i = 0; i < 4; i++, enable >>= 2, rwlen >>= 4)
8563                 if ((enable & 3) && (rwlen & 15) == type && db[i] == addr)
8564                         dr6 |= (1 << i);
8565         return dr6;
8566 }
8567
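/*
 * Per the x86 debug architecture, DR7 bits 1:0 through 7:6 hold the
 * local/global enable pairs for breakpoints 0-3, and bits 19:16 through
 * 31:28 hold the matching R/W+LEN nibbles; kvm_vcpu_check_hw_bp() walks
 * them two and four bits at a time.  Code breakpoints correspond to
 * type 0 (R/W = 00, LEN = 00), which is what the callers below pass.
 */
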
8568 static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu)
8569 {
8570         struct kvm_run *kvm_run = vcpu->run;
8571
8572         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
8573                 kvm_run->debug.arch.dr6 = DR6_BS | DR6_ACTIVE_LOW;
8574                 kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu);
8575                 kvm_run->debug.arch.exception = DB_VECTOR;
8576                 kvm_run->exit_reason = KVM_EXIT_DEBUG;
8577                 return 0;
8578         }
8579         kvm_queue_exception_p(vcpu, DB_VECTOR, DR6_BS);
8580         return 1;
8581 }
8582
8583 int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
8584 {
8585         unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu);
8586         int r;
8587
8588         r = static_call(kvm_x86_skip_emulated_instruction)(vcpu);
8589         if (unlikely(!r))
8590                 return 0;
8591
8592         kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_INSTRUCTIONS);
8593
8594         /*
8595          * rflags is the old, "raw" value of the flags.  The new value has
8596          * not been saved yet.
8597          *
8598          * This is correct even for TF set by the guest, because "the
8599          * processor will not generate this exception after the instruction
8600          * that sets the TF flag".
8601          */
8602         if (unlikely(rflags & X86_EFLAGS_TF))
8603                 r = kvm_vcpu_do_singlestep(vcpu);
8604         return r;
8605 }
8606 EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction);
8607
8608 static bool kvm_is_code_breakpoint_inhibited(struct kvm_vcpu *vcpu)
8609 {
8610         u32 shadow;
8611
8612         if (kvm_get_rflags(vcpu) & X86_EFLAGS_RF)
8613                 return true;
8614
8615         /*
8616          * Intel CPUs inhibit code #DBs when MOV/POP SS blocking is active,
8617          * but AMD CPUs do not.  MOV/POP SS blocking is rare, check that first
8618          * but AMD CPUs do not.  MOV/POP SS blocking is rare, so check it first
8619          */
8620         shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu);
8621         return (shadow & KVM_X86_SHADOW_INT_MOV_SS) &&
8622                guest_cpuid_is_intel(vcpu);
8623 }
8624
8625 static bool kvm_vcpu_check_code_breakpoint(struct kvm_vcpu *vcpu,
8626                                            int emulation_type, int *r)
8627 {
8628         WARN_ON_ONCE(emulation_type & EMULTYPE_NO_DECODE);
8629
8630         /*
8631          * Do not check for code breakpoints if hardware has already done the
8632          * checks, as inferred from the emulation type.  On NO_DECODE and SKIP,
8633          * the instruction has passed all exception checks, and all intercepted
8634          * exceptions that trigger emulation have lower priority than code
8635          * breakpoints, i.e. the fact that the intercepted exception occurred
8636          * means any code breakpoints have already been serviced.
8637          *
8638          * Note, KVM needs to check for code #DBs on EMULTYPE_TRAP_UD_FORCED as
8639          * hardware has checked the RIP of the magic prefix, but not the RIP of
8640          * the instruction being emulated.  The intent of forced emulation is
8641          * to behave as if KVM intercepted the instruction without an exception
8642          * and without a prefix.
8643          */
8644         if (emulation_type & (EMULTYPE_NO_DECODE | EMULTYPE_SKIP |
8645                               EMULTYPE_TRAP_UD | EMULTYPE_VMWARE_GP | EMULTYPE_PF))
8646                 return false;
8647
8648         if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) &&
8649             (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) {
8650                 struct kvm_run *kvm_run = vcpu->run;
8651                 unsigned long eip = kvm_get_linear_rip(vcpu);
8652                 u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0,
8653                                            vcpu->arch.guest_debug_dr7,
8654                                            vcpu->arch.eff_db);
8655
8656                 if (dr6 != 0) {
8657                         kvm_run->debug.arch.dr6 = dr6 | DR6_ACTIVE_LOW;
8658                         kvm_run->debug.arch.pc = eip;
8659                         kvm_run->debug.arch.exception = DB_VECTOR;
8660                         kvm_run->exit_reason = KVM_EXIT_DEBUG;
8661                         *r = 0;
8662                         return true;
8663                 }
8664         }
8665
8666         if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) &&
8667             !kvm_is_code_breakpoint_inhibited(vcpu)) {
8668                 unsigned long eip = kvm_get_linear_rip(vcpu);
8669                 u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0,
8670                                            vcpu->arch.dr7,
8671                                            vcpu->arch.db);
8672
8673                 if (dr6 != 0) {
8674                         kvm_queue_exception_p(vcpu, DB_VECTOR, dr6);
8675                         *r = 1;
8676                         return true;
8677                 }
8678         }
8679
8680         return false;
8681 }
8682
8683 static bool is_vmware_backdoor_opcode(struct x86_emulate_ctxt *ctxt)
8684 {
8685         switch (ctxt->opcode_len) {
8686         case 1:
8687                 switch (ctxt->b) {
8688                 case 0xe4:      /* IN */
8689                 case 0xe5:
8690                 case 0xec:
8691                 case 0xed:
8692                 case 0xe6:      /* OUT */
8693                 case 0xe7:
8694                 case 0xee:
8695                 case 0xef:
8696                 case 0x6c:      /* INS */
8697                 case 0x6d:
8698                 case 0x6e:      /* OUTS */
8699                 case 0x6f:
8700                         return true;
8701                 }
8702                 break;
8703         case 2:
8704                 switch (ctxt->b) {
8705                 case 0x33:      /* RDPMC */
8706                         return true;
8707                 }
8708                 break;
8709         }
8710
8711         return false;
8712 }
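
/*
 * Background note (assumption, for context only): the VMware backdoor is
 * driven by I/O instructions, canonically IN/OUT to port 0x5658 ("VX")
 * with the magic 0x564d5868 ("VMXh") in EAX, plus RDPMC, which is why only
 * the I/O and RDPMC opcodes above are eligible under EMULTYPE_VMWARE_GP.
 */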
8713
8714 /*
8715  * Decode an instruction for emulation.  The caller is responsible for handling
8716  * code breakpoints.  Note, manually detecting code breakpoints is unnecessary
8717  * (and wrong) when emulating on an intercepted fault-like exception[*], as
8718  * code breakpoints have higher priority and thus have already been checked
8719  * by hardware.
8720  *
8721  * [*] Except #MC, which is higher priority, but KVM should never emulate in
8722  *     response to a machine check.
8723  */
8724 int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
8725                                     void *insn, int insn_len)
8726 {
8727         struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
8728         int r;
8729
8730         init_emulate_ctxt(vcpu);
8731
8732         r = x86_decode_insn(ctxt, insn, insn_len, emulation_type);
8733
8734         trace_kvm_emulate_insn_start(vcpu);
8735         ++vcpu->stat.insn_emulation;
8736
8737         return r;
8738 }
8739 EXPORT_SYMBOL_GPL(x86_decode_emulated_instruction);
8740
8741 int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
8742                             int emulation_type, void *insn, int insn_len)
8743 {
8744         int r;
8745         struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
8746         bool writeback = true;
8747         bool write_fault_to_spt;
8748
8749         if (unlikely(!kvm_can_emulate_insn(vcpu, emulation_type, insn, insn_len)))
8750                 return 1;
8751
8752         vcpu->arch.l1tf_flush_l1d = true;
8753
8754         /*
8755          * Clear write_fault_to_shadow_pgtable here to ensure it is
8756          * never reused.
8757          */
8758         write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable;
8759         vcpu->arch.write_fault_to_shadow_pgtable = false;
8760
8761         if (!(emulation_type & EMULTYPE_NO_DECODE)) {
8762                 kvm_clear_exception_queue(vcpu);
8763
8764                 /*
8765                  * Return immediately if RIP hits a code breakpoint, such #DBs
8766                  * are fault-like and are higher priority than any faults on
8767                  * the code fetch itself.
8768                  */
8769                 if (kvm_vcpu_check_code_breakpoint(vcpu, emulation_type, &r))
8770                         return r;
8771
8772                 r = x86_decode_emulated_instruction(vcpu, emulation_type,
8773                                                     insn, insn_len);
8774                 if (r != EMULATION_OK)  {
8775                         if ((emulation_type & EMULTYPE_TRAP_UD) ||
8776                             (emulation_type & EMULTYPE_TRAP_UD_FORCED)) {
8777                                 kvm_queue_exception(vcpu, UD_VECTOR);
8778                                 return 1;
8779                         }
8780                         if (reexecute_instruction(vcpu, cr2_or_gpa,
8781                                                   write_fault_to_spt,
8782                                                   emulation_type))
8783                                 return 1;
8784                         if (ctxt->have_exception) {
8785                                 /*
8786                                  * #UD should result in just EMULATION_FAILED, and trap-like
8787                                  * exception should not be encountered during decode.
8788                                  * exceptions should not be encountered during decode.
8789                                 WARN_ON_ONCE(ctxt->exception.vector == UD_VECTOR ||
8790                                              exception_type(ctxt->exception.vector) == EXCPT_TRAP);
8791                                 inject_emulated_exception(vcpu);
8792                                 return 1;
8793                         }
8794                         return handle_emulation_failure(vcpu, emulation_type);
8795                 }
8796         }
8797
8798         if ((emulation_type & EMULTYPE_VMWARE_GP) &&
8799             !is_vmware_backdoor_opcode(ctxt)) {
8800                 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
8801                 return 1;
8802         }
8803
8804         /*
8805          * EMULTYPE_SKIP without EMULTYPE_COMPLETE_USER_EXIT is intended for
8806          * use *only* by vendor callbacks for kvm_skip_emulated_instruction().
8807          * The caller is responsible for updating interruptibility state and
8808          * injecting single-step #DBs.
8809          */
8810         if (emulation_type & EMULTYPE_SKIP) {
8811                 if (ctxt->mode != X86EMUL_MODE_PROT64)
8812                         ctxt->eip = (u32)ctxt->_eip;
8813                 else
8814                         ctxt->eip = ctxt->_eip;
8815
8816                 if (emulation_type & EMULTYPE_COMPLETE_USER_EXIT) {
8817                         r = 1;
8818                         goto writeback;
8819                 }
8820
8821                 kvm_rip_write(vcpu, ctxt->eip);
8822                 if (ctxt->eflags & X86_EFLAGS_RF)
8823                         kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF);
8824                 return 1;
8825         }
8826
8827         if (retry_instruction(ctxt, cr2_or_gpa, emulation_type))
8828                 return 1;
8829
8830         /* This is needed for the VMware backdoor interface to work, since
8831            it changes register values during I/O operations.  */
8832         if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
8833                 vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
8834                 emulator_invalidate_register_cache(ctxt);
8835         }
8836
8837 restart:
8838         if (emulation_type & EMULTYPE_PF) {
8839                 /* Save the faulting GPA (cr2) in the address field */
8840                 ctxt->exception.address = cr2_or_gpa;
8841
8842                 /* With shadow page tables, cr2 contains a GVA or nGPA. */
8843                 if (vcpu->arch.mmu->root_role.direct) {
8844                         ctxt->gpa_available = true;
8845                         ctxt->gpa_val = cr2_or_gpa;
8846                 }
8847         } else {
8848                 /* Sanitize the address out of an abundance of paranoia. */
8849                 ctxt->exception.address = 0;
8850         }
8851
8852         r = x86_emulate_insn(ctxt);
8853
8854         if (r == EMULATION_INTERCEPTED)
8855                 return 1;
8856
8857         if (r == EMULATION_FAILED) {
8858                 if (reexecute_instruction(vcpu, cr2_or_gpa, write_fault_to_spt,
8859                                         emulation_type))
8860                         return 1;
8861
8862                 return handle_emulation_failure(vcpu, emulation_type);
8863         }
8864
8865         if (ctxt->have_exception) {
8866                 r = 1;
8867                 inject_emulated_exception(vcpu);
8868         } else if (vcpu->arch.pio.count) {
8869                 if (!vcpu->arch.pio.in) {
8870                         /* FIXME: return into emulator if single-stepping.  */
8871                         vcpu->arch.pio.count = 0;
8872                 } else {
8873                         writeback = false;
8874                         vcpu->arch.complete_userspace_io = complete_emulated_pio;
8875                 }
8876                 r = 0;
8877         } else if (vcpu->mmio_needed) {
8878                 ++vcpu->stat.mmio_exits;
8879
8880                 if (!vcpu->mmio_is_write)
8881                         writeback = false;
8882                 r = 0;
8883                 vcpu->arch.complete_userspace_io = complete_emulated_mmio;
8884         } else if (vcpu->arch.complete_userspace_io) {
8885                 writeback = false;
8886                 r = 0;
8887         } else if (r == EMULATION_RESTART)
8888                 goto restart;
8889         else
8890                 r = 1;
8891
8892 writeback:
8893         if (writeback) {
8894                 unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu);
8895                 toggle_interruptibility(vcpu, ctxt->interruptibility);
8896                 vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
8897
8898                 /*
8899                  * Note, EXCPT_DB is assumed to be fault-like as the emulator
8900                  * only supports code breakpoints and general detect #DB, both
8901                  * of which are fault-like.
8902                  */
8903                 if (!ctxt->have_exception ||
8904                     exception_type(ctxt->exception.vector) == EXCPT_TRAP) {
8905                         kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_INSTRUCTIONS);
8906                         if (ctxt->is_branch)
8907                                 kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
8908                         kvm_rip_write(vcpu, ctxt->eip);
8909                         if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
8910                                 r = kvm_vcpu_do_singlestep(vcpu);
8911                         static_call_cond(kvm_x86_update_emulated_instruction)(vcpu);
8912                         __kvm_set_rflags(vcpu, ctxt->eflags);
8913                 }
8914
8915                 /*
8916                  * For STI, interrupts are shadowed, so KVM_REQ_EVENT will
8917                  * do nothing, and it will be requested again as soon as
8918                  * the shadow expires.  But we still need to check here,
8919                  * because POPF has no interrupt shadow.
8920                  */
8921                 if (unlikely((ctxt->eflags & ~rflags) & X86_EFLAGS_IF))
8922                         kvm_make_request(KVM_REQ_EVENT, vcpu);
8923         } else
8924                 vcpu->arch.emulate_regs_need_sync_to_vcpu = true;
8925
8926         return r;
8927 }
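
/*
 * For reference (inferred from the callers): as with most exit handlers,
 * x86_emulate_instruction() returning 1 means "handled, resume the guest"
 * and 0 means "exit to userspace", with vcpu->arch.complete_userspace_io
 * set whenever emulation must resume after userspace finishes a PIO or
 * MMIO access.
 */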
8928
8929 int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type)
8930 {
8931         return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
8932 }
8933 EXPORT_SYMBOL_GPL(kvm_emulate_instruction);
8934
8935 int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
8936                                         void *insn, int insn_len)
8937 {
8938         return x86_emulate_instruction(vcpu, 0, 0, insn, insn_len);
8939 }
8940 EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer);
8941
8942 static int complete_fast_pio_out_port_0x7e(struct kvm_vcpu *vcpu)
8943 {
8944         vcpu->arch.pio.count = 0;
8945         return 1;
8946 }
8947
8948 static int complete_fast_pio_out(struct kvm_vcpu *vcpu)
8949 {
8950         vcpu->arch.pio.count = 0;
8951
8952         if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip)))
8953                 return 1;
8954
8955         return kvm_skip_emulated_instruction(vcpu);
8956 }
8957
8958 static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size,
8959                             unsigned short port)
8960 {
8961         unsigned long val = kvm_rax_read(vcpu);
8962         int ret = emulator_pio_out(vcpu, size, port, &val, 1);
8963
8964         if (ret)
8965                 return ret;
8966
8967         /*
8968          * Work around userspace that relies on the old KVM behavior of %rip being
8969          * incremented prior to exiting to userspace to handle "OUT 0x7e".
8970          */
8971         if (port == 0x7e &&
8972             kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_OUT_7E_INC_RIP)) {
8973                 vcpu->arch.complete_userspace_io =
8974                         complete_fast_pio_out_port_0x7e;
8975                 kvm_skip_emulated_instruction(vcpu);
8976         } else {
8977                 vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu);
8978                 vcpu->arch.complete_userspace_io = complete_fast_pio_out;
8979         }
8980         return 0;
8981 }
8982
8983 static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
8984 {
8985         unsigned long val;
8986
8987         /* We should only ever be called with arch.pio.count equal to 1 */
8988         BUG_ON(vcpu->arch.pio.count != 1);
8989
8990         if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) {
8991                 vcpu->arch.pio.count = 0;
8992                 return 1;
8993         }
8994
8995         /* For size less than 4 we merge, else we zero extend */
8996         val = (vcpu->arch.pio.size < 4) ? kvm_rax_read(vcpu) : 0;
8997
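        /*
         * Worked example (illustrative): for a 2-byte IN with RAX =
         * 0x1122334455667788 and port data 0xabcd, the merge below yields
         * RAX = 0x112233445566abcd, whereas a 4-byte IN of 0xdeadbeef zero
         * extends to RAX = 0x00000000deadbeef, matching x86 register write
         * semantics.
         */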
8998         complete_emulator_pio_in(vcpu, &val);
8999         kvm_rax_write(vcpu, val);
9000
9001         return kvm_skip_emulated_instruction(vcpu);
9002 }
9003
9004 static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
9005                            unsigned short port)
9006 {
9007         unsigned long val;
9008         int ret;
9009
9010         /* For size less than 4 we merge, else we zero extend */
9011         val = (size < 4) ? kvm_rax_read(vcpu) : 0;
9012
9013         ret = emulator_pio_in(vcpu, size, port, &val, 1);
9014         if (ret) {
9015                 kvm_rax_write(vcpu, val);
9016                 return ret;
9017         }
9018
9019         vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu);
9020         vcpu->arch.complete_userspace_io = complete_fast_pio_in;
9021
9022         return 0;
9023 }
9024
9025 int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in)
9026 {
9027         int ret;
9028
9029         if (in)
9030                 ret = kvm_fast_pio_in(vcpu, size, port);
9031         else
9032                 ret = kvm_fast_pio_out(vcpu, size, port);
9033         return ret && kvm_skip_emulated_instruction(vcpu);
9034 }
9035 EXPORT_SYMBOL_GPL(kvm_fast_pio);
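
/*
 * For reference (inferred): kvm_fast_pio() returns 1 only if the access
 * completed in-kernel *and* the instruction was successfully skipped;
 * otherwise it returns 0 to exit to userspace, and the
 * complete_fast_pio_{in,out} callback finishes the job on the next KVM_RUN.
 */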
9036
9037 static int kvmclock_cpu_down_prep(unsigned int cpu)
9038 {
9039         __this_cpu_write(cpu_tsc_khz, 0);
9040         return 0;
9041 }
9042
9043 static void tsc_khz_changed(void *data)
9044 {
9045         struct cpufreq_freqs *freq = data;
9046         unsigned long khz = 0;
9047
9048         if (data)
9049                 khz = freq->new;
9050         else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
9051                 khz = cpufreq_quick_get(raw_smp_processor_id());
9052         if (!khz)
9053                 khz = tsc_khz;
9054         __this_cpu_write(cpu_tsc_khz, khz);
9055 }
9056
9057 #ifdef CONFIG_X86_64
9058 static void kvm_hyperv_tsc_notifier(void)
9059 {
9060         struct kvm *kvm;
9061         int cpu;
9062
9063         mutex_lock(&kvm_lock);
9064         list_for_each_entry(kvm, &vm_list, vm_list)
9065                 kvm_make_mclock_inprogress_request(kvm);
9066
9067         /* no guest entries from this point */
9068         hyperv_stop_tsc_emulation();
9069
9070         /* TSC frequency always matches when on Hyper-V */
9071         for_each_present_cpu(cpu)
9072                 per_cpu(cpu_tsc_khz, cpu) = tsc_khz;
9073         kvm_caps.max_guest_tsc_khz = tsc_khz;
9074
9075         list_for_each_entry(kvm, &vm_list, vm_list) {
9076                 __kvm_start_pvclock_update(kvm);
9077                 pvclock_update_vm_gtod_copy(kvm);
9078                 kvm_end_pvclock_update(kvm);
9079         }
9080
9081         mutex_unlock(&kvm_lock);
9082 }
9083 #endif
9084
9085 static void __kvmclock_cpufreq_notifier(struct cpufreq_freqs *freq, int cpu)
9086 {
9087         struct kvm *kvm;
9088         struct kvm_vcpu *vcpu;
9089         int send_ipi = 0;
9090         unsigned long i;
9091
9092         /*
9093          * We allow guests to temporarily run on slowing clocks,
9094          * provided we notify them after, or to run on accelerating
9095          * clocks, provided we notify them before.  Thus time never
9096          * goes backwards.
9097          *
9098          * However, we have a problem.  We can't atomically update
9099          * the frequency of a given CPU from this function; it is
9100          * merely a notifier, which can be called from any CPU.
9101          * Changing the TSC frequency at arbitrary points in time
9102          * requires a recomputation of local variables related to
9103          * the TSC for each VCPU.  We must flag these local variables
9104          * to be updated and be sure the update takes place with the
9105          * new frequency before any guests proceed.
9106          *
9107          * Unfortunately, the combination of hotplug CPU and frequency
9108          * change creates an intractable locking scenario; the order
9109          * of when these callouts happen is undefined with respect to
9110          * CPU hotplug, and they can race with each other.  As such,
9111          * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is
9112          * undefined; you can actually have a CPU frequency change take
9113          * place in between the computation of X and the setting of the
9114          * variable.  To protect against this problem, all updates of
9115          * the per_cpu tsc_khz variable are done in an interrupt
9116          * protected IPI, and all callers wishing to update the value
9117          * must wait for a synchronous IPI to complete (which is trivial
9118          * if the caller is on the CPU already).  This establishes the
9119          * necessary total order on variable updates.
9120          *
9121          * Note that because a guest time update may take place
9122          * anytime after the setting of the VCPU's request bit, the
9123          * correct TSC value must be set before the request.  However,
9124          * to ensure the update actually makes it to any guest which
9125          * starts running in hardware virtualization between the set
9126          * and the acquisition of the spinlock, we must also ping the
9127          * CPU after setting the request bit.
9129          */
9130
9131         smp_call_function_single(cpu, tsc_khz_changed, freq, 1);
9132
9133         mutex_lock(&kvm_lock);
9134         list_for_each_entry(kvm, &vm_list, vm_list) {
9135                 kvm_for_each_vcpu(i, vcpu, kvm) {
9136                         if (vcpu->cpu != cpu)
9137                                 continue;
9138                         kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
9139                         if (vcpu->cpu != raw_smp_processor_id())
9140                                 send_ipi = 1;
9141                 }
9142         }
9143         mutex_unlock(&kvm_lock);
9144
9145         if (freq->old < freq->new && send_ipi) {
9146                 /*
9147                  * The frequency was raised.  We must ensure the guest
9148                  * doesn't see old kvmclock values while running with the
9149                  * new frequency; otherwise the guest could observe time
9150                  * going backwards.
9151                  *
9152                  * If we are updating the frequency for another CPU
9153                  * (which might be running in guest context), send an
9154                  * interrupt to kick that CPU out of guest context.  The
9155                  * next time guest context is entered, kvmclock will be
9156                  * updated, so the guest will not see stale values.
9157                  */
9158                 smp_call_function_single(cpu, tsc_khz_changed, freq, 1);
9159         }
9160 }
9161
9162 static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
9163                                      void *data)
9164 {
9165         struct cpufreq_freqs *freq = data;
9166         int cpu;
9167
9168         if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
9169                 return 0;
9170         if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
9171                 return 0;
9172
9173         for_each_cpu(cpu, freq->policy->cpus)
9174                 __kvmclock_cpufreq_notifier(freq, cpu);
9175
9176         return 0;
9177 }
9178
9179 static struct notifier_block kvmclock_cpufreq_notifier_block = {
9180         .notifier_call  = kvmclock_cpufreq_notifier
9181 };
9182
9183 static int kvmclock_cpu_online(unsigned int cpu)
9184 {
9185         tsc_khz_changed(NULL);
9186         return 0;
9187 }
9188
9189 static void kvm_timer_init(void)
9190 {
9191         if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
9192                 max_tsc_khz = tsc_khz;
9193
9194                 if (IS_ENABLED(CONFIG_CPU_FREQ)) {
9195                         struct cpufreq_policy *policy;
9196                         int cpu;
9197
9198                         cpu = get_cpu();
9199                         policy = cpufreq_cpu_get(cpu);
9200                         if (policy) {
9201                                 if (policy->cpuinfo.max_freq)
9202                                         max_tsc_khz = policy->cpuinfo.max_freq;
9203                                 cpufreq_cpu_put(policy);
9204                         }
9205                         put_cpu();
9206                 }
9207                 cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
9208                                           CPUFREQ_TRANSITION_NOTIFIER);
9209         }
9210
9211         cpuhp_setup_state(CPUHP_AP_X86_KVM_CLK_ONLINE, "x86/kvm/clk:online",
9212                           kvmclock_cpu_online, kvmclock_cpu_down_prep);
9213 }
9214
9215 #ifdef CONFIG_X86_64
9216 static void pvclock_gtod_update_fn(struct work_struct *work)
9217 {
9218         struct kvm *kvm;
9219         struct kvm_vcpu *vcpu;
9220         unsigned long i;
9221
9222         mutex_lock(&kvm_lock);
9223         list_for_each_entry(kvm, &vm_list, vm_list)
9224                 kvm_for_each_vcpu(i, vcpu, kvm)
9225                         kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
9226         atomic_set(&kvm_guest_has_master_clock, 0);
9227         mutex_unlock(&kvm_lock);
9228 }
9229
9230 static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);
9231
9232 /*
9233  * Indirection to move queue_work() out of the tk_core.seq write held
9234  * region to prevent possible deadlocks against time accessors which
9235  * are invoked with work related locks held.
9236  */
9237 static void pvclock_irq_work_fn(struct irq_work *w)
9238 {
9239         queue_work(system_long_wq, &pvclock_gtod_work);
9240 }
9241
9242 static DEFINE_IRQ_WORK(pvclock_irq_work, pvclock_irq_work_fn);
9243
9244 /*
9245  * Notification about pvclock gtod data update.
9246  */
9247 static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused,
9248                                void *priv)
9249 {
9250         struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
9251         struct timekeeper *tk = priv;
9252
9253         update_pvclock_gtod(tk);
9254
9255         /*
9256          * Disable master clock if host does not trust, or does not use,
9257          * TSC based clocksource. Delegate queue_work() to irq_work as
9258          * this is invoked with tk_core.seq write held.
9259          */
9260         if (!gtod_is_based_on_tsc(gtod->clock.vclock_mode) &&
9261             atomic_read(&kvm_guest_has_master_clock) != 0)
9262                 irq_work_queue(&pvclock_irq_work);
9263         return 0;
9264 }
9265
9266 static struct notifier_block pvclock_gtod_notifier = {
9267         .notifier_call = pvclock_gtod_notify,
9268 };
9269 #endif
9270
9271 int kvm_arch_init(void *opaque)
9272 {
9273         struct kvm_x86_init_ops *ops = opaque;
9274         u64 host_pat;
9275         int r;
9276
9277         if (kvm_x86_ops.hardware_enable) {
9278                 pr_err("kvm: already loaded vendor module '%s'\n", kvm_x86_ops.name);
9279                 return -EEXIST;
9280         }
9281
9282         if (!ops->cpu_has_kvm_support()) {
9283                 pr_err_ratelimited("kvm: no hardware support for '%s'\n",
9284                                    ops->runtime_ops->name);
9285                 return -EOPNOTSUPP;
9286         }
9287         if (ops->disabled_by_bios()) {
9288                 pr_err_ratelimited("kvm: support for '%s' disabled by bios\n",
9289                                    ops->runtime_ops->name);
9290                 return -EOPNOTSUPP;
9291         }
9292
9293         /*
9294          * KVM explicitly assumes that the guest has an FPU and
9295          * FXSAVE/FXRSTOR.  For example, the KVM_GET_FPU ioctl explicitly casts
9296          * the vCPU's FPU state to a struct fxregs_state.
9297          */
9298         if (!boot_cpu_has(X86_FEATURE_FPU) || !boot_cpu_has(X86_FEATURE_FXSR)) {
9299                 pr_err("kvm: inadequate fpu\n");
9300                 return -EOPNOTSUPP;
9301         }
9302
9303         if (IS_ENABLED(CONFIG_PREEMPT_RT) && !boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
9304                 pr_err("RT requires X86_FEATURE_CONSTANT_TSC\n");
9305                 return -EOPNOTSUPP;
9306         }
9307
9308         /*
9309          * KVM assumes that PAT entry '0' encodes WB memtype and simply zeroes
9310          * the PAT bits in SPTEs.  Bail if PAT[0] is programmed to something
9311          * other than WB.  Note, EPT doesn't utilize the PAT, but it isn't worth
9312          * special-casing EPT here.  PAT[0] is set to WB on RESET and also by the
9313          * kernel, i.e. failure indicates a kernel bug or broken firmware.
9314          */
9315         if (rdmsrl_safe(MSR_IA32_CR_PAT, &host_pat) ||
9316             (host_pat & GENMASK(2, 0)) != 6) {
9317                 pr_err("kvm: host PAT[0] is not WB\n");
9318                 return -EIO;
9319         }
9320
9321         x86_emulator_cache = kvm_alloc_emulator_cache();
9322         if (!x86_emulator_cache) {
9323                 pr_err("kvm: failed to allocate cache for x86 emulator\n");
9324                 return -ENOMEM;
9325         }
9326
9327         user_return_msrs = alloc_percpu(struct kvm_user_return_msrs);
9328         if (!user_return_msrs) {
9329                 pr_err("kvm: failed to allocate percpu kvm_user_return_msrs\n");
9330                 r = -ENOMEM;
9331                 goto out_free_x86_emulator_cache;
9332         }
9333         kvm_nr_uret_msrs = 0;
9334
9335         r = kvm_mmu_vendor_module_init();
9336         if (r)
9337                 goto out_free_percpu;
9338
9339         kvm_timer_init();
9340
9341         if (boot_cpu_has(X86_FEATURE_XSAVE)) {
9342                 host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
9343                 kvm_caps.supported_xcr0 = host_xcr0 & KVM_SUPPORTED_XCR0;
9344         }
9345
9346         if (pi_inject_timer == -1)
9347                 pi_inject_timer = housekeeping_enabled(HK_TYPE_TIMER);
9348 #ifdef CONFIG_X86_64
9349         pvclock_gtod_register_notifier(&pvclock_gtod_notifier);
9350
9351         if (hypervisor_is_type(X86_HYPER_MS_HYPERV))
9352                 set_hv_tscchange_cb(kvm_hyperv_tsc_notifier);
9353 #endif
9354
9355         return 0;
9356
9357 out_free_percpu:
9358         free_percpu(user_return_msrs);
9359 out_free_x86_emulator_cache:
9360         kmem_cache_destroy(x86_emulator_cache);
9361         return r;
9362 }
9363
9364 void kvm_arch_exit(void)
9365 {
9366 #ifdef CONFIG_X86_64
9367         if (hypervisor_is_type(X86_HYPER_MS_HYPERV))
9368                 clear_hv_tscchange_cb();
9369 #endif
9370         kvm_lapic_exit();
9371
9372         if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
9373                 cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
9374                                             CPUFREQ_TRANSITION_NOTIFIER);
9375         cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE);
9376 #ifdef CONFIG_X86_64
9377         pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier);
9378         irq_work_sync(&pvclock_irq_work);
9379         cancel_work_sync(&pvclock_gtod_work);
9380 #endif
9381         kvm_x86_ops.hardware_enable = NULL;
9382         kvm_mmu_vendor_module_exit();
9383         free_percpu(user_return_msrs);
9384         kmem_cache_destroy(x86_emulator_cache);
9385 #ifdef CONFIG_KVM_XEN
9386         static_key_deferred_flush(&kvm_xen_enabled);
9387         WARN_ON(static_branch_unlikely(&kvm_xen_enabled.key));
9388 #endif
9389 }
9390
9391 static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason)
9392 {
9393         /*
9394          * The vCPU has halted, e.g. executed HLT.  If the local APIC is
9395          * in-kernel, update the run state; the run loop will detect the
9396          * non-runnable state and halt the vCPU.  If the local APIC is
9397          * managed by userspace, exit to userspace, which is responsible for
9398          * handling wake events.
9399          */
9400         ++vcpu->stat.halt_exits;
9401         if (lapic_in_kernel(vcpu)) {
9402                 vcpu->arch.mp_state = state;
9403                 return 1;
9404         } else {
9405                 vcpu->run->exit_reason = reason;
9406                 return 0;
9407         }
9408 }
9409
9410 int kvm_emulate_halt_noskip(struct kvm_vcpu *vcpu)
9411 {
9412         return __kvm_emulate_halt(vcpu, KVM_MP_STATE_HALTED, KVM_EXIT_HLT);
9413 }
9414 EXPORT_SYMBOL_GPL(kvm_emulate_halt_noskip);
9415
9416 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
9417 {
9418         int ret = kvm_skip_emulated_instruction(vcpu);
9419         /*
9420          * TODO: we might be squashing a GUESTDBG_SINGLESTEP-triggered
9421          * KVM_EXIT_DEBUG here.
9422          */
9423         return kvm_emulate_halt_noskip(vcpu) && ret;
9424 }
9425 EXPORT_SYMBOL_GPL(kvm_emulate_halt);
9426
9427 int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu)
9428 {
9429         int ret = kvm_skip_emulated_instruction(vcpu);
9430
9431         return __kvm_emulate_halt(vcpu, KVM_MP_STATE_AP_RESET_HOLD,
9432                                         KVM_EXIT_AP_RESET_HOLD) && ret;
9433 }
9434 EXPORT_SYMBOL_GPL(kvm_emulate_ap_reset_hold);
9435
9436 #ifdef CONFIG_X86_64
9437 static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr,
9438                                 unsigned long clock_type)
9439 {
9440         struct kvm_clock_pairing clock_pairing;
9441         struct timespec64 ts;
9442         u64 cycle;
9443         int ret;
9444
9445         if (clock_type != KVM_CLOCK_PAIRING_WALLCLOCK)
9446                 return -KVM_EOPNOTSUPP;
9447
9448         /*
9449          * When the TSC is in permanent catchup mode, guests can't use the
9450          * pvclock_read_retry loop to get a consistent view of pvclock.
9451          */
9452         if (vcpu->arch.tsc_always_catchup)
9453                 return -KVM_EOPNOTSUPP;
9454
9455         if (!kvm_get_walltime_and_clockread(&ts, &cycle))
9456                 return -KVM_EOPNOTSUPP;
9457
9458         clock_pairing.sec = ts.tv_sec;
9459         clock_pairing.nsec = ts.tv_nsec;
9460         clock_pairing.tsc = kvm_read_l1_tsc(vcpu, cycle);
9461         clock_pairing.flags = 0;
9462         memset(&clock_pairing.pad, 0, sizeof(clock_pairing.pad));
9463
9464         ret = 0;
9465         if (kvm_write_guest(vcpu->kvm, paddr, &clock_pairing,
9466                             sizeof(struct kvm_clock_pairing)))
9467                 ret = -KVM_EFAULT;
9468
9469         return ret;
9470 }
9471 #endif
9472
9473 /*
9474  * kvm_pv_kick_cpu_op:  Kick a vcpu.
9475  *
9476  * @apicid: APIC ID of the vCPU to be kicked.
9477  */
9478 static void kvm_pv_kick_cpu_op(struct kvm *kvm, int apicid)
9479 {
9480         /*
9481          * All other fields are unused for APIC_DM_REMRD, but may be consumed by
9482          * common code, e.g. for tracing. Defer initialization to the compiler.
9483          */
9484         struct kvm_lapic_irq lapic_irq = {
9485                 .delivery_mode = APIC_DM_REMRD,
9486                 .dest_mode = APIC_DEST_PHYSICAL,
9487                 .shorthand = APIC_DEST_NOSHORT,
9488                 .dest_id = apicid,
9489         };
9490
9491         kvm_irq_delivery_to_apic(kvm, NULL, &lapic_irq, NULL);
9492 }
9493
9494 bool kvm_apicv_activated(struct kvm *kvm)
9495 {
9496         return (READ_ONCE(kvm->arch.apicv_inhibit_reasons) == 0);
9497 }
9498 EXPORT_SYMBOL_GPL(kvm_apicv_activated);
9499
9500 bool kvm_vcpu_apicv_activated(struct kvm_vcpu *vcpu)
9501 {
9502         ulong vm_reasons = READ_ONCE(vcpu->kvm->arch.apicv_inhibit_reasons);
9503         ulong vcpu_reasons = static_call(kvm_x86_vcpu_get_apicv_inhibit_reasons)(vcpu);
9504
9505         return (vm_reasons | vcpu_reasons) == 0;
9506 }
9507 EXPORT_SYMBOL_GPL(kvm_vcpu_apicv_activated);
9508
9509 static void set_or_clear_apicv_inhibit(unsigned long *inhibits,
9510                                        enum kvm_apicv_inhibit reason, bool set)
9511 {
9512         if (set)
9513                 __set_bit(reason, inhibits);
9514         else
9515                 __clear_bit(reason, inhibits);
9516
9517         trace_kvm_apicv_inhibit_changed(reason, set, *inhibits);
9518 }
9519
9520 static void kvm_apicv_init(struct kvm *kvm)
9521 {
9522         unsigned long *inhibits = &kvm->arch.apicv_inhibit_reasons;
9523
9524         init_rwsem(&kvm->arch.apicv_update_lock);
9525
9526         set_or_clear_apicv_inhibit(inhibits, APICV_INHIBIT_REASON_ABSENT, true);
9527
9528         if (!enable_apicv)
9529                 set_or_clear_apicv_inhibit(inhibits,
9530                                            APICV_INHIBIT_REASON_DISABLE, true);
9531 }
9532
9533 static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id)
9534 {
9535         struct kvm_vcpu *target = NULL;
9536         struct kvm_apic_map *map;
9537
9538         vcpu->stat.directed_yield_attempted++;
9539
9540         if (single_task_running())
9541                 goto no_yield;
9542
9543         rcu_read_lock();
9544         map = rcu_dereference(vcpu->kvm->arch.apic_map);
9545
9546         if (likely(map) && dest_id <= map->max_apic_id && map->phys_map[dest_id])
9547                 target = map->phys_map[dest_id]->vcpu;
9548
9549         rcu_read_unlock();
9550
9551         if (!target || !READ_ONCE(target->ready))
9552                 goto no_yield;
9553
9554         /* Ignore requests to yield to self */
9555         if (vcpu == target)
9556                 goto no_yield;
9557
9558         if (kvm_vcpu_yield_to(target) <= 0)
9559                 goto no_yield;
9560
9561         vcpu->stat.directed_yield_successful++;
9562
9563 no_yield:
9564         return;
9565 }
9566
9567 static int complete_hypercall_exit(struct kvm_vcpu *vcpu)
9568 {
9569         u64 ret = vcpu->run->hypercall.ret;
9570
9571         if (!is_64_bit_mode(vcpu))
9572                 ret = (u32)ret;
9573         kvm_rax_write(vcpu, ret);
9574         ++vcpu->stat.hypercalls;
9575         return kvm_skip_emulated_instruction(vcpu);
9576 }
9577
9578 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
9579 {
9580         unsigned long nr, a0, a1, a2, a3, ret;
9581         int op_64_bit;
9582
9583         if (kvm_xen_hypercall_enabled(vcpu->kvm))
9584                 return kvm_xen_hypercall(vcpu);
9585
9586         if (kvm_hv_hypercall_enabled(vcpu))
9587                 return kvm_hv_hypercall(vcpu);
9588
9589         nr = kvm_rax_read(vcpu);
9590         a0 = kvm_rbx_read(vcpu);
9591         a1 = kvm_rcx_read(vcpu);
9592         a2 = kvm_rdx_read(vcpu);
9593         a3 = kvm_rsi_read(vcpu);
9594
9595         trace_kvm_hypercall(nr, a0, a1, a2, a3);
9596
9597         op_64_bit = is_64_bit_hypercall(vcpu);
9598         if (!op_64_bit) {
9599                 nr &= 0xFFFFFFFF;
9600                 a0 &= 0xFFFFFFFF;
9601                 a1 &= 0xFFFFFFFF;
9602                 a2 &= 0xFFFFFFFF;
9603                 a3 &= 0xFFFFFFFF;
9604         }
9605
9606         if (static_call(kvm_x86_get_cpl)(vcpu) != 0) {
9607                 ret = -KVM_EPERM;
9608                 goto out;
9609         }
9610
9611         ret = -KVM_ENOSYS;
9612
9613         switch (nr) {
9614         case KVM_HC_VAPIC_POLL_IRQ:
9615                 ret = 0;
9616                 break;
9617         case KVM_HC_KICK_CPU:
9618                 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_UNHALT))
9619                         break;
9620
9621                 kvm_pv_kick_cpu_op(vcpu->kvm, a1);
9622                 kvm_sched_yield(vcpu, a1);
9623                 ret = 0;
9624                 break;
9625 #ifdef CONFIG_X86_64
9626         case KVM_HC_CLOCK_PAIRING:
9627                 ret = kvm_pv_clock_pairing(vcpu, a0, a1);
9628                 break;
9629 #endif
9630         case KVM_HC_SEND_IPI:
9631                 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_SEND_IPI))
9632                         break;
9633
9634                 ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit);
9635                 break;
9636         case KVM_HC_SCHED_YIELD:
9637                 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_SCHED_YIELD))
9638                         break;
9639
9640                 kvm_sched_yield(vcpu, a0);
9641                 ret = 0;
9642                 break;
9643         case KVM_HC_MAP_GPA_RANGE: {
9644                 u64 gpa = a0, npages = a1, attrs = a2;
9645
9646                 ret = -KVM_ENOSYS;
9647                 if (!(vcpu->kvm->arch.hypercall_exit_enabled & (1 << KVM_HC_MAP_GPA_RANGE)))
9648                         break;
9649
9650                 if (!PAGE_ALIGNED(gpa) || !npages ||
9651                     gpa_to_gfn(gpa) + npages <= gpa_to_gfn(gpa)) {
9652                         ret = -KVM_EINVAL;
9653                         break;
9654                 }
9655
9656                 vcpu->run->exit_reason        = KVM_EXIT_HYPERCALL;
9657                 vcpu->run->hypercall.nr       = KVM_HC_MAP_GPA_RANGE;
9658                 vcpu->run->hypercall.args[0]  = gpa;
9659                 vcpu->run->hypercall.args[1]  = npages;
9660                 vcpu->run->hypercall.args[2]  = attrs;
9661                 vcpu->run->hypercall.longmode = op_64_bit;
9662                 vcpu->arch.complete_userspace_io = complete_hypercall_exit;
9663                 return 0;
9664         }
9665         default:
9666                 ret = -KVM_ENOSYS;
9667                 break;
9668         }
9669 out:
9670         if (!op_64_bit)
9671                 ret = (u32)ret;
9672         kvm_rax_write(vcpu, ret);
9673
9674         ++vcpu->stat.hypercalls;
9675         return kvm_skip_emulated_instruction(vcpu);
9676 }
9677 EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
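
/*
 * For reference, a sketch of the guest side of this ABI (assumption: Intel
 * VMCALL; the real helpers in arch/x86/include/asm/kvm_para.h also patch
 * in AMD's VMMCALL).  The hypercall number goes in RAX, arguments in RBX,
 * RCX, RDX and RSI, and the return value comes back in RAX:
 *
 *	static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
 *	{
 *		long ret;
 *
 *		asm volatile("vmcall"
 *			     : "=a"(ret)
 *			     : "a"(nr), "b"(p1)
 *			     : "memory");
 *		return ret;
 *	}
 */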
9678
9679 static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
9680 {
9681         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
9682         char instruction[3];
9683         unsigned long rip = kvm_rip_read(vcpu);
9684
9685         /*
9686          * If the quirk is disabled, synthesize a #UD and let the guest pick up
9687          * the pieces.
9688          */
9689         if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_FIX_HYPERCALL_INSN)) {
9690                 ctxt->exception.error_code_valid = false;
9691                 ctxt->exception.vector = UD_VECTOR;
9692                 ctxt->have_exception = true;
9693                 return X86EMUL_PROPAGATE_FAULT;
9694         }
9695
9696         static_call(kvm_x86_patch_hypercall)(vcpu, instruction);
9697
9698         return emulator_write_emulated(ctxt, rip, instruction, 3,
9699                 &ctxt->exception);
9700 }
9701
9702 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
9703 {
9704         return vcpu->run->request_interrupt_window &&
9705                 likely(!pic_in_kernel(vcpu->kvm));
9706 }
9707
9708 /* Called within kvm->srcu read side.  */
9709 static void post_kvm_run_save(struct kvm_vcpu *vcpu)
9710 {
9711         struct kvm_run *kvm_run = vcpu->run;
9712
9713         kvm_run->if_flag = static_call(kvm_x86_get_if_flag)(vcpu);
9714         kvm_run->cr8 = kvm_get_cr8(vcpu);
9715         kvm_run->apic_base = kvm_get_apic_base(vcpu);
9716
9717         kvm_run->ready_for_interrupt_injection =
9718                 pic_in_kernel(vcpu->kvm) ||
9719                 kvm_vcpu_ready_for_interrupt_injection(vcpu);
9720
9721         if (is_smm(vcpu))
9722                 kvm_run->flags |= KVM_RUN_X86_SMM;
9723 }
9724
9725 static void update_cr8_intercept(struct kvm_vcpu *vcpu)
9726 {
9727         int max_irr, tpr;
9728
9729         if (!kvm_x86_ops.update_cr8_intercept)
9730                 return;
9731
9732         if (!lapic_in_kernel(vcpu))
9733                 return;
9734
9735         if (vcpu->arch.apic->apicv_active)
9736                 return;
9737
9738         if (!vcpu->arch.apic->vapic_addr)
9739                 max_irr = kvm_lapic_find_highest_irr(vcpu);
9740         else
9741                 max_irr = -1;
9742
9743         if (max_irr != -1)
9744                 max_irr >>= 4;
9745
9746         tpr = kvm_lapic_get_cr8(vcpu);
9747
9748         static_call(kvm_x86_update_cr8_intercept)(vcpu, tpr, max_irr);
9749 }
9750
9751
9752 int kvm_check_nested_events(struct kvm_vcpu *vcpu)
9753 {
9754         if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
9755                 kvm_x86_ops.nested_ops->triple_fault(vcpu);
9756                 return 1;
9757         }
9758
9759         return kvm_x86_ops.nested_ops->check_events(vcpu);
9760 }
9761
9762 static void kvm_inject_exception(struct kvm_vcpu *vcpu)
9763 {
9764         trace_kvm_inj_exception(vcpu->arch.exception.vector,
9765                                 vcpu->arch.exception.has_error_code,
9766                                 vcpu->arch.exception.error_code,
9767                                 vcpu->arch.exception.injected);
9768
9769         if (vcpu->arch.exception.error_code && !is_protmode(vcpu))
9770                 vcpu->arch.exception.error_code = false;
9771         static_call(kvm_x86_inject_exception)(vcpu);
9772 }
9773
9774 /*
9775  * Check for any event (interrupt or exception) that is ready to be injected,
9776  * and if there is at least one event, inject the event with the highest
9777  * priority.  This handles both "pending" events, i.e. events that have never
9778  * been injected into the guest, and "injected" events, i.e. events that were
9779  * injected as part of a previous VM-Enter, but weren't successfully delivered
9780  * and need to be re-injected.
9781  *
9782  * Note, this is not guaranteed to be invoked on a guest instruction boundary,
9783  * i.e. doesn't guarantee that there's an event window in the guest.  KVM must
9784  * be able to inject exceptions in the "middle" of an instruction, and so must
9785  * also be able to re-inject NMIs and IRQs in the middle of an instruction.
9786  * I.e. for exceptions and re-injected events, NOT invoking this on instruction
9787  * boundaries is necessary and correct.
9788  *
9789  * For simplicity, KVM uses a single path to inject all events (except events
9790  * that are injected directly from L1 to L2) and doesn't explicitly track
9791  * instruction boundaries for asynchronous events.  However, because VM-Exits
9792  * that can occur during instruction execution typically result in KVM skipping
9793  * the instruction or injecting an exception, e.g. instruction and exception
9794  * intercepts, and because pending exceptions have higher priority than pending
9795  * interrupts, KVM still honors instruction boundaries in most scenarios.
9796  *
9797  * But, if a VM-Exit occurs during instruction execution, and KVM does NOT skip
9798  * the instruction or inject an exception, then KVM can incorrectly inject a new
9799  * asynchronous event if the event became pending after the CPU fetched the
9800  * instruction (in the guest).  E.g. if a page fault (#PF, #NPF, EPT violation)
9801  * occurs and is resolved by KVM, a coincident NMI, SMI, IRQ, etc... can be
9802  * injected on the restarted instruction instead of being deferred until the
9803  * instruction completes.
9804  *
9805  * In practice, this virtualization hole is unlikely to be observed by the
9806  * guest, and even less likely to cause functional problems.  To detect the
9807  * hole, the guest would have to trigger an event on a side effect of an early
9808  * phase of instruction execution, e.g. on the instruction fetch from memory.
9809  * And for it to be a functional problem, the guest would need to depend on the
9810  * ordering between that side effect, the instruction completing, _and_ the
9811  * delivery of the asynchronous event.
9812  */
9813 static int kvm_check_and_inject_events(struct kvm_vcpu *vcpu,
9814                                        bool *req_immediate_exit)
9815 {
9816         bool can_inject;
9817         int r;
9818
9819         /*
9820          * Process nested events first, as nested VM-Exit supersedes event
9821          * re-injection.  If there's an event queued for re-injection, it will
9822          * be saved into the appropriate vmc{b,s}12 fields on nested VM-Exit.
9823          */
9824         if (is_guest_mode(vcpu))
9825                 r = kvm_check_nested_events(vcpu);
9826         else
9827                 r = 0;
9828
9829         /*
9830          * Re-inject exceptions and events *especially* if immediate entry+exit
9831          * to/from L2 is needed, as any event that has already been injected
9832          * into L2 needs to complete its lifecycle before injecting a new event.
9833          *
9834          * Don't re-inject an NMI or interrupt if there is a pending exception.
9835          * This collision arises if an exception occurred while vectoring the
9836          * injected event, KVM intercepted said exception, and KVM ultimately
9837          * determined the fault belongs to the guest and queues the exception
9838          * for injection back into the guest.
9839          *
9840          * "Injected" interrupts can also collide with pending exceptions if
9841          * userspace ignores the "ready for injection" flag and blindly queues
9842          * an interrupt.  In that case, prioritizing the exception is correct,
9843          * as the exception "occurred" before the exit to userspace.  Trap-like
9844          * exceptions, e.g. most #DBs, have higher priority than interrupts.
9845          * And while fault-like exceptions, e.g. #GP and #PF, are the lowest
9846          * priority, they're only generated (pended) during instruction
9847          * execution, and interrupts are recognized at instruction boundaries.
9848          * Thus a pending fault-like exception means the fault occurred on the
9849          * *previous* instruction and must be serviced prior to recognizing any
9850          * new events in order to fully complete the previous instruction.
9851          */
9852         if (vcpu->arch.exception.injected)
9853                 kvm_inject_exception(vcpu);
9854         else if (kvm_is_exception_pending(vcpu))
9855                 ; /* see above */
9856         else if (vcpu->arch.nmi_injected)
9857                 static_call(kvm_x86_inject_nmi)(vcpu);
9858         else if (vcpu->arch.interrupt.injected)
9859                 static_call(kvm_x86_inject_irq)(vcpu, true);
9860
9861         /*
9862          * Exceptions that morph to VM-Exits are handled above, and pending
9863          * exceptions on top of injected exceptions that do not VM-Exit should
9864          * either morph to #DF or, sadly, override the injected exception.
9865          */
9866         WARN_ON_ONCE(vcpu->arch.exception.injected &&
9867                      vcpu->arch.exception.pending);
9868
9869         /*
9870          * Bail if immediate entry+exit to/from the guest is needed to complete
9871          * nested VM-Enter or event re-injection so that a different pending
9872          * event can be serviced (or if KVM needs to exit to userspace).
9873          *
9874          * Otherwise, continue processing events even if VM-Exit occurred.  The
9875          * VM-Exit will have cleared exceptions that were meant for L2, but
9876          * there may now be events that can be injected into L1.
9877          */
9878         if (r < 0)
9879                 goto out;
9880
9881         /*
9882          * A pending exception VM-Exit should either result in nested VM-Exit
9883          * or force an immediate re-entry and exit to/from L2, and exception
9884          * VM-Exits cannot be injected (flag should _never_ be set).
9885          */
9886         WARN_ON_ONCE(vcpu->arch.exception_vmexit.injected ||
9887                      vcpu->arch.exception_vmexit.pending);
9888
9889         /*
9890          * New events, other than exceptions, cannot be injected if KVM needs
9891          * to re-inject a previous event.  See above comments on re-injecting
9892          * for why pending exceptions get priority.
9893          */
9894         can_inject = !kvm_event_needs_reinjection(vcpu);
9895
9896         if (vcpu->arch.exception.pending) {
9897                 /*
9898                  * Fault-class exceptions, except #DBs, set RF=1 in the RFLAGS
9899                  * value pushed on the stack.  Trap-like exception and all #DBs
9900                  * value pushed on the stack.  Trap-like exceptions and all #DBs
9901                  * leave RF as-is (KVM follows Intel's behavior in this regard;
9902                  * AMD states that code breakpoint #DBs explicitly clear RF=0).
9903                  * Note, most versions of Intel's SDM and AMD's APM incorrectly
9904                  * describe the behavior of General Detect #DBs, which are
9905                  * fault-like.  They do _not_ set RF, a la code breakpoints.
9906                  */
9907                 if (exception_type(vcpu->arch.exception.vector) == EXCPT_FAULT)
9908                         __kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) |
9909                                              X86_EFLAGS_RF);
9910
9911                 if (vcpu->arch.exception.vector == DB_VECTOR) {
9912                         kvm_deliver_exception_payload(vcpu, &vcpu->arch.exception);
9913                         if (vcpu->arch.dr7 & DR7_GD) {
9914                                 vcpu->arch.dr7 &= ~DR7_GD;
9915                                 kvm_update_dr7(vcpu);
9916                         }
9917                 }
9918
9919                 kvm_inject_exception(vcpu);
9920
9921                 vcpu->arch.exception.pending = false;
9922                 vcpu->arch.exception.injected = true;
9923
9924                 can_inject = false;
9925         }
9926
9927         /* Don't inject interrupts if the user asked to avoid doing so */
9928         if (vcpu->guest_debug & KVM_GUESTDBG_BLOCKIRQ)
9929                 return 0;
9930
9931         /*
9932          * Finally, inject interrupt events.  If an event cannot be injected
9933          * due to architectural conditions (e.g. IF=0) a window-open exit
9934          * due to architectural conditions (e.g. IF=0), a window-open exit
9935          * will re-request KVM_REQ_EVENT.  Sometimes, however, an event is pending
9936          * and can architecturally be injected, but we cannot do it right now:
9937          * an interrupt could have arrived just now and we have to inject it
9938          * as a vmexit, or there could already be an event in the queue, which is
9939          * in order to make progress and get back here for another iteration.
9940          * The kvm_x86_ops hooks communicate this by returning -EBUSY.
9941          */
9942         if (vcpu->arch.smi_pending) {
9943                 r = can_inject ? static_call(kvm_x86_smi_allowed)(vcpu, true) : -EBUSY;
9944                 if (r < 0)
9945                         goto out;
9946                 if (r) {
9947                         vcpu->arch.smi_pending = false;
9948                         ++vcpu->arch.smi_count;
9949                         enter_smm(vcpu);
9950                         can_inject = false;
9951                 } else
9952                         static_call(kvm_x86_enable_smi_window)(vcpu);
9953         }
9954
9955         if (vcpu->arch.nmi_pending) {
9956                 r = can_inject ? static_call(kvm_x86_nmi_allowed)(vcpu, true) : -EBUSY;
9957                 if (r < 0)
9958                         goto out;
9959                 if (r) {
9960                         --vcpu->arch.nmi_pending;
9961                         vcpu->arch.nmi_injected = true;
9962                         static_call(kvm_x86_inject_nmi)(vcpu);
9963                         can_inject = false;
9964                         WARN_ON(static_call(kvm_x86_nmi_allowed)(vcpu, true) < 0);
9965                 }
9966                 if (vcpu->arch.nmi_pending)
9967                         static_call(kvm_x86_enable_nmi_window)(vcpu);
9968         }
9969
9970         if (kvm_cpu_has_injectable_intr(vcpu)) {
9971                 r = can_inject ? static_call(kvm_x86_interrupt_allowed)(vcpu, true) : -EBUSY;
9972                 if (r < 0)
9973                         goto out;
9974                 if (r) {
9975                         kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu), false);
9976                         static_call(kvm_x86_inject_irq)(vcpu, false);
9977                         WARN_ON(static_call(kvm_x86_interrupt_allowed)(vcpu, true) < 0);
9978                 }
9979                 if (kvm_cpu_has_injectable_intr(vcpu))
9980                         static_call(kvm_x86_enable_irq_window)(vcpu);
9981         }
9982
9983         if (is_guest_mode(vcpu) &&
9984             kvm_x86_ops.nested_ops->has_events &&
9985             kvm_x86_ops.nested_ops->has_events(vcpu))
9986                 *req_immediate_exit = true;
9987
9988         WARN_ON(kvm_is_exception_pending(vcpu));
9989         return 0;
9990
9991 out:
9992         if (r == -EBUSY) {
9993                 *req_immediate_exit = true;
9994                 r = 0;
9995         }
9996         return r;
9997 }
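
/*
 * Recap of the ordering above: a pending exception is (re)injected first,
 * then SMIs, then NMIs, then maskable IRQs, matching their architectural
 * priority.  A hook returning -EBUSY means "injectable in principle, but
 * not with the event KVM already queued this iteration"; the out: label
 * translates that into *req_immediate_exit so the vCPU re-enters and
 * bounces straight back here.
 */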
9998
9999 static void process_nmi(struct kvm_vcpu *vcpu)
10000 {
10001         unsigned limit = 2;
10002
10003         /*
10004          * x86 is limited to one NMI running, and one NMI pending after it.
10005          * If an NMI is already in progress, limit further NMIs to just one.
10006          * Otherwise, allow two (and we'll inject the first one immediately).
10007          */
10008         if (static_call(kvm_x86_get_nmi_mask)(vcpu) || vcpu->arch.nmi_injected)
10009                 limit = 1;
10010
10011         vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0);
10012         vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit);
10013         kvm_make_request(KVM_REQ_EVENT, vcpu);
10014 }
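
/*
 * Worked example of the limit above: if the vCPU is mid-NMI (masked or
 * already injected), three queued NMIs collapse to a single pending one;
 * otherwise they collapse to two, with the first injected right away and
 * the second latched, mirroring hardware where at most one NMI can be
 * latched while another is being serviced.
 */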
10015
10016 static u32 enter_smm_get_segment_flags(struct kvm_segment *seg)
10017 {
10018         u32 flags = 0;
10019         flags |= seg->g       << 23;
10020         flags |= seg->db      << 22;
10021         flags |= seg->l       << 21;
10022         flags |= seg->avl     << 20;
10023         flags |= seg->present << 15;
10024         flags |= seg->dpl     << 13;
10025         flags |= seg->s       << 12;
10026         flags |= seg->type    << 8;
10027         return flags;
10028 }
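
/*
 * The packing above follows the descriptor attribute layout used by the
 * SMM state-save area: type in bits 11:8, S at 12, DPL in 14:13, P at 15,
 * AVL at 20, L at 21, D/B at 22 and G at 23.  E.g. a flat 32-bit code
 * segment (type=0xb, s=1, present=1, db=1, g=1) encodes to 0x00c09b00;
 * the 64-bit variant below stores bits 23:8 of that (0xc09b) as a u16.
 */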
10029
10030 static void enter_smm_save_seg_32(struct kvm_vcpu *vcpu, char *buf, int n)
10031 {
10032         struct kvm_segment seg;
10033         int offset;
10034
10035         kvm_get_segment(vcpu, &seg, n);
10036         put_smstate(u32, buf, 0x7fa8 + n * 4, seg.selector);
10037
10038         if (n < 3)
10039                 offset = 0x7f84 + n * 12;
10040         else
10041                 offset = 0x7f2c + (n - 3) * 12;
10042
10043         put_smstate(u32, buf, offset + 8, seg.base);
10044         put_smstate(u32, buf, offset + 4, seg.limit);
10045         put_smstate(u32, buf, offset, enter_smm_get_segment_flags(&seg));
10046 }
10047
10048 #ifdef CONFIG_X86_64
10049 static void enter_smm_save_seg_64(struct kvm_vcpu *vcpu, char *buf, int n)
10050 {
10051         struct kvm_segment seg;
10052         int offset;
10053         u16 flags;
10054
10055         kvm_get_segment(vcpu, &seg, n);
10056         offset = 0x7e00 + n * 16;
10057
10058         flags = enter_smm_get_segment_flags(&seg) >> 8;
10059         put_smstate(u16, buf, offset, seg.selector);
10060         put_smstate(u16, buf, offset + 2, flags);
10061         put_smstate(u32, buf, offset + 4, seg.limit);
10062         put_smstate(u64, buf, offset + 8, seg.base);
10063 }
10064 #endif
10065
10066 static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf)
10067 {
10068         struct desc_ptr dt;
10069         struct kvm_segment seg;
10070         unsigned long val;
10071         int i;
10072
10073         put_smstate(u32, buf, 0x7ffc, kvm_read_cr0(vcpu));
10074         put_smstate(u32, buf, 0x7ff8, kvm_read_cr3(vcpu));
10075         put_smstate(u32, buf, 0x7ff4, kvm_get_rflags(vcpu));
10076         put_smstate(u32, buf, 0x7ff0, kvm_rip_read(vcpu));
10077
10078         for (i = 0; i < 8; i++)
10079                 put_smstate(u32, buf, 0x7fd0 + i * 4, kvm_register_read_raw(vcpu, i));
10080
10081         kvm_get_dr(vcpu, 6, &val);
10082         put_smstate(u32, buf, 0x7fcc, (u32)val);
10083         kvm_get_dr(vcpu, 7, &val);
10084         put_smstate(u32, buf, 0x7fc8, (u32)val);
10085
10086         kvm_get_segment(vcpu, &seg, VCPU_SREG_TR);
10087         put_smstate(u32, buf, 0x7fc4, seg.selector);
10088         put_smstate(u32, buf, 0x7f64, seg.base);
10089         put_smstate(u32, buf, 0x7f60, seg.limit);
10090         put_smstate(u32, buf, 0x7f5c, enter_smm_get_segment_flags(&seg));
10091
10092         kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR);
10093         put_smstate(u32, buf, 0x7fc0, seg.selector);
10094         put_smstate(u32, buf, 0x7f80, seg.base);
10095         put_smstate(u32, buf, 0x7f7c, seg.limit);
10096         put_smstate(u32, buf, 0x7f78, enter_smm_get_segment_flags(&seg));
10097
10098         static_call(kvm_x86_get_gdt)(vcpu, &dt);
10099         put_smstate(u32, buf, 0x7f74, dt.address);
10100         put_smstate(u32, buf, 0x7f70, dt.size);
10101
10102         static_call(kvm_x86_get_idt)(vcpu, &dt);
10103         put_smstate(u32, buf, 0x7f58, dt.address);
10104         put_smstate(u32, buf, 0x7f54, dt.size);
10105
10106         for (i = 0; i < 6; i++)
10107                 enter_smm_save_seg_32(vcpu, buf, i);
10108
10109         put_smstate(u32, buf, 0x7f14, kvm_read_cr4(vcpu));
10110
10111         /* revision id */
10112         put_smstate(u32, buf, 0x7efc, 0x00020000);
10113         put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase);
10114 }
10115
10116 #ifdef CONFIG_X86_64
10117 static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
10118 {
10119         struct desc_ptr dt;
10120         struct kvm_segment seg;
10121         unsigned long val;
10122         int i;
10123
10124         for (i = 0; i < 16; i++)
10125                 put_smstate(u64, buf, 0x7ff8 - i * 8, kvm_register_read_raw(vcpu, i));
10126
10127         put_smstate(u64, buf, 0x7f78, kvm_rip_read(vcpu));
10128         put_smstate(u32, buf, 0x7f70, kvm_get_rflags(vcpu));
10129
10130         kvm_get_dr(vcpu, 6, &val);
10131         put_smstate(u64, buf, 0x7f68, val);
10132         kvm_get_dr(vcpu, 7, &val);
10133         put_smstate(u64, buf, 0x7f60, val);
10134
10135         put_smstate(u64, buf, 0x7f58, kvm_read_cr0(vcpu));
10136         put_smstate(u64, buf, 0x7f50, kvm_read_cr3(vcpu));
10137         put_smstate(u64, buf, 0x7f48, kvm_read_cr4(vcpu));
10138
10139         put_smstate(u32, buf, 0x7f00, vcpu->arch.smbase);
10140
10141         /* revision id */
10142         put_smstate(u32, buf, 0x7efc, 0x00020064);
10143
10144         put_smstate(u64, buf, 0x7ed0, vcpu->arch.efer);
10145
10146         kvm_get_segment(vcpu, &seg, VCPU_SREG_TR);
10147         put_smstate(u16, buf, 0x7e90, seg.selector);
10148         put_smstate(u16, buf, 0x7e92, enter_smm_get_segment_flags(&seg) >> 8);
10149         put_smstate(u32, buf, 0x7e94, seg.limit);
10150         put_smstate(u64, buf, 0x7e98, seg.base);
10151
10152         static_call(kvm_x86_get_idt)(vcpu, &dt);
10153         put_smstate(u32, buf, 0x7e84, dt.size);
10154         put_smstate(u64, buf, 0x7e88, dt.address);
10155
10156         kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR);
10157         put_smstate(u16, buf, 0x7e70, seg.selector);
10158         put_smstate(u16, buf, 0x7e72, enter_smm_get_segment_flags(&seg) >> 8);
10159         put_smstate(u32, buf, 0x7e74, seg.limit);
10160         put_smstate(u64, buf, 0x7e78, seg.base);
10161
10162         static_call(kvm_x86_get_gdt)(vcpu, &dt);
10163         put_smstate(u32, buf, 0x7e64, dt.size);
10164         put_smstate(u64, buf, 0x7e68, dt.address);
10165
10166         for (i = 0; i < 6; i++)
10167                 enter_smm_save_seg_64(vcpu, buf, i);
10168 }
10169 #endif
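
/*
 * Note on the magic offsets above: they are SMRAM state-save offsets
 * relative to SMBASE + 0x8000.  The 512-byte buf spans 0x7e00..0x7fff
 * (put_smstate() subtracts the 0x7e00 bias), and enter_smm() below writes
 * it to guest memory at smbase + 0xfe00, i.e. the top of the SMRAM image.
 */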
10170
10171 static void enter_smm(struct kvm_vcpu *vcpu)
10172 {
10173         struct kvm_segment cs, ds;
10174         struct desc_ptr dt;
10175         unsigned long cr0;
10176         char buf[512];
10177
10178         memset(buf, 0, 512);
10179 #ifdef CONFIG_X86_64
10180         if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
10181                 enter_smm_save_state_64(vcpu, buf);
10182         else
10183 #endif
10184                 enter_smm_save_state_32(vcpu, buf);
10185
10186         /*
10187          * Give enter_smm() a chance to make ISA-specific changes to the vCPU
10188          * state (e.g. leave guest mode) after we've saved the state into the
10189          * SMM state-save area.
10190          */
10191         static_call(kvm_x86_enter_smm)(vcpu, buf);
10192
10193         kvm_smm_changed(vcpu, true);
10194         kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf));
10195
10196         if (static_call(kvm_x86_get_nmi_mask)(vcpu))
10197                 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
10198         else
10199                 static_call(kvm_x86_set_nmi_mask)(vcpu, true);
10200
10201         kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
10202         kvm_rip_write(vcpu, 0x8000);
10203
10204         cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG);
10205         static_call(kvm_x86_set_cr0)(vcpu, cr0);
10206         vcpu->arch.cr0 = cr0;
10207
10208         static_call(kvm_x86_set_cr4)(vcpu, 0);
10209
10210         /* Undocumented: IDT limit is set to zero on entry to SMM.  */
10211         dt.address = dt.size = 0;
10212         static_call(kvm_x86_set_idt)(vcpu, &dt);
10213
10214         kvm_set_dr(vcpu, 7, DR7_FIXED_1);
10215
10216         cs.selector = (vcpu->arch.smbase >> 4) & 0xffff;
10217         cs.base = vcpu->arch.smbase;
10218
10219         ds.selector = 0;
10220         ds.base = 0;
10221
10222         cs.limit    = ds.limit = 0xffffffff;
10223         cs.type     = ds.type = 0x3;
10224         cs.dpl      = ds.dpl = 0;
10225         cs.db       = ds.db = 0;
10226         cs.s        = ds.s = 1;
10227         cs.l        = ds.l = 0;
10228         cs.g        = ds.g = 1;
10229         cs.avl      = ds.avl = 0;
10230         cs.present  = ds.present = 1;
10231         cs.unusable = ds.unusable = 0;
10232         cs.padding  = ds.padding = 0;
10233
10234         kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
10235         kvm_set_segment(vcpu, &ds, VCPU_SREG_DS);
10236         kvm_set_segment(vcpu, &ds, VCPU_SREG_ES);
10237         kvm_set_segment(vcpu, &ds, VCPU_SREG_FS);
10238         kvm_set_segment(vcpu, &ds, VCPU_SREG_GS);
10239         kvm_set_segment(vcpu, &ds, VCPU_SREG_SS);
10240
10241 #ifdef CONFIG_X86_64
10242         if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
10243                 static_call(kvm_x86_set_efer)(vcpu, 0);
10244 #endif
10245
10246         kvm_update_cpuid_runtime(vcpu);
10247         kvm_mmu_reset_context(vcpu);
10248 }
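
/*
 * Net effect of the above: the vCPU resumes in the SDM's SMM entry state,
 * i.e. big real mode with CS.base = SMBASE and RIP = 0x8000, so the
 * handler runs at SMBASE + 0x8000; RFLAGS is reset to the fixed bit, CR0
 * paging/protection is cleared, CR4 = 0, the IDT limit is zeroed, and
 * NMIs are masked on entry.
 */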
10249
10250 static void process_smi(struct kvm_vcpu *vcpu)
10251 {
10252         vcpu->arch.smi_pending = true;
10253         kvm_make_request(KVM_REQ_EVENT, vcpu);
10254 }
10255
10256 void kvm_make_scan_ioapic_request_mask(struct kvm *kvm,
10257                                        unsigned long *vcpu_bitmap)
10258 {
10259         kvm_make_vcpus_request_mask(kvm, KVM_REQ_SCAN_IOAPIC, vcpu_bitmap);
10260 }
10261
10262 void kvm_make_scan_ioapic_request(struct kvm *kvm)
10263 {
10264         kvm_make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC);
10265 }
10266
10267 void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
10268 {
10269         struct kvm_lapic *apic = vcpu->arch.apic;
10270         bool activate;
10271
10272         if (!lapic_in_kernel(vcpu))
10273                 return;
10274
10275         down_read(&vcpu->kvm->arch.apicv_update_lock);
10276         preempt_disable();
10277
10278         /* Do not activate APICv when the APIC is disabled. */
10279         activate = kvm_vcpu_apicv_activated(vcpu) &&
10280                    (kvm_get_apic_mode(vcpu) != LAPIC_MODE_DISABLED);
10281
10282         if (apic->apicv_active == activate)
10283                 goto out;
10284
10285         apic->apicv_active = activate;
10286         kvm_apic_update_apicv(vcpu);
10287         static_call(kvm_x86_refresh_apicv_exec_ctrl)(vcpu);
10288
10289         /*
10290          * When APICv gets disabled, we may still have injected interrupts
10291          * pending. At the same time, KVM_REQ_EVENT may not be set as APICv was
10292          * still active when the interrupt got accepted. Make sure
10293          * kvm_check_and_inject_events() is called to check for that.
10294          */
10295         if (!apic->apicv_active)
10296                 kvm_make_request(KVM_REQ_EVENT, vcpu);
10297
10298 out:
10299         preempt_enable();
10300         up_read(&vcpu->kvm->arch.apicv_update_lock);
10301 }
10302 EXPORT_SYMBOL_GPL(kvm_vcpu_update_apicv);
10303
10304 void __kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
10305                                       enum kvm_apicv_inhibit reason, bool set)
10306 {
10307         unsigned long old, new;
10308
10309         lockdep_assert_held_write(&kvm->arch.apicv_update_lock);
10310
10311         if (!static_call(kvm_x86_check_apicv_inhibit_reasons)(reason))
10312                 return;
10313
10314         old = new = kvm->arch.apicv_inhibit_reasons;
10315
10316         set_or_clear_apicv_inhibit(&new, reason, set);
10317
10318         if (!!old != !!new) {
10319                 /*
10320                  * Kick all vCPUs before setting apicv_inhibit_reasons to avoid
10321                  * false positives in the sanity check WARN in svm_vcpu_run().
10322                  * This task will wait for all vCPUs to ack the kick IRQ before
10323                  * updating apicv_inhibit_reasons, and all other vCPUs will
10324                  * block on acquiring apicv_update_lock so that vCPUs can't
10325                  * redo svm_vcpu_run() without seeing the new inhibit state.
10326                  *
10327                  * Note, holding apicv_update_lock and taking it in the read
10328                  * side (handling the request) also prevents other vCPUs from
10329                  * servicing the request with a stale apicv_inhibit_reasons.
10330                  */
10331                 kvm_make_all_cpus_request(kvm, KVM_REQ_APICV_UPDATE);
10332                 kvm->arch.apicv_inhibit_reasons = new;
10333                 if (new) {
10334                         unsigned long gfn = gpa_to_gfn(APIC_DEFAULT_PHYS_BASE);
10335                         kvm_zap_gfn_range(kvm, gfn, gfn + 1);
10336                 }
10337         } else {
10338                 kvm->arch.apicv_inhibit_reasons = new;
10339         }
10340 }
10341
10342 void kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
10343                                     enum kvm_apicv_inhibit reason, bool set)
10344 {
10345         if (!enable_apicv)
10346                 return;
10347
10348         down_write(&kvm->arch.apicv_update_lock);
10349         __kvm_set_or_clear_apicv_inhibit(kvm, reason, set);
10350         up_write(&kvm->arch.apicv_update_lock);
10351 }
10352 EXPORT_SYMBOL_GPL(kvm_set_or_clear_apicv_inhibit);
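
/*
 * Illustrative usage (PIT re-injection is one real inhibit source); the
 * condition variable here is hypothetical:
 *
 *	kvm_set_or_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_PIT_REINJ,
 *				       pit_reinject_enabled);
 *
 * The write lock taken here pairs with the read lock in
 * kvm_vcpu_update_apicv() so per-vCPU refreshes never race with a VM-wide
 * inhibit change.
 */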
10353
10354 static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
10355 {
10356         if (!kvm_apic_present(vcpu))
10357                 return;
10358
10359         bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256);
10360
10361         if (irqchip_split(vcpu->kvm))
10362                 kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
10363         else {
10364                 static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
10365                 if (ioapic_in_kernel(vcpu->kvm))
10366                         kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
10367         }
10368
10369         if (is_guest_mode(vcpu))
10370                 vcpu->arch.load_eoi_exitmap_pending = true;
10371         else
10372                 kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
10373 }
10374
10375 static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu)
10376 {
10377         u64 eoi_exit_bitmap[4];
10378
10379         if (!kvm_apic_hw_enabled(vcpu->arch.apic))
10380                 return;
10381
10382         if (to_hv_vcpu(vcpu)) {
10383                 bitmap_or((ulong *)eoi_exit_bitmap,
10384                           vcpu->arch.ioapic_handled_vectors,
10385                           to_hv_synic(vcpu)->vec_bitmap, 256);
10386                 static_call_cond(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap);
10387                 return;
10388         }
10389
10390         static_call_cond(kvm_x86_load_eoi_exitmap)(
10391                 vcpu, (u64 *)vcpu->arch.ioapic_handled_vectors);
10392 }
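
/*
 * The EOI exit bitmap covers all 256 vectors, hence four u64 words.  With
 * a Hyper-V SynIC active, its vec_bitmap is OR'ed in so that EOIs for
 * SynIC vectors trap as well; otherwise the IOAPIC-handled vector bitmap
 * is handed to the vendor hook unmodified.
 */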
10393
10394 void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
10395                                             unsigned long start, unsigned long end)
10396 {
10397         unsigned long apic_address;
10398
10399         /*
10400          * The physical address of the APIC access page is stored in the VMCS.
10401          * Update it when it becomes invalid.
10402          */
10403         apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
10404         if (start <= apic_address && apic_address < end)
10405                 kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
10406 }
10407
10408 void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
10409 {
10410         static_call_cond(kvm_x86_guest_memory_reclaimed)(kvm);
10411 }
10412
10413 static void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
10414 {
10415         if (!lapic_in_kernel(vcpu))
10416                 return;
10417
10418         static_call_cond(kvm_x86_set_apic_access_page_addr)(vcpu);
10419 }
10420
10421 void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu)
10422 {
10423         smp_send_reschedule(vcpu->cpu);
10424 }
10425 EXPORT_SYMBOL_GPL(__kvm_request_immediate_exit);
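
/*
 * The reschedule IPI above is used purely for its side effect: it
 * guarantees a pending host interrupt on vcpu->cpu, so the imminent
 * VM-entry exits immediately and KVM regains control right away.
 */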
10426
10427 /*
10428  * Called within kvm->srcu read side.
10429  * Returns 1 to let vcpu_run() continue the guest execution loop without
10430  * exiting to userspace.  Otherwise, the value will be returned to
10431  * userspace.
10432  */
10433 static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
10434 {
10435         int r;
10436         bool req_int_win =
10437                 dm_request_for_irq_injection(vcpu) &&
10438                 kvm_cpu_accept_dm_intr(vcpu);
10439         fastpath_t exit_fastpath;
10440
10441         bool req_immediate_exit = false;
10442
10443         /* Forbid VM-entry if the vCPU's dirty ring is soft-full. */
10444         if (unlikely(vcpu->kvm->dirty_ring_size &&
10445                      kvm_dirty_ring_soft_full(&vcpu->dirty_ring))) {
10446                 vcpu->run->exit_reason = KVM_EXIT_DIRTY_RING_FULL;
10447                 trace_kvm_dirty_ring_exit(vcpu);
10448                 r = 0;
10449                 goto out;
10450         }
10451
10452         if (kvm_request_pending(vcpu)) {
10453                 if (kvm_check_request(KVM_REQ_VM_DEAD, vcpu)) {
10454                         r = -EIO;
10455                         goto out;
10456                 }
10457                 if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
10458                         if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
10459                                 r = 0;
10460                                 goto out;
10461                         }
10462                 }
10463                 if (kvm_check_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu))
10464                         kvm_mmu_free_obsolete_roots(vcpu);
10465                 if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
10466                         __kvm_migrate_timers(vcpu);
10467                 if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu))
10468                         kvm_update_masterclock(vcpu->kvm);
10469                 if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu))
10470                         kvm_gen_kvmclock_update(vcpu);
10471                 if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
10472                         r = kvm_guest_time_update(vcpu);
10473                         if (unlikely(r))
10474                                 goto out;
10475                 }
10476                 if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
10477                         kvm_mmu_sync_roots(vcpu);
10478                 if (kvm_check_request(KVM_REQ_LOAD_MMU_PGD, vcpu))
10479                         kvm_mmu_load_pgd(vcpu);
10480                 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
10481                         kvm_vcpu_flush_tlb_all(vcpu);
10482
10483                         /* Flushing all ASIDs flushes the current ASID... */
10484                         kvm_clear_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
10485                 }
10486                 kvm_service_local_tlb_flush_requests(vcpu);
10487
10488                 if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
10489                         vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
10490                         r = 0;
10491                         goto out;
10492                 }
10493                 if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
10494                         if (is_guest_mode(vcpu)) {
10495                                 kvm_x86_ops.nested_ops->triple_fault(vcpu);
10496                         } else {
10497                                 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
10498                                 vcpu->mmio_needed = 0;
10499                                 r = 0;
10500                                 goto out;
10501                         }
10502                 }
10503                 if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
10504                         /* Page is swapped out. Do synthetic halt */
10505                         vcpu->arch.apf.halted = true;
10506                         r = 1;
10507                         goto out;
10508                 }
10509                 if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
10510                         record_steal_time(vcpu);
10511                 if (kvm_check_request(KVM_REQ_SMI, vcpu))
10512                         process_smi(vcpu);
10513                 if (kvm_check_request(KVM_REQ_NMI, vcpu))
10514                         process_nmi(vcpu);
10515                 if (kvm_check_request(KVM_REQ_PMU, vcpu))
10516                         kvm_pmu_handle_event(vcpu);
10517                 if (kvm_check_request(KVM_REQ_PMI, vcpu))
10518                         kvm_pmu_deliver_pmi(vcpu);
10519                 if (kvm_check_request(KVM_REQ_IOAPIC_EOI_EXIT, vcpu)) {
10520                         BUG_ON(vcpu->arch.pending_ioapic_eoi > 255);
10521                         if (test_bit(vcpu->arch.pending_ioapic_eoi,
10522                                      vcpu->arch.ioapic_handled_vectors)) {
10523                                 vcpu->run->exit_reason = KVM_EXIT_IOAPIC_EOI;
10524                                 vcpu->run->eoi.vector =
10525                                                 vcpu->arch.pending_ioapic_eoi;
10526                                 r = 0;
10527                                 goto out;
10528                         }
10529                 }
10530                 if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu))
10531                         vcpu_scan_ioapic(vcpu);
10532                 if (kvm_check_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu))
10533                         vcpu_load_eoi_exitmap(vcpu);
10534                 if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu))
10535                         kvm_vcpu_reload_apic_access_page(vcpu);
10536                 if (kvm_check_request(KVM_REQ_HV_CRASH, vcpu)) {
10537                         vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
10538                         vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH;
10539                         vcpu->run->system_event.ndata = 0;
10540                         r = 0;
10541                         goto out;
10542                 }
10543                 if (kvm_check_request(KVM_REQ_HV_RESET, vcpu)) {
10544                         vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
10545                         vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET;
10546                         vcpu->run->system_event.ndata = 0;
10547                         r = 0;
10548                         goto out;
10549                 }
10550                 if (kvm_check_request(KVM_REQ_HV_EXIT, vcpu)) {
10551                         struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
10552
10553                         vcpu->run->exit_reason = KVM_EXIT_HYPERV;
10554                         vcpu->run->hyperv = hv_vcpu->exit;
10555                         r = 0;
10556                         goto out;
10557                 }
10558
10559                 /*
10560                  * KVM_REQ_HV_STIMER has to be processed after
10561                  * KVM_REQ_CLOCK_UPDATE, because Hyper-V SynIC timers
10562                  * depend on the guest clock being up-to-date
10563                  */
10564                 if (kvm_check_request(KVM_REQ_HV_STIMER, vcpu))
10565                         kvm_hv_process_stimers(vcpu);
10566                 if (kvm_check_request(KVM_REQ_APICV_UPDATE, vcpu))
10567                         kvm_vcpu_update_apicv(vcpu);
10568                 if (kvm_check_request(KVM_REQ_APF_READY, vcpu))
10569                         kvm_check_async_pf_completion(vcpu);
10570                 if (kvm_check_request(KVM_REQ_MSR_FILTER_CHANGED, vcpu))
10571                         static_call(kvm_x86_msr_filter_changed)(vcpu);
10572
10573                 if (kvm_check_request(KVM_REQ_UPDATE_CPU_DIRTY_LOGGING, vcpu))
10574                         static_call(kvm_x86_update_cpu_dirty_logging)(vcpu);
10575         }
10576
10577         if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win ||
10578             kvm_xen_has_interrupt(vcpu)) {
10579                 ++vcpu->stat.req_event;
10580                 r = kvm_apic_accept_events(vcpu);
10581                 if (r < 0) {
10582                         r = 0;
10583                         goto out;
10584                 }
10585                 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
10586                         r = 1;
10587                         goto out;
10588                 }
10589
10590                 r = kvm_check_and_inject_events(vcpu, &req_immediate_exit);
10591                 if (r < 0) {
10592                         r = 0;
10593                         goto out;
10594                 }
10595                 if (req_int_win)
10596                         static_call(kvm_x86_enable_irq_window)(vcpu);
10597
10598                 if (kvm_lapic_enabled(vcpu)) {
10599                         update_cr8_intercept(vcpu);
10600                         kvm_lapic_sync_to_vapic(vcpu);
10601                 }
10602         }
10603
10604         r = kvm_mmu_reload(vcpu);
10605         if (unlikely(r))
10606                 goto cancel_injection;
10608
10609         preempt_disable();
10610
10611         static_call(kvm_x86_prepare_switch_to_guest)(vcpu);
10612
10613         /*
10614          * Disable IRQs before setting IN_GUEST_MODE.  Posted interrupt
10615          * IPIs are then delayed until after guest entry, which ensures that they
10616          * result in virtual interrupt delivery.
10617          */
10618         local_irq_disable();
10619
10620         /* Store vcpu->apicv_active before vcpu->mode.  */
10621         smp_store_release(&vcpu->mode, IN_GUEST_MODE);
10622
10623         kvm_vcpu_srcu_read_unlock(vcpu);
10624
10625         /*
10626          * 1) We should set ->mode before checking ->requests.  Please see
10627          * the comment in kvm_vcpu_exiting_guest_mode().
10628          *
10629          * 2) For APICv, we should set ->mode before checking PID.ON. This
10630          * pairs with the memory barrier implicit in pi_test_and_set_on
10631          * (see vmx_deliver_posted_interrupt).
10632          *
10633          * 3) This also orders the write to mode from any reads to the page
10634          * tables done while the VCPU is running.  Please see the comment
10635          * in kvm_flush_remote_tlbs.
10636          */
10637         smp_mb__after_srcu_read_unlock();
10638
10639         /*
10640          * Process pending posted interrupts to handle the case where the
10641          * notification IRQ arrived in the host, or was never sent (because the
10642          * target vCPU wasn't running).  Do this regardless of the vCPU's APICv
10643          * status, KVM doesn't update assigned devices when APICv is inhibited,
10644          * i.e. they can post interrupts even if APICv is temporarily disabled.
10645          */
10646         if (kvm_lapic_enabled(vcpu))
10647                 static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
10648
10649         if (kvm_vcpu_exit_request(vcpu)) {
10650                 vcpu->mode = OUTSIDE_GUEST_MODE;
10651                 smp_wmb();
10652                 local_irq_enable();
10653                 preempt_enable();
10654                 kvm_vcpu_srcu_read_lock(vcpu);
10655                 r = 1;
10656                 goto cancel_injection;
10657         }
10658
10659         if (req_immediate_exit) {
10660                 kvm_make_request(KVM_REQ_EVENT, vcpu);
10661                 static_call(kvm_x86_request_immediate_exit)(vcpu);
10662         }
10663
10664         fpregs_assert_state_consistent();
10665         if (test_thread_flag(TIF_NEED_FPU_LOAD))
10666                 switch_fpu_return();
10667
10668         if (vcpu->arch.guest_fpu.xfd_err)
10669                 wrmsrl(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err);
10670
10671         if (unlikely(vcpu->arch.switch_db_regs)) {
10672                 set_debugreg(0, 7);
10673                 set_debugreg(vcpu->arch.eff_db[0], 0);
10674                 set_debugreg(vcpu->arch.eff_db[1], 1);
10675                 set_debugreg(vcpu->arch.eff_db[2], 2);
10676                 set_debugreg(vcpu->arch.eff_db[3], 3);
10677         } else if (unlikely(hw_breakpoint_active())) {
10678                 set_debugreg(0, 7);
10679         }
10680
10681         guest_timing_enter_irqoff();
10682
10683         for (;;) {
10684                 /*
10685                  * Assert that vCPU vs. VM APICv state is consistent.  An APICv
10686                  * update must kick and wait for all vCPUs before toggling the
10687                  * per-VM state, and responding vCPUs must wait for the update
10688                  * to complete before servicing KVM_REQ_APICV_UPDATE.
10689                  */
10690                 WARN_ON_ONCE((kvm_vcpu_apicv_activated(vcpu) != kvm_vcpu_apicv_active(vcpu)) &&
10691                              (kvm_get_apic_mode(vcpu) != LAPIC_MODE_DISABLED));
10692
10693                 exit_fastpath = static_call(kvm_x86_vcpu_run)(vcpu);
10694                 if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST))
10695                         break;
10696
10697                 if (kvm_lapic_enabled(vcpu))
10698                         static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
10699
10700                 if (unlikely(kvm_vcpu_exit_request(vcpu))) {
10701                         exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
10702                         break;
10703                 }
10704         }
10705
10706         /*
10707          * Do this here before restoring debug registers on the host.  And
10708          * since we do this before handling the vmexit, a DR access vmexit
10709          * can (a) read the correct value of the debug registers, (b) set
10710          * KVM_DEBUGREG_WONT_EXIT again.
10711          */
10712         if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) {
10713                 WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP);
10714                 static_call(kvm_x86_sync_dirty_debug_regs)(vcpu);
10715                 kvm_update_dr0123(vcpu);
10716                 kvm_update_dr7(vcpu);
10717         }
10718
10719         /*
10720          * If the guest has used debug registers, at least dr7
10721          * will be disabled while returning to the host.
10722          * If we don't have active breakpoints in the host, we don't
10723          * care about the messed up debug address registers. But if
10724          * we have some of them active, restore the old state.
10725          */
10726         if (hw_breakpoint_active())
10727                 hw_breakpoint_restore();
10728
10729         vcpu->arch.last_vmentry_cpu = vcpu->cpu;
10730         vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
10731
10732         vcpu->mode = OUTSIDE_GUEST_MODE;
10733         smp_wmb();
10734
10735         /*
10736          * Sync xfd before calling handle_exit_irqoff() which may
10737          * rely on the fact that guest_fpu::xfd is up-to-date (e.g.
10738          * in #NM irqoff handler).
10739          */
10740         if (vcpu->arch.xfd_no_write_intercept)
10741                 fpu_sync_guest_vmexit_xfd_state();
10742
10743         static_call(kvm_x86_handle_exit_irqoff)(vcpu);
10744
10745         if (vcpu->arch.guest_fpu.xfd_err)
10746                 wrmsrl(MSR_IA32_XFD_ERR, 0);
10747
10748         /*
10749          * Consume any pending interrupts, including the possible source of
10750          * VM-Exit on SVM and any ticks that occur between VM-Exit and now.
10751          * An instruction is required after local_irq_enable() to fully unblock
10752          * interrupts on processors that implement an interrupt shadow; the
10753          * stat.exits increment will do nicely.
10754          */
10755         kvm_before_interrupt(vcpu, KVM_HANDLING_IRQ);
10756         local_irq_enable();
10757         ++vcpu->stat.exits;
10758         local_irq_disable();
10759         kvm_after_interrupt(vcpu);
10760
10761         /*
10762          * Wait until after servicing IRQs to account guest time so that any
10763          * ticks that occurred while running the guest are properly accounted
10764          * to the guest.  Waiting until IRQs are enabled degrades the accuracy
10765          * of accounting via context tracking, but the loss of accuracy is
10766          * acceptable for all known use cases.
10767          */
10768         guest_timing_exit_irqoff();
10769
10770         local_irq_enable();
10771         preempt_enable();
10772
10773         kvm_vcpu_srcu_read_lock(vcpu);
10774
10775         /*
10776          * Profile KVM exit RIPs:
10777          */
10778         if (unlikely(prof_on == KVM_PROFILING)) {
10779                 unsigned long rip = kvm_rip_read(vcpu);
10780                 profile_hit(KVM_PROFILING, (void *)rip);
10781         }
10782
10783         if (unlikely(vcpu->arch.tsc_always_catchup))
10784                 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
10785
10786         if (vcpu->arch.apic_attention)
10787                 kvm_lapic_sync_from_vapic(vcpu);
10788
10789         r = static_call(kvm_x86_handle_exit)(vcpu, exit_fastpath);
10790         return r;
10791
10792 cancel_injection:
10793         if (req_immediate_exit)
10794                 kvm_make_request(KVM_REQ_EVENT, vcpu);
10795         static_call(kvm_x86_cancel_injection)(vcpu);
10796         if (unlikely(vcpu->arch.apic_attention))
10797                 kvm_lapic_sync_from_vapic(vcpu);
10798 out:
10799         return r;
10800 }
10801
10802 /* Called within kvm->srcu read side.  */
10803 static inline int vcpu_block(struct kvm_vcpu *vcpu)
10804 {
10805         bool hv_timer;
10806
10807         if (!kvm_arch_vcpu_runnable(vcpu)) {
10808                 /*
10809                  * Switch to the software timer before halt-polling/blocking as
10810                  * the guest's timer may be a break event for the vCPU, and the
10811                  * hypervisor timer runs only when the CPU is in guest mode.
10812                  * Switch before halt-polling so that KVM recognizes an expired
10813                  * timer before blocking.
10814                  */
10815                 hv_timer = kvm_lapic_hv_timer_in_use(vcpu);
10816                 if (hv_timer)
10817                         kvm_lapic_switch_to_sw_timer(vcpu);
10818
10819                 kvm_vcpu_srcu_read_unlock(vcpu);
10820                 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
10821                         kvm_vcpu_halt(vcpu);
10822                 else
10823                         kvm_vcpu_block(vcpu);
10824                 kvm_vcpu_srcu_read_lock(vcpu);
10825
10826                 if (hv_timer)
10827                         kvm_lapic_switch_to_hv_timer(vcpu);
10828
10829                 /*
10830                  * If the vCPU is not runnable, a signal or another host event
10831                  * of some kind is pending; service it without changing the
10832                  * vCPU's activity state.
10833                  */
10834                 if (!kvm_arch_vcpu_runnable(vcpu))
10835                         return 1;
10836         }
10837
10838         /*
10839          * Evaluate nested events before exiting the halted state.  This allows
10840          * the halt state to be recorded properly in the VMCS12's activity
10841          * state field (AMD does not have a similar field and a VM-Exit always
10842          * causes a spurious wakeup from HLT).
10843          */
10844         if (is_guest_mode(vcpu)) {
10845                 if (kvm_check_nested_events(vcpu) < 0)
10846                         return 0;
10847         }
10848
10849         if (kvm_apic_accept_events(vcpu) < 0)
10850                 return 0;
10851         switch (vcpu->arch.mp_state) {
10852         case KVM_MP_STATE_HALTED:
10853         case KVM_MP_STATE_AP_RESET_HOLD:
10854                 vcpu->arch.pv.pv_unhalted = false;
10855                 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
10857                 fallthrough;
10858         case KVM_MP_STATE_RUNNABLE:
10859                 vcpu->arch.apf.halted = false;
10860                 break;
10861         case KVM_MP_STATE_INIT_RECEIVED:
10862                 break;
10863         default:
10864                 WARN_ON_ONCE(1);
10865                 break;
10866         }
10867         return 1;
10868 }
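
/*
 * Return-value convention above: 1 tells the caller to keep running the
 * vCPU loop (possibly entering the guest), 0 propagates an exit to
 * userspace (e.g. a nested-event or APIC-event error).  Note that a
 * wakeup from HALTED/AP_RESET_HOLD transitions straight to RUNNABLE
 * before the loop continues.
 */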
10869
10870 static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
10871 {
10872         return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
10873                 !vcpu->arch.apf.halted);
10874 }
10875
10876 /* Called within kvm->srcu read side.  */
10877 static int vcpu_run(struct kvm_vcpu *vcpu)
10878 {
10879         int r;
10880
10881         vcpu->arch.l1tf_flush_l1d = true;
10882
10883         for (;;) {
10884                 /*
10885                  * If another guest vCPU requests a PV TLB flush in the middle
10886                  * of instruction emulation, the rest of the emulation could
10887                  * use a stale page translation. Assume that any code after
10888                  * this point can start executing an instruction.
10889                  */
10890                 vcpu->arch.at_instruction_boundary = false;
10891                 if (kvm_vcpu_running(vcpu)) {
10892                         r = vcpu_enter_guest(vcpu);
10893                 } else {
10894                         r = vcpu_block(vcpu);
10895                 }
10896
10897                 if (r <= 0)
10898                         break;
10899
10900                 kvm_clear_request(KVM_REQ_UNBLOCK, vcpu);
10901                 if (kvm_xen_has_pending_events(vcpu))
10902                         kvm_xen_inject_pending_events(vcpu);
10903
10904                 if (kvm_cpu_has_pending_timer(vcpu))
10905                         kvm_inject_pending_timer_irqs(vcpu);
10906
10907                 if (dm_request_for_irq_injection(vcpu) &&
10908                         kvm_vcpu_ready_for_interrupt_injection(vcpu)) {
10909                         r = 0;
10910                         vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
10911                         ++vcpu->stat.request_irq_exits;
10912                         break;
10913                 }
10914
10915                 if (__xfer_to_guest_mode_work_pending()) {
10916                         kvm_vcpu_srcu_read_unlock(vcpu);
10917                         r = xfer_to_guest_mode_handle_work(vcpu);
10918                         kvm_vcpu_srcu_read_lock(vcpu);
10919                         if (r)
10920                                 return r;
10921                 }
10922         }
10923
10924         return r;
10925 }
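
/*
 * The r <= 0 check above is what terminates the loop: both
 * vcpu_enter_guest() and vcpu_block() return > 0 to keep iterating in the
 * kernel, 0 to exit to userspace with vcpu->run populated, and < 0 on
 * error.
 */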
10926
10927 static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
10928 {
10929         return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
10930 }
10931
10932 static int complete_emulated_pio(struct kvm_vcpu *vcpu)
10933 {
10934         BUG_ON(!vcpu->arch.pio.count);
10935
10936         return complete_emulated_io(vcpu);
10937 }
10938
10939 /*
10940  * Implements the following, as a state machine:
10941  *
10942  * read:
10943  *   for each fragment
10944  *     for each mmio piece in the fragment
10945  *       write gpa, len
10946  *       exit
10947  *       copy data
10948  *   execute insn
10949  *
10950  * write:
10951  *   for each fragment
10952  *     for each mmio piece in the fragment
10953  *       write gpa, len
10954  *       copy data
10955  *       exit
10956  */
10957 static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
10958 {
10959         struct kvm_run *run = vcpu->run;
10960         struct kvm_mmio_fragment *frag;
10961         unsigned len;
10962
10963         BUG_ON(!vcpu->mmio_needed);
10964
10965         /* Complete previous fragment */
10966         frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment];
10967         len = min(8u, frag->len);
10968         if (!vcpu->mmio_is_write)
10969                 memcpy(frag->data, run->mmio.data, len);
10970
10971         if (frag->len <= 8) {
10972                 /* Switch to the next fragment. */
10973                 frag++;
10974                 vcpu->mmio_cur_fragment++;
10975         } else {
10976                 /* Go forward to the next mmio piece. */
10977                 frag->data += len;
10978                 frag->gpa += len;
10979                 frag->len -= len;
10980         }
10981
10982         if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) {
10983                 vcpu->mmio_needed = 0;
10984
10985                 /* FIXME: return into emulator if single-stepping.  */
10986                 if (vcpu->mmio_is_write)
10987                         return 1;
10988                 vcpu->mmio_read_completed = 1;
10989                 return complete_emulated_io(vcpu);
10990         }
10991
10992         run->exit_reason = KVM_EXIT_MMIO;
10993         run->mmio.phys_addr = frag->gpa;
10994         if (vcpu->mmio_is_write)
10995                 memcpy(run->mmio.data, frag->data, min(8u, frag->len));
10996         run->mmio.len = min(8u, frag->len);
10997         run->mmio.is_write = vcpu->mmio_is_write;
10998         vcpu->arch.complete_userspace_io = complete_emulated_mmio;
10999         return 0;
11000 }
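
/*
 * Worked example of the state machine above: a 12-byte emulated MMIO
 * write is a single fragment handled in two userspace round trips --
 * first an 8-byte KVM_EXIT_MMIO (the fragment advances by 8, len drops
 * to 4), then a 4-byte one, after which mmio_needed is cleared and
 * emulation resumes.
 */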
11001
11002 /* Swap (qemu) user FPU context for the guest FPU context. */
11003 static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
11004 {
11005         /* Exclude PKRU, it's restored separately immediately after VM-Exit. */
11006         fpu_swap_kvm_fpstate(&vcpu->arch.guest_fpu, true);
11007         trace_kvm_fpu(1);
11008 }
11009
11010 /* When vcpu_run ends, restore user space FPU context. */
11011 static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
11012 {
11013         fpu_swap_kvm_fpstate(&vcpu->arch.guest_fpu, false);
11014         ++vcpu->stat.fpu_reload;
11015         trace_kvm_fpu(0);
11016 }
11017
11018 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
11019 {
11020         struct kvm_queued_exception *ex = &vcpu->arch.exception;
11021         struct kvm_run *kvm_run = vcpu->run;
11022         int r;
11023
11024         vcpu_load(vcpu);
11025         kvm_sigset_activate(vcpu);
11026         kvm_run->flags = 0;
11027         kvm_load_guest_fpu(vcpu);
11028
11029         kvm_vcpu_srcu_read_lock(vcpu);
11030         if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
11031                 if (kvm_run->immediate_exit) {
11032                         r = -EINTR;
11033                         goto out;
11034                 }
11035                 /*
11036                  * It should be impossible for the hypervisor timer to be in
11037                  * use before KVM has ever run the vCPU.
11038                  */
11039                 WARN_ON_ONCE(kvm_lapic_hv_timer_in_use(vcpu));
11040
11041                 kvm_vcpu_srcu_read_unlock(vcpu);
11042                 kvm_vcpu_block(vcpu);
11043                 kvm_vcpu_srcu_read_lock(vcpu);
11044
11045                 if (kvm_apic_accept_events(vcpu) < 0) {
11046                         r = 0;
11047                         goto out;
11048                 }
11049                 r = -EAGAIN;
11050                 if (signal_pending(current)) {
11051                         r = -EINTR;
11052                         kvm_run->exit_reason = KVM_EXIT_INTR;
11053                         ++vcpu->stat.signal_exits;
11054                 }
11055                 goto out;
11056         }
11057
11058         if ((kvm_run->kvm_valid_regs & ~KVM_SYNC_X86_VALID_FIELDS) ||
11059             (kvm_run->kvm_dirty_regs & ~KVM_SYNC_X86_VALID_FIELDS)) {
11060                 r = -EINVAL;
11061                 goto out;
11062         }
11063
11064         if (kvm_run->kvm_dirty_regs) {
11065                 r = sync_regs(vcpu);
11066                 if (r != 0)
11067                         goto out;
11068         }
11069
11070         /* re-sync apic's tpr */
11071         if (!lapic_in_kernel(vcpu)) {
11072                 if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) {
11073                         r = -EINVAL;
11074                         goto out;
11075                 }
11076         }
11077
11078         /*
11079          * If userspace set a pending exception and L2 is active, convert it to
11080          * a pending VM-Exit if L1 wants to intercept the exception.
11081          */
11082         if (vcpu->arch.exception_from_userspace && is_guest_mode(vcpu) &&
11083             kvm_x86_ops.nested_ops->is_exception_vmexit(vcpu, ex->vector,
11084                                                         ex->error_code)) {
11085                 kvm_queue_exception_vmexit(vcpu, ex->vector,
11086                                            ex->has_error_code, ex->error_code,
11087                                            ex->has_payload, ex->payload);
11088                 ex->injected = false;
11089                 ex->pending = false;
11090         }
11091         vcpu->arch.exception_from_userspace = false;
11092
11093         if (unlikely(vcpu->arch.complete_userspace_io)) {
11094                 int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io;
11095                 vcpu->arch.complete_userspace_io = NULL;
11096                 r = cui(vcpu);
11097                 if (r <= 0)
11098                         goto out;
11099         } else {
11100                 WARN_ON_ONCE(vcpu->arch.pio.count);
11101                 WARN_ON_ONCE(vcpu->mmio_needed);
11102         }
11103
11104         if (kvm_run->immediate_exit) {
11105                 r = -EINTR;
11106                 goto out;
11107         }
11108
11109         r = static_call(kvm_x86_vcpu_pre_run)(vcpu);
11110         if (r <= 0)
11111                 goto out;
11112
11113         r = vcpu_run(vcpu);
11114
11115 out:
11116         kvm_put_guest_fpu(vcpu);
11117         if (kvm_run->kvm_valid_regs)
11118                 store_regs(vcpu);
11119         post_kvm_run_save(vcpu);
11120         kvm_vcpu_srcu_read_unlock(vcpu);
11121
11122         kvm_sigset_deactivate(vcpu);
11123         vcpu_put(vcpu);
11124         return r;
11125 }
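
/*
 * Sketch of the userspace side of this ioctl (hypothetical, QEMU-like;
 * error handling and the mmap size query elided):
 *
 *	struct kvm_run *run = mmap(..., vcpu_fd, 0);
 *	for (;;) {
 *		ioctl(vcpu_fd, KVM_RUN, 0);
 *		switch (run->exit_reason) {
 *		case KVM_EXIT_MMIO:	// emulate, then re-enter; the
 *					// complete_userspace_io callback
 *					// fires on the next KVM_RUN
 *		case KVM_EXIT_INTR:	// the -EINTR paths above, retry
 *			...
 *		}
 *	}
 */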
11126
11127 static void __get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
11128 {
11129         if (vcpu->arch.emulate_regs_need_sync_to_vcpu) {
11130                 /*
11131                  * We are here if userspace calls get_regs() in the middle of
11132                  * instruction emulation. Register state needs to be copied
11133                  * back from the emulation context to the vcpu. Userspace
11134                  * shouldn't normally do that, but some badly designed PV
11135                  * devices (the vmware backdoor interface) need this to work.
11136                  */
11137                 emulator_writeback_register_cache(vcpu->arch.emulate_ctxt);
11138                 vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
11139         }
11140         regs->rax = kvm_rax_read(vcpu);
11141         regs->rbx = kvm_rbx_read(vcpu);
11142         regs->rcx = kvm_rcx_read(vcpu);
11143         regs->rdx = kvm_rdx_read(vcpu);
11144         regs->rsi = kvm_rsi_read(vcpu);
11145         regs->rdi = kvm_rdi_read(vcpu);
11146         regs->rsp = kvm_rsp_read(vcpu);
11147         regs->rbp = kvm_rbp_read(vcpu);
11148 #ifdef CONFIG_X86_64
11149         regs->r8 = kvm_r8_read(vcpu);
11150         regs->r9 = kvm_r9_read(vcpu);
11151         regs->r10 = kvm_r10_read(vcpu);
11152         regs->r11 = kvm_r11_read(vcpu);
11153         regs->r12 = kvm_r12_read(vcpu);
11154         regs->r13 = kvm_r13_read(vcpu);
11155         regs->r14 = kvm_r14_read(vcpu);
11156         regs->r15 = kvm_r15_read(vcpu);
11157 #endif
11158
11159         regs->rip = kvm_rip_read(vcpu);
11160         regs->rflags = kvm_get_rflags(vcpu);
11161 }
11162
11163 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
11164 {
11165         vcpu_load(vcpu);
11166         __get_regs(vcpu, regs);
11167         vcpu_put(vcpu);
11168         return 0;
11169 }
11170
11171 static void __set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
11172 {
11173         vcpu->arch.emulate_regs_need_sync_from_vcpu = true;
11174         vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
11175
11176         kvm_rax_write(vcpu, regs->rax);
11177         kvm_rbx_write(vcpu, regs->rbx);
11178         kvm_rcx_write(vcpu, regs->rcx);
11179         kvm_rdx_write(vcpu, regs->rdx);
11180         kvm_rsi_write(vcpu, regs->rsi);
11181         kvm_rdi_write(vcpu, regs->rdi);
11182         kvm_rsp_write(vcpu, regs->rsp);
11183         kvm_rbp_write(vcpu, regs->rbp);
11184 #ifdef CONFIG_X86_64
11185         kvm_r8_write(vcpu, regs->r8);
11186         kvm_r9_write(vcpu, regs->r9);
11187         kvm_r10_write(vcpu, regs->r10);
11188         kvm_r11_write(vcpu, regs->r11);
11189         kvm_r12_write(vcpu, regs->r12);
11190         kvm_r13_write(vcpu, regs->r13);
11191         kvm_r14_write(vcpu, regs->r14);
11192         kvm_r15_write(vcpu, regs->r15);
11193 #endif
11194
11195         kvm_rip_write(vcpu, regs->rip);
11196         kvm_set_rflags(vcpu, regs->rflags | X86_EFLAGS_FIXED);
11197
11198         vcpu->arch.exception.pending = false;
11199         vcpu->arch.exception_vmexit.pending = false;
11200
11201         kvm_make_request(KVM_REQ_EVENT, vcpu);
11202 }
11203
11204 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
11205 {
11206         vcpu_load(vcpu);
11207         __set_regs(vcpu, regs);
11208         vcpu_put(vcpu);
11209         return 0;
11210 }
11211
11212 static void __get_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
11213 {
11214         struct desc_ptr dt;
11215
11216         if (vcpu->arch.guest_state_protected)
11217                 goto skip_protected_regs;
11218
11219         kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
11220         kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
11221         kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
11222         kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
11223         kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
11224         kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
11225
11226         kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
11227         kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
11228
11229         static_call(kvm_x86_get_idt)(vcpu, &dt);
11230         sregs->idt.limit = dt.size;
11231         sregs->idt.base = dt.address;
11232         static_call(kvm_x86_get_gdt)(vcpu, &dt);
11233         sregs->gdt.limit = dt.size;
11234         sregs->gdt.base = dt.address;
11235
11236         sregs->cr2 = vcpu->arch.cr2;
11237         sregs->cr3 = kvm_read_cr3(vcpu);
11238
11239 skip_protected_regs:
11240         sregs->cr0 = kvm_read_cr0(vcpu);
11241         sregs->cr4 = kvm_read_cr4(vcpu);
11242         sregs->cr8 = kvm_get_cr8(vcpu);
11243         sregs->efer = vcpu->arch.efer;
11244         sregs->apic_base = kvm_get_apic_base(vcpu);
11245 }
11246
11247 static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
11248 {
11249         __get_sregs_common(vcpu, sregs);
11250
11251         if (vcpu->arch.guest_state_protected)
11252                 return;
11253
11254         if (vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft)
11255                 set_bit(vcpu->arch.interrupt.nr,
11256                         (unsigned long *)sregs->interrupt_bitmap);
11257 }
11258
11259 static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2)
11260 {
11261         int i;
11262
11263         __get_sregs_common(vcpu, (struct kvm_sregs *)sregs2);
11264
11265         if (vcpu->arch.guest_state_protected)
11266                 return;
11267
11268         if (is_pae_paging(vcpu)) {
11269                 for (i = 0; i < 4; i++)
11270                         sregs2->pdptrs[i] = kvm_pdptr_read(vcpu, i);
11271                 sregs2->flags |= KVM_SREGS2_FLAGS_PDPTRS_VALID;
11272         }
11273 }
11274
11275 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
11276                                   struct kvm_sregs *sregs)
11277 {
11278         vcpu_load(vcpu);
11279         __get_sregs(vcpu, sregs);
11280         vcpu_put(vcpu);
11281         return 0;
11282 }
11283
11284 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
11285                                     struct kvm_mp_state *mp_state)
11286 {
11287         int r;
11288
11289         vcpu_load(vcpu);
11290         if (kvm_mpx_supported())
11291                 kvm_load_guest_fpu(vcpu);
11292
11293         r = kvm_apic_accept_events(vcpu);
11294         if (r < 0)
11295                 goto out;
11296         r = 0;
11297
11298         if ((vcpu->arch.mp_state == KVM_MP_STATE_HALTED ||
11299              vcpu->arch.mp_state == KVM_MP_STATE_AP_RESET_HOLD) &&
11300             vcpu->arch.pv.pv_unhalted)
11301                 mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
11302         else
11303                 mp_state->mp_state = vcpu->arch.mp_state;
11304
11305 out:
11306         if (kvm_mpx_supported())
11307                 kvm_put_guest_fpu(vcpu);
11308         vcpu_put(vcpu);
11309         return r;
11310 }
11311
11312 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
11313                                     struct kvm_mp_state *mp_state)
11314 {
11315         int ret = -EINVAL;
11316
11317         vcpu_load(vcpu);
11318
11319         switch (mp_state->mp_state) {
11320         case KVM_MP_STATE_UNINITIALIZED:
11321         case KVM_MP_STATE_HALTED:
11322         case KVM_MP_STATE_AP_RESET_HOLD:
11323         case KVM_MP_STATE_INIT_RECEIVED:
11324         case KVM_MP_STATE_SIPI_RECEIVED:
11325                 if (!lapic_in_kernel(vcpu))
11326                         goto out;
11327                 break;
11328
11329         case KVM_MP_STATE_RUNNABLE:
11330                 break;
11331
11332         default:
11333                 goto out;
11334         }
11335
11336         /*
11337          * Pending INITs are reported using KVM_SET_VCPU_EVENTS; disallow
11338          * forcing the guest into INIT/SIPI if those events are supposed to be
11339          * blocked.  KVM prioritizes SMI over INIT, so reject INIT/SIPI state
11340          * if an SMI is pending as well.
11341          */
11342         if ((!kvm_apic_init_sipi_allowed(vcpu) || vcpu->arch.smi_pending) &&
11343             (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED ||
11344              mp_state->mp_state == KVM_MP_STATE_INIT_RECEIVED))
11345                 goto out;
11346
11347         if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
11348                 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
11349                 set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events);
11350         } else
11351                 vcpu->arch.mp_state = mp_state->mp_state;
11352         kvm_make_request(KVM_REQ_EVENT, vcpu);
11353
11354         ret = 0;
11355 out:
11356         vcpu_put(vcpu);
11357         return ret;
11358 }
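
/*
 * Illustrative userspace sketch (not kernel code): driving the
 * KVM_SET_MP_STATE ioctl handled above to force a vCPU runnable.
 * "vcpu_fd" is an assumed descriptor returned by KVM_CREATE_VCPU.
 *
 *	#include <err.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/kvm.h>
 *
 *	struct kvm_mp_state mp = { .mp_state = KVM_MP_STATE_RUNNABLE };
 *
 *	if (ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp) < 0)
 *		err(1, "KVM_SET_MP_STATE");
 */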
11359
11360 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
11361                     int reason, bool has_error_code, u32 error_code)
11362 {
11363         struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
11364         int ret;
11365
11366         init_emulate_ctxt(vcpu);
11367
11368         ret = emulator_task_switch(ctxt, tss_selector, idt_index, reason,
11369                                    has_error_code, error_code);
11370         if (ret) {
11371                 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
11372                 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
11373                 vcpu->run->internal.ndata = 0;
11374                 return 0;
11375         }
11376
11377         kvm_rip_write(vcpu, ctxt->eip);
11378         kvm_set_rflags(vcpu, ctxt->eflags);
11379         return 1;
11380 }
11381 EXPORT_SYMBOL_GPL(kvm_task_switch);
11382
11383 static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
11384 {
11385         if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) {
11386                 /*
11387                  * When EFER.LME and CR0.PG are set, the processor is in
11388                  * 64-bit mode (though maybe in a 32-bit code segment).
11389                  * CR4.PAE and EFER.LMA must be set.
11390                  */
11391                 if (!(sregs->cr4 & X86_CR4_PAE) || !(sregs->efer & EFER_LMA))
11392                         return false;
11393                 if (kvm_vcpu_is_illegal_gpa(vcpu, sregs->cr3))
11394                         return false;
11395         } else {
11396                 /*
11397                  * Not in 64-bit mode: EFER.LMA is clear and the code
11398                  * segment cannot be 64-bit.
11399                  */
11400                 if (sregs->efer & EFER_LMA || sregs->cs.l)
11401                         return false;
11402         }
11403
11404         return kvm_is_valid_cr4(vcpu, sregs->cr4);
11405 }
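
/*
 * Illustrative userspace sketch (not kernel code): an sregs update that
 * satisfies the long-mode consistency rules enforced above, i.e. EFER.LME
 * and CR0.PG set together with CR4.PAE and EFER.LMA.  The CR bit macros
 * come from the uapi <asm/processor-flags.h>; the EFER bits are defined
 * locally from the architecture manuals.  A real VMM must also install
 * page tables and a valid CR3 before enabling paging.
 *
 *	#define EFER_LME (1ULL << 8)
 *	#define EFER_LMA (1ULL << 10)
 *
 *	struct kvm_sregs sregs;
 *
 *	ioctl(vcpu_fd, KVM_GET_SREGS, &sregs);
 *	sregs.cr4  |= X86_CR4_PAE;
 *	sregs.cr0  |= X86_CR0_PE | X86_CR0_PG;
 *	sregs.efer |= EFER_LME | EFER_LMA;
 *	sregs.cs.l  = 1;	/* 64-bit code segment */
 *	ioctl(vcpu_fd, KVM_SET_SREGS, &sregs);
 */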
11406
11407 static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs,
11408                 int *mmu_reset_needed, bool update_pdptrs)
11409 {
11410         struct msr_data apic_base_msr;
11411         int idx;
11412         struct desc_ptr dt;
11413
11414         if (!kvm_is_valid_sregs(vcpu, sregs))
11415                 return -EINVAL;
11416
11417         apic_base_msr.data = sregs->apic_base;
11418         apic_base_msr.host_initiated = true;
11419         if (kvm_set_apic_base(vcpu, &apic_base_msr))
11420                 return -EINVAL;
11421
11422         if (vcpu->arch.guest_state_protected)
11423                 return 0;
11424
11425         dt.size = sregs->idt.limit;
11426         dt.address = sregs->idt.base;
11427         static_call(kvm_x86_set_idt)(vcpu, &dt);
11428         dt.size = sregs->gdt.limit;
11429         dt.address = sregs->gdt.base;
11430         static_call(kvm_x86_set_gdt)(vcpu, &dt);
11431
11432         vcpu->arch.cr2 = sregs->cr2;
11433         *mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
11434         vcpu->arch.cr3 = sregs->cr3;
11435         kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
11436         static_call_cond(kvm_x86_post_set_cr3)(vcpu, sregs->cr3);
11437
11438         kvm_set_cr8(vcpu, sregs->cr8);
11439
11440         *mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
11441         static_call(kvm_x86_set_efer)(vcpu, sregs->efer);
11442
11443         *mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
11444         static_call(kvm_x86_set_cr0)(vcpu, sregs->cr0);
11445         vcpu->arch.cr0 = sregs->cr0;
11446
11447         *mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
11448         static_call(kvm_x86_set_cr4)(vcpu, sregs->cr4);
11449
11450         if (update_pdptrs) {
11451                 idx = srcu_read_lock(&vcpu->kvm->srcu);
11452                 if (is_pae_paging(vcpu)) {
11453                         load_pdptrs(vcpu, kvm_read_cr3(vcpu));
11454                         *mmu_reset_needed = 1;
11455                 }
11456                 srcu_read_unlock(&vcpu->kvm->srcu, idx);
11457         }
11458
11459         kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
11460         kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
11461         kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
11462         kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
11463         kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
11464         kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
11465
11466         kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
11467         kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
11468
11469         update_cr8_intercept(vcpu);
11470
11471         /* Older userspace won't unhalt the vcpu on reset. */
11472         if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
11473             sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
11474             !is_protmode(vcpu))
11475                 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
11476
11477         return 0;
11478 }
11479
11480 static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
11481 {
11482         int pending_vec, max_bits;
11483         int mmu_reset_needed = 0;
11484         int ret = __set_sregs_common(vcpu, sregs, &mmu_reset_needed, true);
11485
11486         if (ret)
11487                 return ret;
11488
11489         if (mmu_reset_needed)
11490                 kvm_mmu_reset_context(vcpu);
11491
11492         max_bits = KVM_NR_INTERRUPTS;
11493         pending_vec = find_first_bit(
11494                 (const unsigned long *)sregs->interrupt_bitmap, max_bits);
11495
11496         if (pending_vec < max_bits) {
11497                 kvm_queue_interrupt(vcpu, pending_vec, false);
11498                 pr_debug("Set back pending irq %d\n", pending_vec);
11499                 kvm_make_request(KVM_REQ_EVENT, vcpu);
11500         }
11501         return 0;
11502 }
11503
11504 static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2)
11505 {
11506         int mmu_reset_needed = 0;
11507         bool valid_pdptrs = sregs2->flags & KVM_SREGS2_FLAGS_PDPTRS_VALID;
11508         bool pae = (sregs2->cr0 & X86_CR0_PG) && (sregs2->cr4 & X86_CR4_PAE) &&
11509                 !(sregs2->efer & EFER_LMA);
11510         int i, ret;
11511
11512         if (sregs2->flags & ~KVM_SREGS2_FLAGS_PDPTRS_VALID)
11513                 return -EINVAL;
11514
11515         if (valid_pdptrs && (!pae || vcpu->arch.guest_state_protected))
11516                 return -EINVAL;
11517
11518         ret = __set_sregs_common(vcpu, (struct kvm_sregs *)sregs2,
11519                                  &mmu_reset_needed, !valid_pdptrs);
11520         if (ret)
11521                 return ret;
11522
11523         if (valid_pdptrs) {
11524                 for (i = 0; i < 4 ; i++)
11525                         kvm_pdptr_write(vcpu, i, sregs2->pdptrs[i]);
11526
11527                 kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
11528                 mmu_reset_needed = 1;
11529                 vcpu->arch.pdptrs_from_userspace = true;
11530         }
11531         if (mmu_reset_needed)
11532                 kvm_mmu_reset_context(vcpu);
11533         return 0;
11534 }
11535
11536 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
11537                                   struct kvm_sregs *sregs)
11538 {
11539         int ret;
11540
11541         vcpu_load(vcpu);
11542         ret = __set_sregs(vcpu, sregs);
11543         vcpu_put(vcpu);
11544         return ret;
11545 }
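
/*
 * Illustrative userspace sketch (not kernel code): migrating a PAE guest
 * with KVM_SET_SREGS2 so the saved PDPTRs are restored verbatim instead
 * of being re-read from guest memory, per __set_sregs2() above.  "saved"
 * is an assumed struct kvm_sregs2 snapshot taken on the source host.
 *
 *	struct kvm_sregs2 s2 = saved;
 *
 *	s2.flags |= KVM_SREGS2_FLAGS_PDPTRS_VALID;
 *	if (ioctl(vcpu_fd, KVM_SET_SREGS2, &s2) < 0)
 *		err(1, "KVM_SET_SREGS2");
 */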
11546
11547 static void kvm_arch_vcpu_guestdbg_update_apicv_inhibit(struct kvm *kvm)
11548 {
11549         bool set = false;
11550         struct kvm_vcpu *vcpu;
11551         unsigned long i;
11552
11553         if (!enable_apicv)
11554                 return;
11555
11556         down_write(&kvm->arch.apicv_update_lock);
11557
11558         kvm_for_each_vcpu(i, vcpu, kvm) {
11559                 if (vcpu->guest_debug & KVM_GUESTDBG_BLOCKIRQ) {
11560                         set = true;
11561                         break;
11562                 }
11563         }
11564         __kvm_set_or_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_BLOCKIRQ, set);
11565         up_write(&kvm->arch.apicv_update_lock);
11566 }
11567
11568 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
11569                                         struct kvm_guest_debug *dbg)
11570 {
11571         unsigned long rflags;
11572         int i, r;
11573
11574         if (vcpu->arch.guest_state_protected)
11575                 return -EINVAL;
11576
11577         vcpu_load(vcpu);
11578
11579         if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
11580                 r = -EBUSY;
11581                 if (kvm_is_exception_pending(vcpu))
11582                         goto out;
11583                 if (dbg->control & KVM_GUESTDBG_INJECT_DB)
11584                         kvm_queue_exception(vcpu, DB_VECTOR);
11585                 else
11586                         kvm_queue_exception(vcpu, BP_VECTOR);
11587         }
11588
11589         /*
11590          * Read rflags as long as potentially injected trace flags are still
11591          * filtered out.
11592          */
11593         rflags = kvm_get_rflags(vcpu);
11594
11595         vcpu->guest_debug = dbg->control;
11596         if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
11597                 vcpu->guest_debug = 0;
11598
11599         if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
11600                 for (i = 0; i < KVM_NR_DB_REGS; ++i)
11601                         vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
11602                 vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7];
11603         } else {
11604                 for (i = 0; i < KVM_NR_DB_REGS; i++)
11605                         vcpu->arch.eff_db[i] = vcpu->arch.db[i];
11606         }
11607         kvm_update_dr7(vcpu);
11608
11609         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
11610                 vcpu->arch.singlestep_rip = kvm_get_linear_rip(vcpu);
11611
11612         /*
11613          * Trigger an rflags update that will inject or remove the trace
11614          * flags.
11615          */
11616         kvm_set_rflags(vcpu, rflags);
11617
11618         static_call(kvm_x86_update_exception_bitmap)(vcpu);
11619
11620         kvm_arch_vcpu_guestdbg_update_apicv_inhibit(vcpu->kvm);
11621
11622         r = 0;
11623
11624 out:
11625         vcpu_put(vcpu);
11626         return r;
11627 }
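
/*
 * Illustrative userspace sketch (not kernel code): enabling single-step
 * through the KVM_SET_GUEST_DEBUG ioctl serviced above; each subsequent
 * KVM_RUN then exits back to userspace with KVM_EXIT_DEBUG.
 *
 *	struct kvm_guest_debug dbg = {
 *		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg) < 0)
 *		err(1, "KVM_SET_GUEST_DEBUG");
 */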
11628
11629 /*
11630  * Translate a guest virtual address to a guest physical address.
11631  */
11632 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
11633                                     struct kvm_translation *tr)
11634 {
11635         unsigned long vaddr = tr->linear_address;
11636         gpa_t gpa;
11637         int idx;
11638
11639         vcpu_load(vcpu);
11640
11641         idx = srcu_read_lock(&vcpu->kvm->srcu);
11642         gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
11643         srcu_read_unlock(&vcpu->kvm->srcu, idx);
11644         tr->physical_address = gpa;
11645         tr->valid = gpa != INVALID_GPA;
11646         tr->writeable = 1;
11647         tr->usermode = 0;
11648
11649         vcpu_put(vcpu);
11650         return 0;
11651 }
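
/*
 * Illustrative userspace sketch (not kernel code): resolving a guest
 * virtual address via KVM_TRANSLATE, which lands in the handler above.
 * "guest_va" is an assumed guest-virtual address of interest.
 *
 *	struct kvm_translation tr = { .linear_address = guest_va };
 *
 *	if (ioctl(vcpu_fd, KVM_TRANSLATE, &tr) < 0)
 *		err(1, "KVM_TRANSLATE");
 *	if (tr.valid)
 *		printf("GVA %#llx -> GPA %#llx\n",
 *		       (unsigned long long)tr.linear_address,
 *		       (unsigned long long)tr.physical_address);
 */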
11652
11653 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
11654 {
11655         struct fxregs_state *fxsave;
11656
11657         if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
11658                 return 0;
11659
11660         vcpu_load(vcpu);
11661
11662         fxsave = &vcpu->arch.guest_fpu.fpstate->regs.fxsave;
11663         memcpy(fpu->fpr, fxsave->st_space, 128);
11664         fpu->fcw = fxsave->cwd;
11665         fpu->fsw = fxsave->swd;
11666         fpu->ftwx = fxsave->twd;
11667         fpu->last_opcode = fxsave->fop;
11668         fpu->last_ip = fxsave->rip;
11669         fpu->last_dp = fxsave->rdp;
11670         memcpy(fpu->xmm, fxsave->xmm_space, sizeof(fxsave->xmm_space));
11671
11672         vcpu_put(vcpu);
11673         return 0;
11674 }
11675
11676 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
11677 {
11678         struct fxregs_state *fxsave;
11679
11680         if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
11681                 return 0;
11682
11683         vcpu_load(vcpu);
11684
11685         fxsave = &vcpu->arch.guest_fpu.fpstate->regs.fxsave;
11686
11687         memcpy(fxsave->st_space, fpu->fpr, 128);
11688         fxsave->cwd = fpu->fcw;
11689         fxsave->swd = fpu->fsw;
11690         fxsave->twd = fpu->ftwx;
11691         fxsave->fop = fpu->last_opcode;
11692         fxsave->rip = fpu->last_ip;
11693         fxsave->rdp = fpu->last_dp;
11694         memcpy(fxsave->xmm_space, fpu->xmm, sizeof(fxsave->xmm_space));
11695
11696         vcpu_put(vcpu);
11697         return 0;
11698 }
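
/*
 * Illustrative userspace sketch (not kernel code): a save/edit/restore
 * round trip through the FXSAVE-format kvm_fpu image marshalled by the
 * two ioctls above.
 *
 *	struct kvm_fpu fpu;
 *
 *	if (ioctl(vcpu_fd, KVM_GET_FPU, &fpu) < 0)
 *		err(1, "KVM_GET_FPU");
 *	fpu.fcw = 0x037f;	/* x87 power-on default control word */
 *	if (ioctl(vcpu_fd, KVM_SET_FPU, &fpu) < 0)
 *		err(1, "KVM_SET_FPU");
 */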
11699
11700 static void store_regs(struct kvm_vcpu *vcpu)
11701 {
11702         BUILD_BUG_ON(sizeof(struct kvm_sync_regs) > SYNC_REGS_SIZE_BYTES);
11703
11704         if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_REGS)
11705                 __get_regs(vcpu, &vcpu->run->s.regs.regs);
11706
11707         if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_SREGS)
11708                 __get_sregs(vcpu, &vcpu->run->s.regs.sregs);
11709
11710         if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_EVENTS)
11711                 kvm_vcpu_ioctl_x86_get_vcpu_events(
11712                                 vcpu, &vcpu->run->s.regs.events);
11713 }
11714
11715 static int sync_regs(struct kvm_vcpu *vcpu)
11716 {
11717         if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_REGS) {
11718                 __set_regs(vcpu, &vcpu->run->s.regs.regs);
11719                 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_REGS;
11720         }
11721         if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_SREGS) {
11722                 if (__set_sregs(vcpu, &vcpu->run->s.regs.sregs))
11723                         return -EINVAL;
11724                 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_SREGS;
11725         }
11726         if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_EVENTS) {
11727                 if (kvm_vcpu_ioctl_x86_set_vcpu_events(
11728                                 vcpu, &vcpu->run->s.regs.events))
11729                         return -EINVAL;
11730                 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_EVENTS;
11731         }
11732
11733         return 0;
11734 }
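
/*
 * Illustrative userspace sketch (not kernel code): using the synced-regs
 * protocol implemented by store_regs()/sync_regs() above to avoid extra
 * KVM_GET_REGS/KVM_SET_REGS round trips.  "run" is the assumed mmap()ed
 * kvm_run structure, and KVM_CAP_SYNC_REGS must be available.
 *
 *	run->kvm_valid_regs = KVM_SYNC_X86_REGS;
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 *	run->s.regs.regs.rax = 0;	/* patch a GPR in place */
 *	run->kvm_dirty_regs = KVM_SYNC_X86_REGS;
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 */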
11735
11736 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
11737 {
11738         if (kvm_check_tsc_unstable() && kvm->created_vcpus)
11739                 pr_warn_once("kvm: SMP vm created on host with unstable TSC; "
11740                              "guest TSC will not be reliable\n");
11741
11742         if (!kvm->arch.max_vcpu_ids)
11743                 kvm->arch.max_vcpu_ids = KVM_MAX_VCPU_IDS;
11744
11745         if (id >= kvm->arch.max_vcpu_ids)
11746                 return -EINVAL;
11747
11748         return static_call(kvm_x86_vcpu_precreate)(kvm);
11749 }
11750
11751 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
11752 {
11753         struct page *page;
11754         int r;
11755
11756         vcpu->arch.last_vmentry_cpu = -1;
11757         vcpu->arch.regs_avail = ~0;
11758         vcpu->arch.regs_dirty = ~0;
11759
11760         if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu))
11761                 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
11762         else
11763                 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
11764
11765         r = kvm_mmu_create(vcpu);
11766         if (r < 0)
11767                 return r;
11768
11769         if (irqchip_in_kernel(vcpu->kvm)) {
11770                 r = kvm_create_lapic(vcpu, lapic_timer_advance_ns);
11771                 if (r < 0)
11772                         goto fail_mmu_destroy;
11773
11774                 /*
11775                  * Defer evaluating inhibits until the vCPU is first run, as
11776                  * this vCPU will not get notified of any changes until this
11777                  * vCPU is visible to other vCPUs (marked online and added to
11778                  * the set of vCPUs).  Opportunistically mark APICv active as
11779                  * VMX in particular is highly unlikely to have inhibits.
11780                  * Ignore the current per-VM APICv state so that vCPU creation
11781                  * is guaranteed to run with a deterministic value; the request
11782                  * will ensure the vCPU gets the correct state before VM-Entry.
11783                  */
11784                 if (enable_apicv) {
11785                         vcpu->arch.apic->apicv_active = true;
11786                         kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
11787                 }
11788         } else
11789                 static_branch_inc(&kvm_has_noapic_vcpu);
11790
11791         r = -ENOMEM;
11792
11793         page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
11794         if (!page)
11795                 goto fail_free_lapic;
11796         vcpu->arch.pio_data = page_address(page);
11797
11798         vcpu->arch.mce_banks = kcalloc(KVM_MAX_MCE_BANKS * 4, sizeof(u64),
11799                                        GFP_KERNEL_ACCOUNT);
11800         vcpu->arch.mci_ctl2_banks = kcalloc(KVM_MAX_MCE_BANKS, sizeof(u64),
11801                                             GFP_KERNEL_ACCOUNT);
11802         if (!vcpu->arch.mce_banks || !vcpu->arch.mci_ctl2_banks)
11803                 goto fail_free_mce_banks;
11804         vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
11805
11806         if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask,
11807                                 GFP_KERNEL_ACCOUNT))
11808                 goto fail_free_mce_banks;
11809
11810         if (!alloc_emulate_ctxt(vcpu))
11811                 goto free_wbinvd_dirty_mask;
11812
11813         if (!fpu_alloc_guest_fpstate(&vcpu->arch.guest_fpu)) {
11814                 pr_err("kvm: failed to allocate vcpu's fpu\n");
11815                 goto free_emulate_ctxt;
11816         }
11817
11818         vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
11819         vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu);
11820
11821         vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT;
11822
11823         kvm_async_pf_hash_reset(vcpu);
11824         kvm_pmu_init(vcpu);
11825
11826         vcpu->arch.pending_external_vector = -1;
11827         vcpu->arch.preempted_in_kernel = false;
11828
11829 #if IS_ENABLED(CONFIG_HYPERV)
11830         vcpu->arch.hv_root_tdp = INVALID_PAGE;
11831 #endif
11832
11833         r = static_call(kvm_x86_vcpu_create)(vcpu);
11834         if (r)
11835                 goto free_guest_fpu;
11836
11837         vcpu->arch.arch_capabilities = kvm_get_arch_capabilities();
11838         vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT;
11839         kvm_xen_init_vcpu(vcpu);
11840         kvm_vcpu_mtrr_init(vcpu);
11841         vcpu_load(vcpu);
11842         kvm_set_tsc_khz(vcpu, vcpu->kvm->arch.default_tsc_khz);
11843         kvm_vcpu_reset(vcpu, false);
11844         kvm_init_mmu(vcpu);
11845         vcpu_put(vcpu);
11846         return 0;
11847
11848 free_guest_fpu:
11849         fpu_free_guest_fpstate(&vcpu->arch.guest_fpu);
11850 free_emulate_ctxt:
11851         kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt);
11852 free_wbinvd_dirty_mask:
11853         free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
11854 fail_free_mce_banks:
11855         kfree(vcpu->arch.mce_banks);
11856         kfree(vcpu->arch.mci_ctl2_banks);
11857         free_page((unsigned long)vcpu->arch.pio_data);
11858 fail_free_lapic:
11859         kvm_free_lapic(vcpu);
11860 fail_mmu_destroy:
11861         kvm_mmu_destroy(vcpu);
11862         return r;
11863 }
11864
11865 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
11866 {
11867         struct kvm *kvm = vcpu->kvm;
11868
11869         if (mutex_lock_killable(&vcpu->mutex))
11870                 return;
11871         vcpu_load(vcpu);
11872         kvm_synchronize_tsc(vcpu, 0);
11873         vcpu_put(vcpu);
11874
11875         /* poll control enabled by default */
11876         vcpu->arch.msr_kvm_poll_control = 1;
11877
11878         mutex_unlock(&vcpu->mutex);
11879
11880         if (kvmclock_periodic_sync && vcpu->vcpu_idx == 0)
11881                 schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
11882                                                 KVMCLOCK_SYNC_PERIOD);
11883 }
11884
11885 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
11886 {
11887         int idx;
11888
11889         kvmclock_reset(vcpu);
11890
11891         static_call(kvm_x86_vcpu_free)(vcpu);
11892
11893         kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt);
11894         free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
11895         fpu_free_guest_fpstate(&vcpu->arch.guest_fpu);
11896
11897         kvm_xen_destroy_vcpu(vcpu);
11898         kvm_hv_vcpu_uninit(vcpu);
11899         kvm_pmu_destroy(vcpu);
11900         kfree(vcpu->arch.mce_banks);
11901         kfree(vcpu->arch.mci_ctl2_banks);
11902         kvm_free_lapic(vcpu);
11903         idx = srcu_read_lock(&vcpu->kvm->srcu);
11904         kvm_mmu_destroy(vcpu);
11905         srcu_read_unlock(&vcpu->kvm->srcu, idx);
11906         free_page((unsigned long)vcpu->arch.pio_data);
11907         kvfree(vcpu->arch.cpuid_entries);
11908         if (!lapic_in_kernel(vcpu))
11909                 static_branch_dec(&kvm_has_noapic_vcpu);
11910 }
11911
11912 void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
11913 {
11914         struct kvm_cpuid_entry2 *cpuid_0x1;
11915         unsigned long old_cr0 = kvm_read_cr0(vcpu);
11916         unsigned long new_cr0;
11917
11918         /*
11919          * Several of the "set" flows, e.g. ->set_cr0(), read other registers
11920          * to handle side effects.  RESET emulation hits those flows and relies
11921          * on emulated/virtualized registers, including those that are loaded
11922          * into hardware, to be zeroed at vCPU creation.  Use CRs as a sentinel
11923          * to detect improper or missing initialization.
11924          */
11925         WARN_ON_ONCE(!init_event &&
11926                      (old_cr0 || kvm_read_cr3(vcpu) || kvm_read_cr4(vcpu)));
11927
11928         kvm_lapic_reset(vcpu, init_event);
11929
11930         vcpu->arch.hflags = 0;
11931
11932         vcpu->arch.smi_pending = 0;
11933         vcpu->arch.smi_count = 0;
11934         atomic_set(&vcpu->arch.nmi_queued, 0);
11935         vcpu->arch.nmi_pending = 0;
11936         vcpu->arch.nmi_injected = false;
11937         kvm_clear_interrupt_queue(vcpu);
11938         kvm_clear_exception_queue(vcpu);
11939
11940         memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
11941         kvm_update_dr0123(vcpu);
11942         vcpu->arch.dr6 = DR6_ACTIVE_LOW;
11943         vcpu->arch.dr7 = DR7_FIXED_1;
11944         kvm_update_dr7(vcpu);
11945
11946         vcpu->arch.cr2 = 0;
11947
11948         kvm_make_request(KVM_REQ_EVENT, vcpu);
11949         vcpu->arch.apf.msr_en_val = 0;
11950         vcpu->arch.apf.msr_int_val = 0;
11951         vcpu->arch.st.msr_val = 0;
11952
11953         kvmclock_reset(vcpu);
11954
11955         kvm_clear_async_pf_completion_queue(vcpu);
11956         kvm_async_pf_hash_reset(vcpu);
11957         vcpu->arch.apf.halted = false;
11958
11959         if (vcpu->arch.guest_fpu.fpstate && kvm_mpx_supported()) {
11960                 struct fpstate *fpstate = vcpu->arch.guest_fpu.fpstate;
11961
11962                 /*
11963                  * All paths that lead to INIT are required to load the guest's
11964                  * FPU state (because most paths are buried in KVM_RUN).
11965                  */
11966                 if (init_event)
11967                         kvm_put_guest_fpu(vcpu);
11968
11969                 fpstate_clear_xstate_component(fpstate, XFEATURE_BNDREGS);
11970                 fpstate_clear_xstate_component(fpstate, XFEATURE_BNDCSR);
11971
11972                 if (init_event)
11973                         kvm_load_guest_fpu(vcpu);
11974         }
11975
11976         if (!init_event) {
11977                 kvm_pmu_reset(vcpu);
11978                 vcpu->arch.smbase = 0x30000;
11979
11980                 vcpu->arch.msr_misc_features_enables = 0;
11981                 vcpu->arch.ia32_misc_enable_msr = MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL |
11982                                                   MSR_IA32_MISC_ENABLE_BTS_UNAVAIL;
11983
11984                 __kvm_set_xcr(vcpu, 0, XFEATURE_MASK_FP);
11985                 __kvm_set_msr(vcpu, MSR_IA32_XSS, 0, true);
11986         }
11987
11988         /* All GPRs except RDX (handled below) are zeroed on RESET/INIT. */
11989         memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
11990         kvm_register_mark_dirty(vcpu, VCPU_REGS_RSP);
11991
11992         /*
11993          * Fall back to KVM's default Family/Model/Stepping of 0x600 (P6/Athlon)
11994          * if no CPUID match is found.  Note, it's impossible to get a match at
11995          * RESET since KVM emulates RESET before exposing the vCPU to userspace,
11996          * i.e. it's impossible for kvm_find_cpuid_entry() to find a valid entry
11997          * on RESET.  But, go through the motions in case that's ever remedied.
11998          */
11999         cpuid_0x1 = kvm_find_cpuid_entry(vcpu, 1);
12000         kvm_rdx_write(vcpu, cpuid_0x1 ? cpuid_0x1->eax : 0x600);
12001
12002         static_call(kvm_x86_vcpu_reset)(vcpu, init_event);
12003
12004         kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
12005         kvm_rip_write(vcpu, 0xfff0);
12006
12007         vcpu->arch.cr3 = 0;
12008         kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
12009
12010         /*
12011          * CR0.CD/NW are set on RESET, preserved on INIT.  Note, some versions
12012          * of Intel's SDM list CD/NW as being set on INIT, but they contradict
12013          * (or qualify) that with a footnote stating that CD/NW are preserved.
12014          */
12015         new_cr0 = X86_CR0_ET;
12016         if (init_event)
12017                 new_cr0 |= (old_cr0 & (X86_CR0_NW | X86_CR0_CD));
12018         else
12019                 new_cr0 |= X86_CR0_NW | X86_CR0_CD;
12020
12021         static_call(kvm_x86_set_cr0)(vcpu, new_cr0);
12022         static_call(kvm_x86_set_cr4)(vcpu, 0);
12023         static_call(kvm_x86_set_efer)(vcpu, 0);
12024         static_call(kvm_x86_update_exception_bitmap)(vcpu);
12025
12026         /*
12027          * On the standard CR0/CR4/EFER modification paths, there are several
12028          * complex conditions determining whether the MMU has to be reset and/or
12029          * which PCIDs have to be flushed.  However, CR0.WP and the paging-related
12030          * bits in CR4 and EFER are irrelevant if CR0.PG was '0'; and a reset+flush
12031          * is needed anyway if CR0.PG was '1' (which can only happen for INIT, as
12032          * CR0 will be '0' prior to RESET).  So we only need to check CR0.PG here.
12033          */
12034         if (old_cr0 & X86_CR0_PG) {
12035                 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
12036                 kvm_mmu_reset_context(vcpu);
12037         }
12038
12039         /*
12040          * Intel's SDM states that all TLB entries are flushed on INIT.  AMD's
12041          * APM states the TLBs are untouched by INIT, but it also states that
12042          * the TLBs are flushed on "External initialization of the processor."
12043          * Flush the guest TLB regardless of vendor, there is no meaningful
12044          * benefit in relying on the guest to flush the TLB immediately after
12045          * INIT.  A spurious TLB flush is benign and likely negligible from a
12046          * performance perspective.
12047          */
12048         if (init_event)
12049                 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
12050 }
12051 EXPORT_SYMBOL_GPL(kvm_vcpu_reset);
12052
12053 void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
12054 {
12055         struct kvm_segment cs;
12056
12057         kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
12058         cs.selector = vector << 8;
12059         cs.base = vector << 12;
12060         kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
12061         kvm_rip_write(vcpu, 0);
12062 }
12063 EXPORT_SYMBOL_GPL(kvm_vcpu_deliver_sipi_vector);
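
/*
 * Worked example for the arithmetic above: a SIPI with vector 0x9A yields
 * CS.selector = 0x9A00, CS.base = 0x9A000 and RIP = 0, so the AP begins
 * real-mode execution at physical address 0x9A000, i.e. vector << 12.
 */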
12064
12065 int kvm_arch_hardware_enable(void)
12066 {
12067         struct kvm *kvm;
12068         struct kvm_vcpu *vcpu;
12069         unsigned long i;
12070         int ret;
12071         u64 local_tsc;
12072         u64 max_tsc = 0;
12073         bool stable, backwards_tsc = false;
12074
12075         kvm_user_return_msr_cpu_online();
12076         ret = static_call(kvm_x86_hardware_enable)();
12077         if (ret != 0)
12078                 return ret;
12079
12080         local_tsc = rdtsc();
12081         stable = !kvm_check_tsc_unstable();
12082         list_for_each_entry(kvm, &vm_list, vm_list) {
12083                 kvm_for_each_vcpu(i, vcpu, kvm) {
12084                         if (!stable && vcpu->cpu == smp_processor_id())
12085                                 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
12086                         if (stable && vcpu->arch.last_host_tsc > local_tsc) {
12087                                 backwards_tsc = true;
12088                                 if (vcpu->arch.last_host_tsc > max_tsc)
12089                                         max_tsc = vcpu->arch.last_host_tsc;
12090                         }
12091                 }
12092         }
12093
12094         /*
12095          * Sometimes, even reliable TSCs go backwards.  This happens on
12096          * platforms that reset TSC during suspend or hibernate actions, but
12097          * maintain synchronization.  We must compensate.  Fortunately, we can
12098          * detect that condition here, which happens early in CPU bringup,
12099          * before any KVM threads can be running.  Unfortunately, we can't
12100          * bring the TSCs fully up to date with real time, as we aren't yet far
12101          * enough into CPU bringup that we know how much real time has actually
12102          * elapsed; our helper function, ktime_get_boottime_ns(), will be using boot
12103          * variables that haven't been updated yet.
12104          *
12105          * So we simply find the maximum observed TSC above, then record the
12106          * adjustment to TSC in each VCPU.  When the VCPU later gets loaded,
12107          * the adjustment will be applied.  Note that we accumulate
12108          * adjustments, in case multiple suspend cycles happen before some VCPU
12109          * gets a chance to run again.  In the event that no KVM threads get a
12110          * chance to run, we will miss the entire elapsed period, as we'll have
12111          * reset last_host_tsc, so VCPUs will not have the TSC adjusted and may
12112          * lose cycle time.  This isn't too big a deal, since the loss will be
12113          * uniform across all VCPUs (not to mention the scenario is extremely
12114          * unlikely). It is possible that a second hibernate recovery happens
12115          * much faster than a first, causing the observed TSC here to be
12116          * smaller; this would require additional padding adjustment, which is
12117          * why we set last_host_tsc to the local tsc observed here.
12118          *
12119          * N.B. - this code below runs only on platforms with reliable TSC,
12120          * as that is the only way backwards_tsc is set above.  Also note
12121          * that this runs for ALL vcpus, which is not a bug; all VCPUs should
12122          * have the same delta_cyc adjustment applied if backwards_tsc
12123          * is detected.  Note further, this adjustment is only done once,
12124          * as we reset last_host_tsc on all VCPUs to stop this from being
12125          * called multiple times (one for each physical CPU bringup).
12126          *
12127          * Platforms with unreliable TSCs don't have to deal with this, they
12128          * will be compensated by the logic in vcpu_load, which sets the TSC to
12129          * catchup mode.  This will catchup all VCPUs to real time, but cannot
12130          * guarantee that they stay in perfect synchronization.
12131          */
12132         if (backwards_tsc) {
12133                 u64 delta_cyc = max_tsc - local_tsc;
12134                 list_for_each_entry(kvm, &vm_list, vm_list) {
12135                         kvm->arch.backwards_tsc_observed = true;
12136                         kvm_for_each_vcpu(i, vcpu, kvm) {
12137                                 vcpu->arch.tsc_offset_adjustment += delta_cyc;
12138                                 vcpu->arch.last_host_tsc = local_tsc;
12139                                 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
12140                         }
12141
12142                         /*
12143                          * We have to disable TSC offset matching; if you were
12144                          * booting a VM while issuing an S4 host suspend,
12145                          * you may have some problems.  Solving this issue is
12146                          * left as an exercise to the reader.
12147                          */
12148                         kvm->arch.last_tsc_nsec = 0;
12149                         kvm->arch.last_tsc_write = 0;
12150                 }
12151
12152         }
12153         return 0;
12154 }
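
/*
 * Worked example of the backwards-TSC compensation above (numbers are
 * illustrative): if the highest last_host_tsc recorded across vCPUs is
 * 5,000,000 cycles but the TSC read after resume is 1,000,000, then
 * delta_cyc = 4,000,000 is accumulated into each vCPU's
 * tsc_offset_adjustment, so no guest ever observes its TSC go backwards.
 */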
12155
12156 void kvm_arch_hardware_disable(void)
12157 {
12158         static_call(kvm_x86_hardware_disable)();
12159         drop_user_return_notifiers();
12160 }
12161
12162 static inline void kvm_ops_update(struct kvm_x86_init_ops *ops)
12163 {
12164         memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops));
12165
12166 #define __KVM_X86_OP(func) \
12167         static_call_update(kvm_x86_##func, kvm_x86_ops.func);
12168 #define KVM_X86_OP(func) \
12169         WARN_ON(!kvm_x86_ops.func); __KVM_X86_OP(func)
12170 #define KVM_X86_OP_OPTIONAL __KVM_X86_OP
12171 #define KVM_X86_OP_OPTIONAL_RET0(func) \
12172         static_call_update(kvm_x86_##func, (void *)kvm_x86_ops.func ? : \
12173                                            (void *)__static_call_return0);
12174 #include <asm/kvm-x86-ops.h>
12175 #undef __KVM_X86_OP
12176
12177         kvm_pmu_ops_update(ops->pmu_ops);
12178 }
12179
12180 int kvm_arch_hardware_setup(void *opaque)
12181 {
12182         struct kvm_x86_init_ops *ops = opaque;
12183         int r;
12184
12185         rdmsrl_safe(MSR_EFER, &host_efer);
12186
12187         if (boot_cpu_has(X86_FEATURE_XSAVES))
12188                 rdmsrl(MSR_IA32_XSS, host_xss);
12189
12190         kvm_init_pmu_capability();
12191
12192         r = ops->hardware_setup();
12193         if (r != 0)
12194                 return r;
12195
12196         kvm_ops_update(ops);
12197
12198         kvm_register_perf_callbacks(ops->handle_intel_pt_intr);
12199
12200         if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES))
12201                 kvm_caps.supported_xss = 0;
12202
12203 #define __kvm_cpu_cap_has(UNUSED_, f) kvm_cpu_cap_has(f)
12204         cr4_reserved_bits = __cr4_reserved_bits(__kvm_cpu_cap_has, UNUSED_);
12205 #undef __kvm_cpu_cap_has
12206
12207         if (kvm_caps.has_tsc_control) {
12208                 /*
12209                  * Make sure the user can only configure tsc_khz values that
12210                  * fit into a signed integer.
12211                  * A min value is not calculated because it will always
12212                  * be 1 on all machines.
12213                  */
12214                 u64 max = min(0x7fffffffULL,
12215                               __scale_tsc(kvm_caps.max_tsc_scaling_ratio, tsc_khz));
12216                 kvm_caps.max_guest_tsc_khz = max;
12217         }
12218         kvm_caps.default_tsc_scaling_ratio = 1ULL << kvm_caps.tsc_scaling_ratio_frac_bits;
12219         kvm_init_msr_list();
12220         return 0;
12221 }
12222
12223 void kvm_arch_hardware_unsetup(void)
12224 {
12225         kvm_unregister_perf_callbacks();
12226
12227         static_call(kvm_x86_hardware_unsetup)();
12228 }
12229
12230 int kvm_arch_check_processor_compat(void *opaque)
12231 {
12232         struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
12233         struct kvm_x86_init_ops *ops = opaque;
12234
12235         WARN_ON(!irqs_disabled());
12236
12237         if (__cr4_reserved_bits(cpu_has, c) !=
12238             __cr4_reserved_bits(cpu_has, &boot_cpu_data))
12239                 return -EIO;
12240
12241         return ops->check_processor_compatibility();
12242 }
12243
12244 bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu)
12245 {
12246         return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id;
12247 }
12248 EXPORT_SYMBOL_GPL(kvm_vcpu_is_reset_bsp);
12249
12250 bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
12251 {
12252         return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0;
12253 }
12254
12255 __read_mostly DEFINE_STATIC_KEY_FALSE(kvm_has_noapic_vcpu);
12256 EXPORT_SYMBOL_GPL(kvm_has_noapic_vcpu);
12257
12258 void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
12259 {
12260         struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
12261
12262         vcpu->arch.l1tf_flush_l1d = true;
12263         if (pmu->version && unlikely(pmu->event_count)) {
12264                 pmu->need_cleanup = true;
12265                 kvm_make_request(KVM_REQ_PMU, vcpu);
12266         }
12267         static_call(kvm_x86_sched_in)(vcpu, cpu);
12268 }
12269
12270 void kvm_arch_free_vm(struct kvm *kvm)
12271 {
12272         kfree(to_kvm_hv(kvm)->hv_pa_pg);
12273         __kvm_arch_free_vm(kvm);
12274 }
12275
12276
12277 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
12278 {
12279         int ret;
12280         unsigned long flags;
12281
12282         if (type)
12283                 return -EINVAL;
12284
12285         ret = kvm_page_track_init(kvm);
12286         if (ret)
12287                 goto out;
12288
12289         ret = kvm_mmu_init_vm(kvm);
12290         if (ret)
12291                 goto out_page_track;
12292
12293         ret = static_call(kvm_x86_vm_init)(kvm);
12294         if (ret)
12295                 goto out_uninit_mmu;
12296
12297         INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
12298         INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
12299         atomic_set(&kvm->arch.noncoherent_dma_count, 0);
12300
12301         /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
12302         set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
12303         /* Reserve bit 1 of irq_sources_bitmap for irqfd-resampler */
12304         set_bit(KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
12305                 &kvm->arch.irq_sources_bitmap);
12306
12307         raw_spin_lock_init(&kvm->arch.tsc_write_lock);
12308         mutex_init(&kvm->arch.apic_map_lock);
12309         seqcount_raw_spinlock_init(&kvm->arch.pvclock_sc, &kvm->arch.tsc_write_lock);
12310         kvm->arch.kvmclock_offset = -get_kvmclock_base_ns();
12311
12312         raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
12313         pvclock_update_vm_gtod_copy(kvm);
12314         raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
12315
12316         kvm->arch.default_tsc_khz = max_tsc_khz ? : tsc_khz;
12317         kvm->arch.guest_can_read_msr_platform_info = true;
12318         kvm->arch.enable_pmu = enable_pmu;
12319
12320 #if IS_ENABLED(CONFIG_HYPERV)
12321         spin_lock_init(&kvm->arch.hv_root_tdp_lock);
12322         kvm->arch.hv_root_tdp = INVALID_PAGE;
12323 #endif
12324
12325         INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
12326         INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
12327
12328         kvm_apicv_init(kvm);
12329         kvm_hv_init_vm(kvm);
12330         kvm_xen_init_vm(kvm);
12331
12332         return 0;
12333
12334 out_uninit_mmu:
12335         kvm_mmu_uninit_vm(kvm);
12336 out_page_track:
12337         kvm_page_track_cleanup(kvm);
12338 out:
12339         return ret;
12340 }
12341
12342 int kvm_arch_post_init_vm(struct kvm *kvm)
12343 {
12344         return kvm_mmu_post_init_vm(kvm);
12345 }
12346
12347 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
12348 {
12349         vcpu_load(vcpu);
12350         kvm_mmu_unload(vcpu);
12351         vcpu_put(vcpu);
12352 }
12353
12354 static void kvm_unload_vcpu_mmus(struct kvm *kvm)
12355 {
12356         unsigned long i;
12357         struct kvm_vcpu *vcpu;
12358
12359         kvm_for_each_vcpu(i, vcpu, kvm) {
12360                 kvm_clear_async_pf_completion_queue(vcpu);
12361                 kvm_unload_vcpu_mmu(vcpu);
12362         }
12363 }
12364
12365 void kvm_arch_sync_events(struct kvm *kvm)
12366 {
12367         cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work);
12368         cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work);
12369         kvm_free_pit(kvm);
12370 }
12371
12372 /**
12373  * __x86_set_memory_region: Setup KVM internal memory slot
12374  *
12375  * @kvm: the kvm pointer to the VM.
12376  * @id: the slot ID to setup.
12377  * @gpa: the GPA to install the slot (unused when @size == 0).
12378  * @size: the size of the slot. Set to zero to uninstall a slot.
12379  *
12380  * This function helps to set up a KVM internal memory slot.  Specify
12381  * @size > 0 to install a new slot, while @size == 0 to uninstall a
12382  * slot.  The return code can be one of the following:
12383  *
12384  *   HVA:           on success (uninstall will return a bogus HVA)
12385  *   -errno:        on error
12386  *
12387  * The caller should always use IS_ERR() to check the return value
12388  * before use.  Note, the KVM internal memory slots are guaranteed to
12389  * remain valid and unchanged until the VM is destroyed, i.e., the
12390  * GPA->HVA translation will not change.  However, the HVA is a user
12391  * address, i.e. its accessibility is not guaranteed, and must be
12392  * accessed via __copy_{to,from}_user().
12393  */
12394 void __user * __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
12395                                       u32 size)
12396 {
12397         int i, r;
12398         unsigned long hva, old_npages;
12399         struct kvm_memslots *slots = kvm_memslots(kvm);
12400         struct kvm_memory_slot *slot;
12401
12402         /* Called with kvm->slots_lock held.  */
12403         if (WARN_ON(id >= KVM_MEM_SLOTS_NUM))
12404                 return ERR_PTR_USR(-EINVAL);
12405
12406         slot = id_to_memslot(slots, id);
12407         if (size) {
12408                 if (slot && slot->npages)
12409                         return ERR_PTR_USR(-EEXIST);
12410
12411                 /*
12412                  * MAP_SHARED to prevent internal slot pages from being moved
12413                  * by fork()/COW.
12414                  */
12415                 hva = vm_mmap(NULL, 0, size, PROT_READ | PROT_WRITE,
12416                               MAP_SHARED | MAP_ANONYMOUS, 0);
12417                 if (IS_ERR((void *)hva))
12418                         return (void __user *)hva;
12419         } else {
12420                 if (!slot || !slot->npages)
12421                         return NULL;
12422
12423                 old_npages = slot->npages;
12424                 hva = slot->userspace_addr;
12425         }
12426
12427         for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
12428                 struct kvm_userspace_memory_region m;
12429
12430                 m.slot = id | (i << 16);
12431                 m.flags = 0;
12432                 m.guest_phys_addr = gpa;
12433                 m.userspace_addr = hva;
12434                 m.memory_size = size;
12435                 r = __kvm_set_memory_region(kvm, &m);
12436                 if (r < 0)
12437                         return ERR_PTR_USR(r);
12438         }
12439
12440         if (!size)
12441                 vm_munmap(hva, old_npages * PAGE_SIZE);
12442
12443         return (void __user *)hva;
12444 }
12445 EXPORT_SYMBOL_GPL(__x86_set_memory_region);
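
/*
 * Illustrative sketch, modeled on existing in-kernel callers (e.g. VMX's
 * identity page table, whose default GPA is shown here): installing an
 * internal slot while holding kvm->slots_lock, as required above.
 *
 *	void __user *uaddr;
 *
 *	mutex_lock(&kvm->slots_lock);
 *	uaddr = __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
 *					0xfffbc000, PAGE_SIZE);
 *	mutex_unlock(&kvm->slots_lock);
 *	if (IS_ERR(uaddr))
 *		return PTR_ERR(uaddr);
 */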
12446
12447 void kvm_arch_pre_destroy_vm(struct kvm *kvm)
12448 {
12449         kvm_mmu_pre_destroy_vm(kvm);
12450 }
12451
12452 void kvm_arch_destroy_vm(struct kvm *kvm)
12453 {
12454         if (current->mm == kvm->mm) {
12455                 /*
12456                  * Free memory regions allocated on behalf of userspace,
12457                  * unless the memory map has changed due to process exit
12458                  * or fd copying.
12459                  */
12460                 mutex_lock(&kvm->slots_lock);
12461                 __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
12462                                         0, 0);
12463                 __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
12464                                         0, 0);
12465                 __x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);
12466                 mutex_unlock(&kvm->slots_lock);
12467         }
12468         kvm_unload_vcpu_mmus(kvm);
12469         static_call_cond(kvm_x86_vm_destroy)(kvm);
12470         kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1));
12471         kvm_pic_destroy(kvm);
12472         kvm_ioapic_destroy(kvm);
12473         kvm_destroy_vcpus(kvm);
12474         kvfree(rcu_dereference_check(kvm->arch.apic_map, 1));
12475         kfree(srcu_dereference_check(kvm->arch.pmu_event_filter, &kvm->srcu, 1));
12476         kvm_mmu_uninit_vm(kvm);
12477         kvm_page_track_cleanup(kvm);
12478         kvm_xen_destroy_vm(kvm);
12479         kvm_hv_destroy_vm(kvm);
12480 }
12481
12482 static void memslot_rmap_free(struct kvm_memory_slot *slot)
12483 {
12484         int i;
12485
12486         for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
12487                 kvfree(slot->arch.rmap[i]);
12488                 slot->arch.rmap[i] = NULL;
12489         }
12490 }
12491
12492 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
12493 {
12494         int i;
12495
12496         memslot_rmap_free(slot);
12497
12498         for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) {
12499                 kvfree(slot->arch.lpage_info[i - 1]);
12500                 slot->arch.lpage_info[i - 1] = NULL;
12501         }
12502
12503         kvm_page_track_free_memslot(slot);
12504 }
12505
12506 int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages)
12507 {
12508         const int sz = sizeof(*slot->arch.rmap[0]);
12509         int i;
12510
12511         for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
12512                 int level = i + 1;
12513                 int lpages = __kvm_mmu_slot_lpages(slot, npages, level);
12514
12515                 if (slot->arch.rmap[i])
12516                         continue;
12517
12518                 slot->arch.rmap[i] = __vcalloc(lpages, sz, GFP_KERNEL_ACCOUNT);
12519                 if (!slot->arch.rmap[i]) {
12520                         memslot_rmap_free(slot);
12521                         return -ENOMEM;
12522                 }
12523         }
12524
12525         return 0;
12526 }
12527
12528 static int kvm_alloc_memslot_metadata(struct kvm *kvm,
12529                                       struct kvm_memory_slot *slot)
12530 {
12531         unsigned long npages = slot->npages;
12532         int i, r;
12533
12534         /*
12535          * Clear out the previous array pointers for the KVM_MR_MOVE case.  The
12536          * old arrays will be freed by __kvm_set_memory_region() if installing
12537          * the new memslot is successful.
12538          */
12539         memset(&slot->arch, 0, sizeof(slot->arch));
12540
12541         if (kvm_memslots_have_rmaps(kvm)) {
12542                 r = memslot_rmap_alloc(slot, npages);
12543                 if (r)
12544                         return r;
12545         }
12546
12547         for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) {
12548                 struct kvm_lpage_info *linfo;
12549                 unsigned long ugfn;
12550                 int lpages;
12551                 int level = i + 1;
12552
12553                 lpages = __kvm_mmu_slot_lpages(slot, npages, level);
12554
12555                 linfo = __vcalloc(lpages, sizeof(*linfo), GFP_KERNEL_ACCOUNT);
12556                 if (!linfo)
12557                         goto out_free;
12558
12559                 slot->arch.lpage_info[i - 1] = linfo;
12560
12561                 if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
12562                         linfo[0].disallow_lpage = 1;
12563                 if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
12564                         linfo[lpages - 1].disallow_lpage = 1;
12565                 ugfn = slot->userspace_addr >> PAGE_SHIFT;
12566                 /*
12567                  * If the gfn and userspace address are not aligned wrt each
12568                  * other, disable large page support for this slot.
12569                  */
12570                 if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1)) {
12571                         unsigned long j;
12572
12573                         for (j = 0; j < lpages; ++j)
12574                                 linfo[j].disallow_lpage = 1;
12575                 }
12576         }
12577
12578         if (kvm_page_track_create_memslot(kvm, slot, npages))
12579                 goto out_free;
12580
12581         return 0;
12582
12583 out_free:
12584         memslot_rmap_free(slot);
12585
12586         for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) {
12587                 kvfree(slot->arch.lpage_info[i - 1]);
12588                 slot->arch.lpage_info[i - 1] = NULL;
12589         }
12590         return -ENOMEM;
12591 }
12592
12593 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
12594 {
12595         struct kvm_vcpu *vcpu;
12596         unsigned long i;
12597
12598         /*
12599          * memslots->generation has been incremented.
12600          * mmio generation may have reached its maximum value.
12601          */
12602         kvm_mmu_invalidate_mmio_sptes(kvm, gen);
12603
12604         /* Force re-initialization of steal_time cache */
12605         kvm_for_each_vcpu(i, vcpu, kvm)
12606                 kvm_vcpu_kick(vcpu);
12607 }
12608
12609 int kvm_arch_prepare_memory_region(struct kvm *kvm,
12610                                    const struct kvm_memory_slot *old,
12611                                    struct kvm_memory_slot *new,
12612                                    enum kvm_mr_change change)
12613 {
12614         if (change == KVM_MR_CREATE || change == KVM_MR_MOVE) {
12615                 if ((new->base_gfn + new->npages - 1) > kvm_mmu_max_gfn())
12616                         return -EINVAL;
12617
12618                 return kvm_alloc_memslot_metadata(kvm, new);
12619         }
12620
12621         if (change == KVM_MR_FLAGS_ONLY)
12622                 memcpy(&new->arch, &old->arch, sizeof(old->arch));
12623         else if (WARN_ON_ONCE(change != KVM_MR_DELETE))
12624                 return -EIO;
12625
12626         return 0;
12627 }
12628
12629
12630 static void kvm_mmu_update_cpu_dirty_logging(struct kvm *kvm, bool enable)
12631 {
12632         struct kvm_arch *ka = &kvm->arch;
12633
12634         if (!kvm_x86_ops.cpu_dirty_log_size)
12635                 return;
12636
12637         if ((enable && ++ka->cpu_dirty_logging_count == 1) ||
12638             (!enable && --ka->cpu_dirty_logging_count == 0))
12639                 kvm_make_all_cpus_request(kvm, KVM_REQ_UPDATE_CPU_DIRTY_LOGGING);
12640
12641         WARN_ON_ONCE(ka->cpu_dirty_logging_count < 0);
12642 }
12643
12644 static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
12645                                      struct kvm_memory_slot *old,
12646                                      const struct kvm_memory_slot *new,
12647                                      enum kvm_mr_change change)
12648 {
12649         u32 old_flags = old ? old->flags : 0;
12650         u32 new_flags = new ? new->flags : 0;
12651         bool log_dirty_pages = new_flags & KVM_MEM_LOG_DIRTY_PAGES;
12652
12653         /*
12654          * Update CPU dirty logging if dirty logging is being toggled.  This
12655          * applies to all operations.
12656          */
12657         if ((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES)
12658                 kvm_mmu_update_cpu_dirty_logging(kvm, log_dirty_pages);
12659
12660         /*
12661          * Nothing more to do for RO slots (which can't be dirtied and can't be
12662          * made writable) or CREATE/MOVE/DELETE of a slot.
12663          *
12664          * For a memslot with dirty logging disabled:
12665          * CREATE:      No dirty mappings will already exist.
12666          * MOVE/DELETE: The old mappings will already have been cleaned up by
12667          *              kvm_arch_flush_shadow_memslot()
12668          *
12669          * For a memslot with dirty logging enabled:
12670          * CREATE:      No shadow pages exist, thus nothing to write-protect
12671          *              and no dirty bits to clear.
12672          * MOVE/DELETE: The old mappings will already have been cleaned up by
12673          *              kvm_arch_flush_shadow_memslot().
12674          */
12675         if ((change != KVM_MR_FLAGS_ONLY) || (new_flags & KVM_MEM_READONLY))
12676                 return;
12677
12678         /*
12679          * READONLY and non-flags changes were filtered out above, and the only
12680          * other flag is LOG_DIRTY_PAGES, i.e. something is wrong if dirty
12681          * logging isn't being toggled on or off.
12682          */
12683         if (WARN_ON_ONCE(!((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES)))
12684                 return;
12685
12686         if (!log_dirty_pages) {
12687                 /*
12688                  * Dirty logging tracks sptes in 4k granularity, meaning that
12689                  * large sptes have to be split.  If live migration succeeds,
12690                  * the guest in the source machine will be destroyed and large
12691                  * sptes will be created in the destination.  However, if the
12692                  * guest continues to run in the source machine (for example if
12693                  * live migration fails), small sptes will remain around and
12694                  * cause bad performance.
12695                  *
12696                  * Scan sptes if dirty logging has been stopped, dropping those
12697                  * which can be collapsed into a single large-page spte.  Later
12698                  * page faults will create the large-page sptes.
12699                  */
12700                 kvm_mmu_zap_collapsible_sptes(kvm, new);
12701         } else {
12702                 /*
12703                  * Initially-all-set does not require write protecting any page,
12704                  * because they're all assumed to be dirty.
12705                  */
12706                 if (kvm_dirty_log_manual_protect_and_init_set(kvm))
12707                         return;
12708
12709                 if (READ_ONCE(eager_page_split))
12710                         kvm_mmu_slot_try_split_huge_pages(kvm, new, PG_LEVEL_4K);
12711
12712                 if (kvm_x86_ops.cpu_dirty_log_size) {
12713                         kvm_mmu_slot_leaf_clear_dirty(kvm, new);
12714                         kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_2M);
12715                 } else {
12716                         kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_4K);
12717                 }
12718
12719                 /*
12720                  * Unconditionally flush the TLBs after enabling dirty logging.
12721                  * A flush is almost always going to be necessary (see below),
12722                  * and unconditionally flushing allows the helpers to omit
12723                  * the subtly complex checks when removing write access.
12724                  *
12725                  * Do the flush outside of mmu_lock to reduce the amount of
12726                  * time mmu_lock is held.  Flushing after dropping mmu_lock is
12727                  * safe as KVM only needs to guarantee the slot is fully
12728                  * write-protected before returning to userspace, i.e. before
12729                  * userspace can consume the dirty status.
12730                  *
12731                  * Flushing outside of mmu_lock requires KVM to be careful when
12732                  * making decisions based on writable status of an SPTE, e.g. a
12733                  * !writable SPTE doesn't guarantee a CPU can't perform writes.
12734                  *
12735                  * Specifically, KVM also write-protects guest page tables to
12736                  * monitor changes when using shadow paging, and must guarantee
12737                  * no CPUs can write to those page before mmu_lock is dropped.
12738                  * Because CPUs may have stale TLB entries at this point, a
12739                  * !writable SPTE doesn't guarantee CPUs can't perform writes.
12740                  *
12741                  * KVM also allows making SPTES writable outside of mmu_lock,
12742                  * e.g. to allow dirty logging without taking mmu_lock.
12743                  *
12744                  * To handle these scenarios, KVM uses a separate software-only
12745                  * bit (MMU-writable) to track if a SPTE is !writable due to
12746                  * a guest page table being write-protected (KVM clears the
12747                  * MMU-writable flag when write-protecting for shadow paging).
12748                  *
12749                  * The use of MMU-writable is also the primary motivation for
12750                  * the unconditional flush.  Because KVM must guarantee that a
12751                  * CPU doesn't contain stale, writable TLB entries for a
12752                  * !MMU-writable SPTE, KVM must flush if it encounters any
12753                  * MMU-writable SPTE regardless of whether the actual hardware
12754                  * writable bit was set.  I.e. KVM is almost guaranteed to need
12755                  * to flush, while unconditionally flushing allows the "remove
12756                  * write access" helpers to ignore MMU-writable entirely.
12757                  *
12758                  * See is_writable_pte() for more details (the case involving
12759                  * access-tracked SPTEs is particularly relevant).
12760                  */
12761                 kvm_arch_flush_remote_tlbs_memslot(kvm, new);
12762         }
12763 }
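
/*
 * Illustrative sketch (not compiled): the userspace side of the
 * KVM_MR_FLAGS_ONLY path handled above.  Toggling KVM_MEM_LOG_DIRTY_PAGES on
 * an existing memslot, with slot/gpa/size/hva unchanged, is what drives the
 * write-protect and zap-collapsible-sptes logic in kvm_mmu_slot_apply_flags().
 * vm_fd and backing_mem are placeholders for the usual KVM setup.
 */
#if 0
        struct kvm_userspace_memory_region region = {
                .slot = 0,
                .flags = KVM_MEM_LOG_DIRTY_PAGES,       /* clear to stop logging */
                .guest_phys_addr = 0,
                .memory_size = 64 * 1024 * 1024,
                .userspace_addr = (__u64)backing_mem,
        };

        if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region) < 0)
                err(1, "KVM_SET_USER_MEMORY_REGION");
#endif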
12764
12765 void kvm_arch_commit_memory_region(struct kvm *kvm,
12766                                 struct kvm_memory_slot *old,
12767                                 const struct kvm_memory_slot *new,
12768                                 enum kvm_mr_change change)
12769 {
12770         if (!kvm->arch.n_requested_mmu_pages &&
12771             (change == KVM_MR_CREATE || change == KVM_MR_DELETE)) {
12772                 unsigned long nr_mmu_pages;
12773
12774                 nr_mmu_pages = kvm->nr_memslot_pages / KVM_MEMSLOT_PAGES_TO_MMU_PAGES_RATIO;
12775                 nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES);
12776                 kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
12777         }
12778
12779         kvm_mmu_slot_apply_flags(kvm, old, new, change);
12780
12781         /* Free the arrays associated with the old memslot. */
12782         if (change == KVM_MR_MOVE)
12783                 kvm_arch_free_memslot(kvm, old);
12784 }
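
/*
 * Worked example for the sizing above, assuming the usual values of
 * KVM_MEMSLOT_PAGES_TO_MMU_PAGES_RATIO (50) and KVM_MIN_ALLOC_MMU_PAGES (64):
 * a VM with 4GiB of memslots has 1048576 guest pages, giving a limit of
 * 1048576 / 50 = 20971 MMU pages, while a 1MiB VM (256 pages) computes 5 and
 * is clamped up to the 64-page minimum.
 */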
12785
12786 void kvm_arch_flush_shadow_all(struct kvm *kvm)
12787 {
12788         kvm_mmu_zap_all(kvm);
12789 }
12790
12791 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
12792                                    struct kvm_memory_slot *slot)
12793 {
12794         kvm_page_track_flush_slot(kvm, slot);
12795 }
12796
12797 static inline bool kvm_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
12798 {
12799         return (is_guest_mode(vcpu) &&
12800                 static_call(kvm_x86_guest_apic_has_interrupt)(vcpu));
12801 }
12802
12803 static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
12804 {
12805         if (!list_empty_careful(&vcpu->async_pf.done))
12806                 return true;
12807
12808         if (kvm_apic_has_pending_init_or_sipi(vcpu) &&
12809             kvm_apic_init_sipi_allowed(vcpu))
12810                 return true;
12811
12812         if (vcpu->arch.pv.pv_unhalted)
12813                 return true;
12814
12815         if (kvm_is_exception_pending(vcpu))
12816                 return true;
12817
12818         if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
12819             (vcpu->arch.nmi_pending &&
12820              static_call(kvm_x86_nmi_allowed)(vcpu, false)))
12821                 return true;
12822
12823         if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
12824             (vcpu->arch.smi_pending &&
12825              static_call(kvm_x86_smi_allowed)(vcpu, false)))
12826                 return true;
12827
12828         if (kvm_arch_interrupt_allowed(vcpu) &&
12829             (kvm_cpu_has_interrupt(vcpu) ||
12830             kvm_guest_apic_has_interrupt(vcpu)))
12831                 return true;
12832
12833         if (kvm_hv_has_stimer_pending(vcpu))
12834                 return true;
12835
12836         if (is_guest_mode(vcpu) &&
12837             kvm_x86_ops.nested_ops->has_events &&
12838             kvm_x86_ops.nested_ops->has_events(vcpu))
12839                 return true;
12840
12841         if (kvm_xen_has_pending_events(vcpu))
12842                 return true;
12843
12844         return false;
12845 }
12846
12847 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
12848 {
12849         return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
12850 }
12851
12852 bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
12853 {
12854         if (kvm_vcpu_apicv_active(vcpu) &&
12855             static_call(kvm_x86_dy_apicv_has_pending_interrupt)(vcpu))
12856                 return true;
12857
12858         return false;
12859 }
12860
12861 bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
12862 {
12863         if (READ_ONCE(vcpu->arch.pv.pv_unhalted))
12864                 return true;
12865
12866         if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
12867             kvm_test_request(KVM_REQ_SMI, vcpu) ||
12868             kvm_test_request(KVM_REQ_EVENT, vcpu))
12869                 return true;
12870
12871         return kvm_arch_dy_has_pending_interrupt(vcpu);
12872 }
12873
12874 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
12875 {
12876         if (vcpu->arch.guest_state_protected)
12877                 return true;
12878
12879         return vcpu->arch.preempted_in_kernel;
12880 }
12881
12882 unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
12883 {
12884         return kvm_rip_read(vcpu);
12885 }
12886
12887 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
12888 {
12889         return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
12890 }
12891
12892 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
12893 {
12894         return static_call(kvm_x86_interrupt_allowed)(vcpu, false);
12895 }
12896
12897 unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu)
12898 {
12899         /* Can't read the RIP when guest state is protected, just return 0 */
12900         if (vcpu->arch.guest_state_protected)
12901                 return 0;
12902
12903         if (is_64_bit_mode(vcpu))
12904                 return kvm_rip_read(vcpu);
12905         return (u32)(get_segment_base(vcpu, VCPU_SREG_CS) +
12906                      kvm_rip_read(vcpu));
12907 }
12908 EXPORT_SYMBOL_GPL(kvm_get_linear_rip);
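
/*
 * Worked example for the non-64-bit path above: with CS.base = 0x10000 (e.g.
 * real-mode selector 0x1000) and RIP = 0x100, the linear RIP is
 * (u32)(0x10000 + 0x100) = 0x10100.  In 64-bit mode the CS base is treated
 * as zero, so RIP is already linear.
 */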
12909
12910 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
12911 {
12912         return kvm_get_linear_rip(vcpu) == linear_rip;
12913 }
12914 EXPORT_SYMBOL_GPL(kvm_is_linear_rip);
12915
12916 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
12917 {
12918         unsigned long rflags;
12919
12920         rflags = static_call(kvm_x86_get_rflags)(vcpu);
12921         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
12922                 rflags &= ~X86_EFLAGS_TF;
12923         return rflags;
12924 }
12925 EXPORT_SYMBOL_GPL(kvm_get_rflags);
12926
12927 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
12928 {
12929         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
12930             kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
12931                 rflags |= X86_EFLAGS_TF;
12932         static_call(kvm_x86_set_rflags)(vcpu, rflags);
12933 }
12934
12935 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
12936 {
12937         __kvm_set_rflags(vcpu, rflags);
12938         kvm_make_request(KVM_REQ_EVENT, vcpu);
12939 }
12940 EXPORT_SYMBOL_GPL(kvm_set_rflags);
12941
12942 static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
12943 {
12944         BUILD_BUG_ON(!is_power_of_2(ASYNC_PF_PER_VCPU));
12945
12946         return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
12947 }
12948
12949 static inline u32 kvm_async_pf_next_probe(u32 key)
12950 {
12951         return (key + 1) & (ASYNC_PF_PER_VCPU - 1);
12952 }
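
/*
 * Illustrative sketch (not compiled) of the probe sequence, assuming the
 * usual ASYNC_PF_PER_VCPU of 64: because the hash requires a power-of-2
 * table size (see the BUILD_BUG_ON above), the mask makes the table behave
 * as a ring, and probing past the last slot wraps to slot 0.
 */
#if 0
        u32 key = 62;

        key = kvm_async_pf_next_probe(key);     /* 63 */
        key = kvm_async_pf_next_probe(key);     /* wraps to 0 */
#endif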
12953
12954 static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
12955 {
12956         u32 key = kvm_async_pf_hash_fn(gfn);
12957
12958         while (vcpu->arch.apf.gfns[key] != ~0)
12959                 key = kvm_async_pf_next_probe(key);
12960
12961         vcpu->arch.apf.gfns[key] = gfn;
12962 }
12963
12964 static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
12965 {
12966         int i;
12967         u32 key = kvm_async_pf_hash_fn(gfn);
12968
12969         for (i = 0; i < ASYNC_PF_PER_VCPU &&
12970                      (vcpu->arch.apf.gfns[key] != gfn &&
12971                       vcpu->arch.apf.gfns[key] != ~0); i++)
12972                 key = kvm_async_pf_next_probe(key);
12973
12974         return key;
12975 }
12976
12977 bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
12978 {
12979         return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn;
12980 }
12981
12982 static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
12983 {
12984         u32 i, j, k;
12985
12986         i = j = kvm_async_pf_gfn_slot(vcpu, gfn);
12987
12988         if (WARN_ON_ONCE(vcpu->arch.apf.gfns[i] != gfn))
12989                 return;
12990
12991         while (true) {
12992                 vcpu->arch.apf.gfns[i] = ~0;
12993                 do {
12994                         j = kvm_async_pf_next_probe(j);
12995                         if (vcpu->arch.apf.gfns[j] == ~0)
12996                                 return;
12997                         k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]);
12998                         /*
12999                          * k lies cyclically in ]i,j]
13000                          * |    i.k.j |
13001                          * |....j i.k.| or  |.k..j i...|
13002                          */
13003                 } while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
13004                 vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j];
13005                 i = j;
13006         }
13007 }
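
/*
 * Worked example for the deletion loop above, using an 8-slot table: if gfns
 * with home slot 5 were linearly probed into slots 5, 6 and 7, deleting the
 * entry in slot i = 5 scans j = 6, 7, ...; any occupied slot j whose home
 * slot k does NOT lie cyclically in ]i, j] is moved back into the hole at i,
 * and the scan restarts from the new hole at j.  This backward-shift keeps
 * every surviving entry reachable from its home slot without tombstones.
 */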
13008
13009 static inline int apf_put_user_notpresent(struct kvm_vcpu *vcpu)
13010 {
13011         u32 reason = KVM_PV_REASON_PAGE_NOT_PRESENT;
13012
13013         return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &reason,
13014                                       sizeof(reason));
13015 }
13016
13017 static inline int apf_put_user_ready(struct kvm_vcpu *vcpu, u32 token)
13018 {
13019         unsigned int offset = offsetof(struct kvm_vcpu_pv_apf_data, token);
13020
13021         return kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data,
13022                                              &token, offset, sizeof(token));
13023 }
13024
13025 static inline bool apf_pageready_slot_free(struct kvm_vcpu *vcpu)
13026 {
13027         unsigned int offset = offsetof(struct kvm_vcpu_pv_apf_data, token);
13028         u32 val;
13029
13030         if (kvm_read_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data,
13031                                          &val, offset, sizeof(val)))
13032                 return false;
13033
13034         return !val;
13035 }
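
/*
 * For reference, a sketch of the guest-shared structure the two helpers
 * above write and poll; the authoritative layout lives in
 * arch/x86/include/uapi/asm/kvm_para.h.
 */
#if 0
        struct kvm_vcpu_pv_apf_data {
                /* Used for 'page not present' events delivered via #PF */
                __u32 flags;

                /* Used for 'page ready' events delivered via interrupt notification */
                __u32 token;

                __u8 pad[56];
                __u32 enabled;
        };
#endif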
13036
13037 static bool kvm_can_deliver_async_pf(struct kvm_vcpu *vcpu)
13038 {
13040         if (!kvm_pv_async_pf_enabled(vcpu))
13041                 return false;
13042
13043         if (vcpu->arch.apf.send_user_only &&
13044             static_call(kvm_x86_get_cpl)(vcpu) == 0)
13045                 return false;
13046
13047         if (is_guest_mode(vcpu)) {
13048                 /*
13049                  * L1 needs to opt into the special #PF vmexits that are
13050                  * used to deliver async page faults.
13051                  */
13052                 return vcpu->arch.apf.delivery_as_pf_vmexit;
13053         } else {
13054                 /*
13055                  * Play it safe in case the guest temporarily disables paging.
13056                  * The real mode IDT in particular is unlikely to have a #PF
13057                  * exception setup.
13058                  */
13059                 return is_paging(vcpu);
13060         }
13061 }
13062
13063 bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
13064 {
13065         if (unlikely(!lapic_in_kernel(vcpu) ||
13066                      kvm_event_needs_reinjection(vcpu) ||
13067                      kvm_is_exception_pending(vcpu)))
13068                 return false;
13069
13070         if (kvm_hlt_in_guest(vcpu->kvm) && !kvm_can_deliver_async_pf(vcpu))
13071                 return false;
13072
13073         /*
13074          * If interrupts are off we cannot even use an artificial
13075          * halt state.
13076          */
13077         return kvm_arch_interrupt_allowed(vcpu);
13078 }
13079
13080 bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
13081                                      struct kvm_async_pf *work)
13082 {
13083         struct x86_exception fault;
13084
13085         trace_kvm_async_pf_not_present(work->arch.token, work->cr2_or_gpa);
13086         kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
13087
13088         if (kvm_can_deliver_async_pf(vcpu) &&
13089             !apf_put_user_notpresent(vcpu)) {
13090                 fault.vector = PF_VECTOR;
13091                 fault.error_code_valid = true;
13092                 fault.error_code = 0;
13093                 fault.nested_page_fault = false;
13094                 fault.address = work->arch.token;
13095                 fault.async_page_fault = true;
13096                 kvm_inject_page_fault(vcpu, &fault);
13097                 return true;
13098         } else {
13099                 /*
13100                  * It is not possible to deliver a paravirtualized asynchronous
13101                  * page fault, but putting the guest in an artificial halt state
13102                  * can be beneficial nevertheless: if an interrupt arrives, we
13103                  * can deliver it timely and perhaps the guest will schedule
13104                  * another process.  When the instruction that triggered a page
13105                  * fault is retried, hopefully the page will be ready in the host.
13106                  */
13107                 kvm_make_request(KVM_REQ_APF_HALT, vcpu);
13108                 return false;
13109         }
13110 }
13111
13112 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
13113                                  struct kvm_async_pf *work)
13114 {
13115         struct kvm_lapic_irq irq = {
13116                 .delivery_mode = APIC_DM_FIXED,
13117                 .vector = vcpu->arch.apf.vec
13118         };
13119
13120         if (work->wakeup_all)
13121                 work->arch.token = ~0; /* broadcast wakeup */
13122         else
13123                 kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
13124         trace_kvm_async_pf_ready(work->arch.token, work->cr2_or_gpa);
13125
13126         if ((work->wakeup_all || work->notpresent_injected) &&
13127             kvm_pv_async_pf_enabled(vcpu) &&
13128             !apf_put_user_ready(vcpu, work->arch.token)) {
13129                 vcpu->arch.apf.pageready_pending = true;
13130                 kvm_apic_set_irq(vcpu, &irq, NULL);
13131         }
13132
13133         vcpu->arch.apf.halted = false;
13134         vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
13135 }
13136
13137 void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu)
13138 {
13139         kvm_make_request(KVM_REQ_APF_READY, vcpu);
13140         if (!vcpu->arch.apf.pageready_pending)
13141                 kvm_vcpu_kick(vcpu);
13142 }
13143
13144 bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
13145 {
13146         if (!kvm_pv_async_pf_enabled(vcpu))
13147                 return true;
13148
13149         return kvm_lapic_enabled(vcpu) && apf_pageready_slot_free(vcpu);
13150 }
13151
13152 void kvm_arch_start_assignment(struct kvm *kvm)
13153 {
13154         if (atomic_inc_return(&kvm->arch.assigned_device_count) == 1)
13155                 static_call_cond(kvm_x86_pi_start_assignment)(kvm);
13156 }
13157 EXPORT_SYMBOL_GPL(kvm_arch_start_assignment);
13158
13159 void kvm_arch_end_assignment(struct kvm *kvm)
13160 {
13161         atomic_dec(&kvm->arch.assigned_device_count);
13162 }
13163 EXPORT_SYMBOL_GPL(kvm_arch_end_assignment);
13164
13165 bool noinstr kvm_arch_has_assigned_device(struct kvm *kvm)
13166 {
13167         return arch_atomic_read(&kvm->arch.assigned_device_count);
13168 }
13169 EXPORT_SYMBOL_GPL(kvm_arch_has_assigned_device);
13170
13171 void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
13172 {
13173         atomic_inc(&kvm->arch.noncoherent_dma_count);
13174 }
13175 EXPORT_SYMBOL_GPL(kvm_arch_register_noncoherent_dma);
13176
13177 void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
13178 {
13179         atomic_dec(&kvm->arch.noncoherent_dma_count);
13180 }
13181 EXPORT_SYMBOL_GPL(kvm_arch_unregister_noncoherent_dma);
13182
13183 bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
13184 {
13185         return atomic_read(&kvm->arch.noncoherent_dma_count);
13186 }
13187 EXPORT_SYMBOL_GPL(kvm_arch_has_noncoherent_dma);
13188
13189 bool kvm_arch_has_irq_bypass(void)
13190 {
13191         return true;
13192 }
13193
13194 int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
13195                                       struct irq_bypass_producer *prod)
13196 {
13197         struct kvm_kernel_irqfd *irqfd =
13198                 container_of(cons, struct kvm_kernel_irqfd, consumer);
13199         int ret;
13200
13201         irqfd->producer = prod;
13202         kvm_arch_start_assignment(irqfd->kvm);
13203         ret = static_call(kvm_x86_pi_update_irte)(irqfd->kvm, prod->irq,
13204                                                   irqfd->gsi, 1);
13205
13206         if (ret)
13207                 kvm_arch_end_assignment(irqfd->kvm);
13208
13209         return ret;
13210 }
13211
13212 void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
13213                                       struct irq_bypass_producer *prod)
13214 {
13215         int ret;
13216         struct kvm_kernel_irqfd *irqfd =
13217                 container_of(cons, struct kvm_kernel_irqfd, consumer);
13218
13219         WARN_ON(irqfd->producer != prod);
13220         irqfd->producer = NULL;
13221
13222         /*
13223          * When the producer of a consumer is unregistered, we change back to
13224          * remapped mode, so we can re-use the current implementation when
13225          * the irq is masked/disabled or the consumer side (KVM in this
13226          * case) doesn't want to receive the interrupts.
13227          */
13228         ret = static_call(kvm_x86_pi_update_irte)(irqfd->kvm, prod->irq, irqfd->gsi, 0);
13229         if (ret)
13230                 pr_info("irq bypass consumer (token %p) unregistration fails: %d\n",
13231                         irqfd->consumer.token, ret);
13232
13233         kvm_arch_end_assignment(irqfd->kvm);
13234 }
13235
13236 int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
13237                                    uint32_t guest_irq, bool set)
13238 {
13239         return static_call(kvm_x86_pi_update_irte)(kvm, host_irq, guest_irq, set);
13240 }
13241
13242 bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *old,
13243                                   struct kvm_kernel_irq_routing_entry *new)
13244 {
13245         if (new->type != KVM_IRQ_ROUTING_MSI)
13246                 return true;
13247
13248         return !!memcmp(&old->msi, &new->msi, sizeof(new->msi));
13249 }
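
/*
 * Example: two MSI routes that differ only in msi.data (the vector/delivery
 * mode bits of the MSI message) compare unequal above, so the
 * posted-interrupt IRTE is reprogrammed; if address_lo, address_hi and data
 * all match, the existing configuration is left untouched.
 */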
13250
13251 bool kvm_vector_hashing_enabled(void)
13252 {
13253         return vector_hashing;
13254 }
13255
13256 bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
13257 {
13258         return (vcpu->arch.msr_kvm_poll_control & 1) == 0;
13259 }
13260 EXPORT_SYMBOL_GPL(kvm_arch_no_poll);
13261
13263 int kvm_spec_ctrl_test_value(u64 value)
13264 {
13265         /*
13266          * Test that setting IA32_SPEC_CTRL to the given value is
13267          * allowed by the host processor.
13268          */
13269
13270         u64 saved_value;
13271         unsigned long flags;
13272         int ret = 0;
13273
13274         local_irq_save(flags);
13275
13276         if (rdmsrl_safe(MSR_IA32_SPEC_CTRL, &saved_value))
13277                 ret = 1;
13278         else if (wrmsrl_safe(MSR_IA32_SPEC_CTRL, value))
13279                 ret = 1;
13280         else
13281                 wrmsrl(MSR_IA32_SPEC_CTRL, saved_value);
13282
13283         local_irq_restore(flags);
13284
13285         return ret;
13286 }
13287 EXPORT_SYMBOL_GPL(kvm_spec_ctrl_test_value);
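
/*
 * Usage sketch (not compiled, simplified from the vendor modules' MSR write
 * paths): a guest write to MSR_IA32_SPEC_CTRL is rejected if the host
 * processor itself cannot hold the value.
 */
#if 0
        case MSR_IA32_SPEC_CTRL:
                if (kvm_spec_ctrl_test_value(data))
                        return 1;       /* inject #GP */
                vmx->spec_ctrl = data;
                break;
#endif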
13288
13289 void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code)
13290 {
13291         struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
13292         struct x86_exception fault;
13293         u64 access = error_code &
13294                 (PFERR_WRITE_MASK | PFERR_FETCH_MASK | PFERR_USER_MASK);
13295
13296         if (!(error_code & PFERR_PRESENT_MASK) ||
13297             mmu->gva_to_gpa(vcpu, mmu, gva, access, &fault) != INVALID_GPA) {
13298                 /*
13299                  * If vcpu->arch.walk_mmu->gva_to_gpa succeeded, the page
13300                  * tables probably do not match the TLB.  Just proceed
13301                  * with the error code that the processor gave.
13302                  */
13303                 fault.vector = PF_VECTOR;
13304                 fault.error_code_valid = true;
13305                 fault.error_code = error_code;
13306                 fault.nested_page_fault = false;
13307                 fault.address = gva;
13308                 fault.async_page_fault = false;
13309         }
13310         vcpu->arch.walk_mmu->inject_page_fault(vcpu, &fault);
13311 }
13312 EXPORT_SYMBOL_GPL(kvm_fixup_and_inject_pf_error);
13313
13314 /*
13315  * Handles kvm_read/write_guest_virt*() result and either injects #PF or returns
13316  * KVM_EXIT_INTERNAL_ERROR for cases not currently handled by KVM. Return value
13317  * indicates whether exit to userspace is needed.
13318  */
13319 int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
13320                               struct x86_exception *e)
13321 {
13322         if (r == X86EMUL_PROPAGATE_FAULT) {
13323                 kvm_inject_emulated_page_fault(vcpu, e);
13324                 return 1;
13325         }
13326
13327         /*
13328          * In case kvm_read/write_guest_virt*() failed with X86EMUL_IO_NEEDED
13329          * while handling a VMX instruction KVM could've handled the request
13330          * correctly by exiting to userspace and performing I/O but there
13331          * doesn't seem to be a real use-case behind such requests, just return
13332          * KVM_EXIT_INTERNAL_ERROR for now.
13333          */
13334         kvm_prepare_emulation_failure_exit(vcpu);
13335
13336         return 0;
13337 }
13338 EXPORT_SYMBOL_GPL(kvm_handle_memory_failure);
13339
13340 int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
13341 {
13342         bool pcid_enabled;
13343         struct x86_exception e;
13344         struct {
13345                 u64 pcid;
13346                 u64 gla;
13347         } operand;
13348         int r;
13349
13350         r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
13351         if (r != X86EMUL_CONTINUE)
13352                 return kvm_handle_memory_failure(vcpu, r, &e);
13353
13354         if (operand.pcid >> 12 != 0) {
13355                 kvm_inject_gp(vcpu, 0);
13356                 return 1;
13357         }
13358
13359         pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);
13360
13361         switch (type) {
13362         case INVPCID_TYPE_INDIV_ADDR:
13363                 if ((!pcid_enabled && (operand.pcid != 0)) ||
13364                     is_noncanonical_address(operand.gla, vcpu)) {
13365                         kvm_inject_gp(vcpu, 0);
13366                         return 1;
13367                 }
13368                 kvm_mmu_invpcid_gva(vcpu, operand.gla, operand.pcid);
13369                 return kvm_skip_emulated_instruction(vcpu);
13370
13371         case INVPCID_TYPE_SINGLE_CTXT:
13372                 if (!pcid_enabled && (operand.pcid != 0)) {
13373                         kvm_inject_gp(vcpu, 0);
13374                         return 1;
13375                 }
13376
13377                 kvm_invalidate_pcid(vcpu, operand.pcid);
13378                 return kvm_skip_emulated_instruction(vcpu);
13379
13380         case INVPCID_TYPE_ALL_NON_GLOBAL:
13381                 /*
13382                  * Currently, KVM doesn't mark global entries in the shadow
13383                  * page tables, so a non-global flush just degenerates to a
13384                  * global flush. If needed, we could optimize this later by
13385                  * keeping track of global entries in shadow page tables.
13386                  */
13387
13388                 fallthrough;
13389         case INVPCID_TYPE_ALL_INCL_GLOBAL:
13390                 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
13391                 return kvm_skip_emulated_instruction(vcpu);
13392
13393         default:
13394                 kvm_inject_gp(vcpu, 0);
13395                 return 1;
13396         }
13397 }
13398 EXPORT_SYMBOL_GPL(kvm_handle_invpcid);
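
/*
 * For reference, the guest memory operand read above is the architectural
 * INVPCID descriptor: a 16-byte structure whose first qword carries the PCID
 * in bits 11:0 (bits 63:12 must be zero, hence the "operand.pcid >> 12"
 * check) and whose second qword carries the linear address used by
 * individual-address (type 0) invalidations.
 */
#if 0
        struct invpcid_desc {
                u64 pcid : 12;
                u64 rsvd : 52;
                u64 gla;
        } __packed;
#endif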
13399
13400 static int complete_sev_es_emulated_mmio(struct kvm_vcpu *vcpu)
13401 {
13402         struct kvm_run *run = vcpu->run;
13403         struct kvm_mmio_fragment *frag;
13404         unsigned int len;
13405
13406         BUG_ON(!vcpu->mmio_needed);
13407
13408         /* Complete previous fragment */
13409         frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment];
13410         len = min(8u, frag->len);
13411         if (!vcpu->mmio_is_write)
13412                 memcpy(frag->data, run->mmio.data, len);
13413
13414         if (frag->len <= 8) {
13415                 /* Switch to the next fragment. */
13416                 frag++;
13417                 vcpu->mmio_cur_fragment++;
13418         } else {
13419                 /* Go forward to the next mmio piece. */
13420                 frag->data += len;
13421                 frag->gpa += len;
13422                 frag->len -= len;
13423         }
13424
13425         if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) {
13426                 vcpu->mmio_needed = 0;
13427
13428                 /* VMG change: at this point we're always done, as RIP
13429                  * has already been advanced. */
13430                 return 1;
13431         }
13432
13433         // More MMIO is needed
13434         run->mmio.phys_addr = frag->gpa;
13435         run->mmio.len = min(8u, frag->len);
13436         run->mmio.is_write = vcpu->mmio_is_write;
13437         if (run->mmio.is_write)
13438                 memcpy(run->mmio.data, frag->data, min(8u, frag->len));
13439         run->exit_reason = KVM_EXIT_MMIO;
13440
13441         vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio;
13442
13443         return 0;
13444 }
13445
13446 int kvm_sev_es_mmio_write(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes,
13447                           void *data)
13448 {
13449         int handled;
13450         struct kvm_mmio_fragment *frag;
13451
13452         if (!data)
13453                 return -EINVAL;
13454
13455         handled = write_emultor.read_write_mmio(vcpu, gpa, bytes, data);
13456         if (handled == bytes)
13457                 return 1;
13458
13459         bytes -= handled;
13460         gpa += handled;
13461         data += handled;
13462
13463         /* TODO: Check whether the number of fragments needs to be incremented. */
13464         frag = vcpu->mmio_fragments;
13465         vcpu->mmio_nr_fragments = 1;
13466         frag->len = bytes;
13467         frag->gpa = gpa;
13468         frag->data = data;
13469
13470         vcpu->mmio_needed = 1;
13471         vcpu->mmio_cur_fragment = 0;
13472
13473         vcpu->run->mmio.phys_addr = gpa;
13474         vcpu->run->mmio.len = min(8u, frag->len);
13475         vcpu->run->mmio.is_write = 1;
13476         memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len));
13477         vcpu->run->exit_reason = KVM_EXIT_MMIO;
13478
13479         vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio;
13480
13481         return 0;
13482 }
13483 EXPORT_SYMBOL_GPL(kvm_sev_es_mmio_write);
13484
13485 int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes,
13486                          void *data)
13487 {
13488         int handled;
13489         struct kvm_mmio_fragment *frag;
13490
13491         if (!data)
13492                 return -EINVAL;
13493
13494         handled = read_emultor.read_write_mmio(vcpu, gpa, bytes, data);
13495         if (handled == bytes)
13496                 return 1;
13497
13498         bytes -= handled;
13499         gpa += handled;
13500         data += handled;
13501
13502         /* TODO: Check whether the number of fragments needs to be incremented. */
13503         frag = vcpu->mmio_fragments;
13504         vcpu->mmio_nr_fragments = 1;
13505         frag->len = bytes;
13506         frag->gpa = gpa;
13507         frag->data = data;
13508
13509         vcpu->mmio_needed = 1;
13510         vcpu->mmio_cur_fragment = 0;
13511
13512         vcpu->run->mmio.phys_addr = gpa;
13513         vcpu->run->mmio.len = min(8u, frag->len);
13514         vcpu->run->mmio.is_write = 0;
13515         vcpu->run->exit_reason = KVM_EXIT_MMIO;
13516
13517         vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio;
13518
13519         return 0;
13520 }
13521 EXPORT_SYMBOL_GPL(kvm_sev_es_mmio_read);
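
/*
 * Worked example of the fragment handling above: a 16-byte SEV-ES MMIO read
 * becomes a single fragment with len = 16.  The first KVM_EXIT_MMIO requests
 * min(8u, 16) = 8 bytes; complete_sev_es_emulated_mmio() then advances
 * data/gpa by 8, shrinks len to 8, and issues a second KVM_EXIT_MMIO for the
 * remaining 8 bytes before the vCPU re-enters the guest.
 */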
13522
13523 static void advance_sev_es_emulated_pio(struct kvm_vcpu *vcpu, unsigned count, int size)
13524 {
13525         vcpu->arch.sev_pio_count -= count;
13526         vcpu->arch.sev_pio_data += count * size;
13527 }
13528
13529 static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
13530                            unsigned int port);
13531
13532 static int complete_sev_es_emulated_outs(struct kvm_vcpu *vcpu)
13533 {
13534         int size = vcpu->arch.pio.size;
13535         int port = vcpu->arch.pio.port;
13536
13537         vcpu->arch.pio.count = 0;
13538         if (vcpu->arch.sev_pio_count)
13539                 return kvm_sev_es_outs(vcpu, size, port);
13540         return 1;
13541 }
13542
13543 static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
13544                            unsigned int port)
13545 {
13546         for (;;) {
13547                 unsigned int count =
13548                         min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count);
13549                 int ret = emulator_pio_out(vcpu, size, port, vcpu->arch.sev_pio_data, count);
13550
13551                 /* The data was already copied by emulator_pio_out(). */
13552                 advance_sev_es_emulated_pio(vcpu, count, size);
13553                 if (!ret)
13554                         break;
13555
13556                 /* Emulation done by the kernel.  */
13557                 if (!vcpu->arch.sev_pio_count)
13558                         return 1;
13559         }
13560
13561         vcpu->arch.complete_userspace_io = complete_sev_es_emulated_outs;
13562         return 0;
13563 }
13564
13565 static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
13566                           unsigned int port);
13567
13568 static int complete_sev_es_emulated_ins(struct kvm_vcpu *vcpu)
13569 {
13570         unsigned count = vcpu->arch.pio.count;
13571         int size = vcpu->arch.pio.size;
13572         int port = vcpu->arch.pio.port;
13573
13574         complete_emulator_pio_in(vcpu, vcpu->arch.sev_pio_data);
13575         advance_sev_es_emulated_pio(vcpu, count, size);
13576         if (vcpu->arch.sev_pio_count)
13577                 return kvm_sev_es_ins(vcpu, size, port);
13578         return 1;
13579 }
13580
13581 static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
13582                           unsigned int port)
13583 {
13584         for (;;) {
13585                 unsigned int count =
13586                         min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count);
13587                 if (!emulator_pio_in(vcpu, size, port, vcpu->arch.sev_pio_data, count))
13588                         break;
13589
13590                 /* Emulation done by the kernel.  */
13591                 advance_sev_es_emulated_pio(vcpu, count, size);
13592                 if (!vcpu->arch.sev_pio_count)
13593                         return 1;
13594         }
13595
13596         vcpu->arch.complete_userspace_io = complete_sev_es_emulated_ins;
13597         return 0;
13598 }
13599
13600 int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
13601                          unsigned int port, void *data,  unsigned int count,
13602                          int in)
13603 {
13604         vcpu->arch.sev_pio_data = data;
13605         vcpu->arch.sev_pio_count = count;
13606         return in ? kvm_sev_es_ins(vcpu, size, port)
13607                   : kvm_sev_es_outs(vcpu, size, port);
13608 }
13609 EXPORT_SYMBOL_GPL(kvm_sev_es_string_io);
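
/*
 * Worked example of the chunking in kvm_sev_es_outs()/kvm_sev_es_ins(): a
 * rep OUTSB with size = 1 and count = 5000 is emulated in batches of
 * min(PAGE_SIZE / size, remaining), i.e. 4096 bytes and then 904.  If
 * emulator_pio_out() instead has to punt to userspace, the loop breaks and
 * complete_sev_es_emulated_outs() resumes the remaining count after the
 * KVM_EXIT_IO round trip.
 */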
13610
13611 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_entry);
13612 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
13613 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
13614 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
13615 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
13616 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
13617 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
13618 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmenter);
13619 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
13620 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
13621 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
13622 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmenter_failed);
13623 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
13624 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
13625 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);
13626 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_write_tsc_offset);
13627 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ple_window_update);
13628 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pml_full);
13629 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pi_irte_update);
13630 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_unaccelerated_access);
13631 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_incomplete_ipi);
13632 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_ga_log);
13633 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_kick_vcpu_slowpath);
13634 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_doorbell);
13635 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_apicv_accept_irq);
13636 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_enter);
13637 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_exit);
13638 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_enter);
13639 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_exit);
13640
13641 static int __init kvm_x86_init(void)
13642 {
13643         kvm_mmu_x86_module_init();
13644         return 0;
13645 }
13646 module_init(kvm_x86_init);
13647
13648 static void __exit kvm_x86_exit(void)
13649 {
13650         /*
13651          * If module_init() is implemented, module_exit() must also be
13652          * implemented to allow module unload.
13653          */
13654 }
13655 module_exit(kvm_x86_exit);