/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */
#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/debugfs.h>
#include <linux/nmi.h>
#include <linux/swait.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
static int kvmapf = 1;

static int __init parse_no_kvmapf(char *arg)
{
        kvmapf = 0;
        return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int __init parse_no_stealacc(char *arg)
{
        steal_acc = 0;
        return 0;
}

early_param("no-steal-acc", parse_no_stealacc);
static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}
#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)
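/*
 * Tasks blocked on an asynchronous page fault are parked on a small hash
 * table, keyed by the token the host associated with the outstanding
 * fault.  The later "page ready" notification carries the same token and
 * is used to find the sleeper again.
 */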
struct kvm_task_sleep_node {
        struct hlist_node link;
        struct swait_queue_head wq;
        u32 token;
        int cpu;
        bool halted;
};

static struct kvm_task_sleep_head {
        raw_spinlock_t lock;
        struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
                                                  u32 token)
{
        struct hlist_node *p;

        hlist_for_each(p, &b->list) {
                struct kvm_task_sleep_node *n =
                        hlist_entry(p, typeof(*n), link);
                if (n->token == token)
                        return n;
        }

        return NULL;
}
/*
 * @interrupt_kernel: Is this called from a routine which interrupts the kernel
 *                    (other than user space)?
 */
void kvm_async_pf_task_wait(u32 token, int interrupt_kernel)
{
        u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
        struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
        struct kvm_task_sleep_node n, *e;
        DECLARE_SWAITQUEUE(wait);

        rcu_irq_exit();

        raw_spin_lock(&b->lock);
        e = _find_apf_task(b, token);
        if (e) {
                /* dummy entry exist -> wake up was delivered ahead of PF */
                hlist_del(&e->link);
                kfree(e);
                raw_spin_unlock(&b->lock);

                rcu_irq_enter();
                return;
        }

        n.token = token;
        n.cpu = smp_processor_id();
        n.halted = is_idle_task(current) ||
                   (IS_ENABLED(CONFIG_PREEMPT_COUNT)
                    ? preempt_count() > 1 || rcu_preempt_depth()
                    : interrupt_kernel);
        init_swait_queue_head(&n.wq);
        hlist_add_head(&n.link, &b->list);
        raw_spin_unlock(&b->lock);

        for (;;) {
                if (!n.halted)
                        prepare_to_swait_exclusive(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
                if (hlist_unhashed(&n.link))
                        break;

                rcu_irq_enter();

                if (!n.halted) {
                        local_irq_enable();
                        schedule();
                        local_irq_disable();
                } else {
                        /*
                         * We cannot reschedule. So halt.
                         */
                        native_safe_halt();
                        local_irq_disable();
                }

                rcu_irq_exit();
        }
        if (!n.halted)
                finish_swait(&n.wq, &wait);

        rcu_irq_enter();
        return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);
static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
        hlist_del_init(&n->link);
        if (n->halted)
                smp_send_reschedule(n->cpu);
        else if (swq_has_sleeper(&n->wq))
                swake_up_one(&n->wq);
}

static void apf_task_wake_all(void)
{
        int i;

        for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
                struct hlist_node *p, *next;
                struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
                raw_spin_lock(&b->lock);
                hlist_for_each_safe(p, next, &b->list) {
                        struct kvm_task_sleep_node *n =
                                hlist_entry(p, typeof(*n), link);
                        if (n->cpu == smp_processor_id())
                                apf_task_wake_one(n);
                }
                raw_spin_unlock(&b->lock);
        }
}
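/*
 * Handle a "page ready" notification.  A token of ~0 is a broadcast from
 * the host and wakes every sleeper on this CPU.  If the notification
 * arrives before the faulting task had a chance to register itself, a
 * dummy node is queued so that the later kvm_async_pf_task_wait() finds
 * it and returns immediately instead of sleeping forever.
 */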
void kvm_async_pf_task_wake(u32 token)
{
        u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
        struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
        struct kvm_task_sleep_node *n;

        if (token == ~0) {
                apf_task_wake_all();
                return;
        }

again:
        raw_spin_lock(&b->lock);
        n = _find_apf_task(b, token);
        if (!n) {
                /*
                 * async PF was not yet handled.
                 * Add dummy entry for the token.
                 */
                n = kzalloc(sizeof(*n), GFP_ATOMIC);
                if (!n) {
                        /*
                         * Allocation failed! Busy wait while other cpu
                         * handles async PF.
                         */
                        raw_spin_unlock(&b->lock);
                        cpu_relax();
                        goto again;
                }
                n->token = token;
                n->cpu = smp_processor_id();
                init_swait_queue_head(&n->wq);
                hlist_add_head(&n->link, &b->list);
        } else
                apf_task_wake_one(n);
        raw_spin_unlock(&b->lock);
        return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);
u32 kvm_read_and_reset_pf_reason(void)
{
        u32 reason = 0;

        if (__this_cpu_read(apf_reason.enabled)) {
                reason = __this_cpu_read(apf_reason.reason);
                __this_cpu_write(apf_reason.reason, 0);
        }

        return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason);
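/*
 * #PF entry point when async page faults are enabled.  The host stores a
 * reason code in the per-CPU apf_reason slot before injecting the fault:
 * 0 means an ordinary page fault and is handed to do_page_fault(),
 * PAGE_NOT_PRESENT means the host is still paging the memory in, so the
 * current task waits on the token passed in CR2, and PAGE_READY reuses
 * that token to wake the waiter up again.
 */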
dotraplinkage void
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
        enum ctx_state prev_state;

        switch (kvm_read_and_reset_pf_reason()) {
        default:
                do_page_fault(regs, error_code);
                break;
        case KVM_PV_REASON_PAGE_NOT_PRESENT:
                /* page is swapped out by the host. */
                prev_state = exception_enter();
                kvm_async_pf_task_wait((u32)read_cr2(), !user_mode(regs));
                exception_exit(prev_state);
                break;
        case KVM_PV_REASON_PAGE_READY:
                rcu_irq_enter();
                kvm_async_pf_task_wake((u32)read_cr2());
                rcu_irq_exit();
                break;
        }
}
NOKPROBE_SYMBOL(do_async_page_fault);
static void __init paravirt_ops_setup(void)
{
        pv_info.name = "KVM";

        if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
                pv_cpu_ops.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
        no_timer_check = 1;
#endif
}
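/*
 * MSR_KVM_STEAL_TIME takes the guest physical address of a 64-byte
 * aligned struct kvm_steal_time; bit 0 (KVM_MSR_ENABLED) turns the
 * reporting on.  The host keeps the structure updated with the time this
 * vCPU spent preempted, and kvm_steal_clock() reads it back.
 */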
static void kvm_register_steal_time(void)
{
        int cpu = smp_processor_id();
        struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

        if (!has_steal_clock)
                return;

        wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
        pr_info("kvm-stealtime: cpu %d, msr %llx\n",
                cpu, (unsigned long long) slow_virt_to_phys(st));
}
static DEFINE_PER_CPU_DECRYPTED(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
        /*
         * This relies on __test_and_clear_bit to modify the memory
         * in a way that is atomic with respect to the local CPU.
         * The hypervisor only accesses this memory from the local CPU so
         * there's no need for lock or memory barriers.
         * An optimization barrier is implied in apic write.
         */
        if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
                return;
        apic->native_eoi_write(APIC_EOI, APIC_EOI_ACK);
}
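/*
 * Per-CPU enablement of the paravirtual features.  Both MSRs written here
 * take a guest physical address with flag bits in the low bits:
 * MSR_KVM_ASYNC_PF_EN points at apf_reason (bit 0 enables, bit 1 asks for
 * delivery even while in kernel mode, bit 2 requests delivery as a #PF
 * vmexit), and MSR_KVM_PV_EOI_EN points at the kvm_apic_eoi word used by
 * kvm_guest_apic_eoi_write() above.
 */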
static void kvm_guest_cpu_init(void)
{
        if (!kvm_para_available())
                return;

        if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
                u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));

#ifdef CONFIG_PREEMPT
                pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
                pa |= KVM_ASYNC_PF_ENABLED;

                if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))
                        pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;

                wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
                __this_cpu_write(apf_reason.enabled, 1);
                printk(KERN_INFO"KVM setup async PF for cpu %d\n",
                       smp_processor_id());
        }

        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
                unsigned long pa;
                /* Size alignment is implied but just to make it explicit. */
                BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
                __this_cpu_write(kvm_apic_eoi, 0);
                pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
                        | KVM_MSR_ENABLED;
                wrmsrl(MSR_KVM_PV_EOI_EN, pa);
        }

        if (has_steal_clock)
                kvm_register_steal_time();
}
static void kvm_pv_disable_apf(void)
{
        if (!__this_cpu_read(apf_reason.enabled))
                return;

        wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
        __this_cpu_write(apf_reason.enabled, 0);

        printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
               smp_processor_id());
}
static void kvm_pv_guest_cpu_reboot(void *unused)
{
        /*
         * We disable PV EOI before we load a new kernel by kexec,
         * since MSR_KVM_PV_EOI_EN stores a pointer into old kernel's memory.
         * New kernel can re-enable when it boots.
         */
        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                wrmsrl(MSR_KVM_PV_EOI_EN, 0);
        kvm_pv_disable_apf();
        kvm_disable_steal_time();
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
                                unsigned long code, void *unused)
{
        if (code == SYS_RESTART)
                on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
        return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
        .notifier_call = kvm_pv_reboot_notify,
};
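/*
 * Steal time is published by the host using a seqcount-like protocol:
 * the version field is odd while an update is in flight and even once it
 * has completed, so the reader retries until it sees the same even
 * version on both sides of the read.
 */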
static u64 kvm_steal_clock(int cpu)
{
        u64 steal;
        struct kvm_steal_time *src;
        int version;

        src = &per_cpu(steal_time, cpu);
        do {
                version = src->version;
                virt_rmb();
                steal = src->steal;
                virt_rmb();
        } while ((version & 1) || (version != src->version));

        return steal;
}
void kvm_disable_steal_time(void)
{
        if (!has_steal_clock)
                return;

        wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
{
        early_set_memory_decrypted((unsigned long) ptr, size);
}
/*
 * Iterate through all possible CPUs and map the memory region pointed
 * by apf_reason, steal_time and kvm_apic_eoi as decrypted at once.
 *
 * Note: we iterate through all possible CPUs to ensure that CPUs
 * hotplugged will have their per-cpu variable already mapped as
 * decrypted.
 */
static void __init sev_map_percpu_data(void)
{
        int cpu;

        if (!sev_active())
                return;

        for_each_possible_cpu(cpu) {
                __set_percpu_decrypted(&per_cpu(apf_reason, cpu), sizeof(apf_reason));
                __set_percpu_decrypted(&per_cpu(steal_time, cpu), sizeof(steal_time));
                __set_percpu_decrypted(&per_cpu(kvm_apic_eoi, cpu), sizeof(kvm_apic_eoi));
        }
}
#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_cpus(unsigned int max_cpus)
{
        native_smp_prepare_cpus(max_cpus);
        if (kvm_para_has_hint(KVM_HINTS_REALTIME))
                static_branch_disable(&virt_spin_lock_key);
}

static void __init kvm_smp_prepare_boot_cpu(void)
{
        /*
         * Map the per-cpu variables as decrypted before kvm_guest_cpu_init()
         * shares the guest physical address with the hypervisor.
         */
        sev_map_percpu_data();

        kvm_guest_cpu_init();
        native_smp_prepare_boot_cpu();
        kvm_spinlock_init();
}
static void kvm_guest_cpu_offline(void)
{
        kvm_disable_steal_time();
        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                wrmsrl(MSR_KVM_PV_EOI_EN, 0);
        kvm_pv_disable_apf();
        apf_task_wake_all();
}

static int kvm_cpu_online(unsigned int cpu)
{
        local_irq_disable();
        kvm_guest_cpu_init();
        local_irq_enable();
        return 0;
}

static int kvm_cpu_down_prepare(unsigned int cpu)
{
        local_irq_disable();
        kvm_guest_cpu_offline();
        local_irq_enable();
        return 0;
}
#endif
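/*
 * When the host advertises KVM_FEATURE_ASYNC_PF, the #PF gate is
 * repointed at the async_page_fault entry stub (an assembly thunk in the
 * entry code) which funnels into do_async_page_fault() above instead of
 * the regular page fault entry.
 */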
static void __init kvm_apf_trap_init(void)
{
        update_intr_gate(X86_TRAP_PF, async_page_fault);
}
static DEFINE_PER_CPU(cpumask_var_t, __pv_tlb_mask);

static void kvm_flush_tlb_others(const struct cpumask *cpumask,
                        const struct flush_tlb_info *info)
{
        u8 state;
        int cpu;
        struct kvm_steal_time *src;
        struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_tlb_mask);

        cpumask_copy(flushmask, cpumask);
        /*
         * We have to call flush only on online vCPUs. And
         * queue flush_on_enter for pre-empted vCPUs
         */
        for_each_cpu(cpu, flushmask) {
                src = &per_cpu(steal_time, cpu);
                state = READ_ONCE(src->preempted);
                if ((state & KVM_VCPU_PREEMPTED)) {
                        if (try_cmpxchg(&src->preempted, &state,
                                        state | KVM_VCPU_FLUSH_TLB))
                                __cpumask_clear_cpu(cpu, flushmask);
                }
        }

        native_flush_tlb_others(flushmask, info);
}
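/*
 * Skipping the IPI above for a preempted vCPU is safe because setting
 * KVM_VCPU_FLUSH_TLB in its steal-time area makes the host flush that
 * vCPU's TLB before it runs again, so it can never use the stale
 * translations we would otherwise have shot down.
 */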
static void __init kvm_guest_init(void)
{
        int i;

        if (!kvm_para_available())
                return;

        paravirt_ops_setup();
        register_reboot_notifier(&kvm_pv_reboot_nb);
        for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
                raw_spin_lock_init(&async_pf_sleepers[i].lock);
        if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
                x86_init.irqs.trap_init = kvm_apf_trap_init;

        if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
                has_steal_clock = 1;
                pv_time_ops.steal_clock = kvm_steal_clock;
        }

        if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
            !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
            kvm_para_has_feature(KVM_FEATURE_STEAL_TIME))
                pv_mmu_ops.flush_tlb_others = kvm_flush_tlb_others;

        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                apic_set_eoi_write(kvm_guest_apic_eoi_write);

#ifdef CONFIG_SMP
        smp_ops.smp_prepare_cpus = kvm_smp_prepare_cpus;
        smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
        if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
                                      kvm_cpu_online, kvm_cpu_down_prepare) < 0)
                pr_err("kvm_guest: Failed to install cpu hotplug callbacks\n");
#else
        sev_map_percpu_data();
        kvm_guest_cpu_init();
#endif

        /*
         * Hard lockup detection is enabled by default. Disable it, as guests
         * can get false positives too easily, for example if the host is
         * overcommitted.
         */
        hardlockup_detector_disable();
}
static noinline uint32_t __kvm_cpuid_base(void)
{
        if (boot_cpu_data.cpuid_level < 0)
                return 0;       /* So we don't blow up on old processors */

        if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
                return hypervisor_cpuid_base("KVMKVMKVM\0\0", 0);

        return 0;
}

static inline uint32_t kvm_cpuid_base(void)
{
        static int kvm_cpuid_base = -1;

        if (kvm_cpuid_base == -1)
                kvm_cpuid_base = __kvm_cpuid_base();

        return kvm_cpuid_base;
}
bool kvm_para_available(void)
{
        return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);

unsigned int kvm_arch_para_features(void)
{
        return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

unsigned int kvm_arch_para_hints(void)
{
        return cpuid_edx(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}
static uint32_t __init kvm_detect(void)
{
        return kvm_cpuid_base();
}

const __initconst struct hypervisor_x86 x86_hyper_kvm = {
        .name                   = "KVM",
        .detect                 = kvm_detect,
        .type                   = X86_HYPER_KVM,
        .init.init_platform     = kvmclock_init,
        .init.guest_late_init   = kvm_guest_init,
        .init.x2apic_available  = kvm_para_available,
};
static __init int activate_jump_labels(void)
{
        if (has_steal_clock) {
                static_key_slow_inc(&paravirt_steal_enabled);
                if (steal_acc)
                        static_key_slow_inc(&paravirt_steal_rq_enabled);
        }

        return 0;
}
arch_initcall(activate_jump_labels);
static __init int kvm_setup_pv_tlb_flush(void)
{
        int cpu;

        if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
            !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
            kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
                for_each_possible_cpu(cpu) {
                        zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu),
                                GFP_KERNEL, cpu_to_node(cpu));
                }
                pr_info("KVM setup pv remote TLB flush\n");
        }

        return 0;
}
arch_initcall(kvm_setup_pv_tlb_flush);
#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
        int apicid;
        unsigned long flags = 0;

        apicid = per_cpu(x86_cpu_to_apicid, cpu);
        kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}
#include <asm/qspinlock.h>

static void kvm_wait(u8 *ptr, u8 val)
{
        unsigned long flags;

        if (in_nmi())
                return;

        local_irq_save(flags);

        if (READ_ONCE(*ptr) != val)
                goto out;

        /*
         * halt until it's our turn and kicked. Note that we do safe halt
         * for irq enabled case to avoid hang when lock info is overwritten
         * in irq spinlock slowpath and no spurious interrupt occur to save us.
         */
        if (arch_irqs_disabled_flags(flags))
                halt();
        else
                safe_halt();

out:
        local_irq_restore(flags);
}
#ifdef CONFIG_X86_32
__visible bool __kvm_vcpu_is_preempted(long cpu)
{
        struct kvm_steal_time *src = &per_cpu(steal_time, cpu);

        return !!(src->preempted & KVM_VCPU_PREEMPTED);
}
PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);

#else

#include <asm/asm-offsets.h>

extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);

/*
 * Hand-optimize version for x86-64 to avoid 8 64-bit register saving and
 * restoring to/from the stack.
 */
asm(
".pushsection .text;"
".global __raw_callee_save___kvm_vcpu_is_preempted;"
".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
"__raw_callee_save___kvm_vcpu_is_preempted:"
"movq   __per_cpu_offset(,%rdi,8), %rax;"
"cmpb   $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
"setne  %al;"
"ret;"
".size __raw_callee_save___kvm_vcpu_is_preempted, .-__raw_callee_save___kvm_vcpu_is_preempted;"
".popsection");
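/*
 * The sequence above is the open-coded equivalent of the C version:
 * %rdi holds the CPU number, __per_cpu_offset(,%rdi,8) yields that CPU's
 * per-cpu base, and the cmpb/setne pair tests the preempted byte of its
 * steal_time area, returning the result in %al without clobbering
 * anything beyond %rax.
 */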
#endif

/*
 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
        if (!kvm_para_available())
                return;
        /* Does host kernel support KVM_FEATURE_PV_UNHALT? */
        if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
                return;

        if (kvm_para_has_hint(KVM_HINTS_REALTIME))
                return;

        __pv_init_lock_hash();
        pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
        pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
        pv_lock_ops.wait = kvm_wait;
        pv_lock_ops.kick = kvm_kick_cpu;

        if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
                pv_lock_ops.vcpu_is_preempted =
                        PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
        }
}

#endif  /* CONFIG_PARAVIRT_SPINLOCKS */