/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/idle.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
static int kvmapf = 1;

static int parse_no_kvmapf(char *arg)
{
	kvmapf = 0;
	return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int parse_no_stealacc(char *arg)
{
	steal_acc = 0;
	return 0;
}

early_param("no-steal-acc", parse_no_stealacc);
static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;
/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}
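/*
 * Tasks stopped by a "page not present" async page fault are parked in
 * a small hash table keyed by the token the host passes with the fault,
 * so that the later "page ready" notification can wake exactly the
 * right sleeper.
 */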
#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

struct kvm_task_sleep_node {
	struct hlist_node link;
	wait_queue_head_t wq;
	u32 token;
	int cpu;
	bool halted;
};

static struct kvm_task_sleep_head {
	spinlock_t lock;
	struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];
static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
						  u32 token)
{
	struct hlist_node *p;

	hlist_for_each(p, &b->list) {
		struct kvm_task_sleep_node *n =
			hlist_entry(p, typeof(*n), link);
		if (n->token == token)
			return n;
	}

	return NULL;
}
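/*
 * Called from the page fault path when the host has reported the
 * faulting page as not (yet) present.  If a wakeup for this token
 * raced ahead of us, the dummy node left behind by
 * kvm_async_pf_task_wake() lets us return immediately; otherwise we
 * either schedule out or, when rescheduling is not safe, spin in halt
 * until the node is unhashed by the wakeup.
 */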
void kvm_async_pf_task_wait(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node n, *e;
	DEFINE_WAIT(wait);
	int cpu, idle;

	cpu = get_cpu();
	idle = idle_cpu(cpu);
	put_cpu();

	spin_lock(&b->lock);
	e = _find_apf_task(b, token);
	if (e) {
		/* dummy entry exists -> wake up was delivered ahead of PF */
		hlist_del(&e->link);
		kfree(e);
		spin_unlock(&b->lock);
		return;
	}

	n.token = token;
	n.cpu = smp_processor_id();
	n.halted = idle || preempt_count() > 1;
	init_waitqueue_head(&n.wq);
	hlist_add_head(&n.link, &b->list);
	spin_unlock(&b->lock);

	for (;;) {
		if (!n.halted)
			prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (hlist_unhashed(&n.link))
			break;

		if (!n.halted) {
			local_irq_enable();
			schedule();
			local_irq_disable();
		} else {
			/*
			 * We cannot reschedule. So halt.
			 */
			native_safe_halt();
			local_irq_disable();
		}
	}
	if (!n.halted)
		finish_wait(&n.wq, &wait);
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);
static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
	hlist_del_init(&n->link);
	if (n->halted)
		smp_send_reschedule(n->cpu);
	else if (waitqueue_active(&n->wq))
		wake_up(&n->wq);
}
static void apf_task_wake_all(void)
{
	int i;

	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
		struct hlist_node *p, *next;
		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
		spin_lock(&b->lock);
		hlist_for_each_safe(p, next, &b->list) {
			struct kvm_task_sleep_node *n =
				hlist_entry(p, typeof(*n), link);
			if (n->cpu == smp_processor_id())
				apf_task_wake_one(n);
		}
		spin_unlock(&b->lock);
	}
}
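/*
 * "Page ready" side of the protocol: wake the task sleeping on @token.
 * The special token ~0 means "wake everything parked on this CPU".
 */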
void kvm_async_pf_task_wake(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *n;

	if (token == ~0) {
		apf_task_wake_all();
		return;
	}

again:
	spin_lock(&b->lock);
	n = _find_apf_task(b, token);
	if (!n) {
		/*
		 * async PF was not yet handled.
		 * Add dummy entry for the token.
		 */
		n = kzalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			/*
			 * Allocation failed! Busy wait while other cpu
			 * handles async PF.
			 */
			spin_unlock(&b->lock);
			cpu_relax();
			goto again;
		}
		n->token = token;
		n->cpu = smp_processor_id();
		init_waitqueue_head(&n->wq);
		hlist_add_head(&n->link, &b->list);
	} else
		apf_task_wake_one(n);
	spin_unlock(&b->lock);
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);
u32 kvm_read_and_reset_pf_reason(void)
{
	u32 reason = 0;

	if (__get_cpu_var(apf_reason).enabled) {
		reason = __get_cpu_var(apf_reason).reason;
		__get_cpu_var(apf_reason).reason = 0;
	}

	return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
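/*
 * A reason of 0 means an ordinary #PF; anything else is a paravirt
 * notification whose token the host has stashed in CR2 in place of a
 * fault address.
 */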
dotraplinkage void __kprobes
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	switch (kvm_read_and_reset_pf_reason()) {
	default:
		do_page_fault(regs, error_code);
		break;
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		/* page is swapped out by the host. */
		rcu_irq_enter();
		exit_idle();
		kvm_async_pf_task_wait((u32)read_cr2());
		rcu_irq_exit();
		break;
	case KVM_PV_REASON_PAGE_READY:
		rcu_irq_enter();
		exit_idle();
		kvm_async_pf_task_wake((u32)read_cr2());
		rcu_irq_exit();
		break;
	}
}
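/*
 * Install the KVM paravirt hooks.  KVM_FEATURE_NOP_IO_DELAY means the
 * host does not require the traditional port-0x80 delay after I/O, so
 * io_delay can become a no-op.
 */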
static void __init paravirt_ops_setup(void)
{
	pv_info.name = "KVM";
	pv_info.paravirt_enabled = 1;

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_cpu_ops.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif
}
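/*
 * Tell the host the physical address of this CPU's kvm_steal_time
 * area (the low bit doubling as the enable flag); the host then
 * accounts time the vCPU spent runnable but preempted into st->steal.
 */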
static void kvm_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	if (!has_steal_clock)
		return;

	memset(st, 0, sizeof(*st));

	wrmsrl(MSR_KVM_STEAL_TIME, (__pa(st) | KVM_MSR_ENABLED));
	printk(KERN_INFO "kvm-stealtime: cpu %d, msr %lx\n",
		cpu, (unsigned long)__pa(st));
}
static DEFINE_PER_CPU(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
	/*
	 * This relies on __test_and_clear_bit to modify the memory
	 * in a way that is atomic with respect to the local CPU.
	 * The hypervisor only accesses this memory from the local CPU so
	 * there's no need for lock or memory barriers.
	 * An optimization barrier is implied in apic write.
	 */
	if (__test_and_clear_bit(KVM_PV_EOI_BIT, &__get_cpu_var(kvm_apic_eoi)))
		return;
	apic_write(APIC_EOI, APIC_EOI_ACK);
}
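/*
 * Per-CPU enablement of the paravirt features negotiated above: the
 * shared apf_reason and kvm_apic_eoi areas are handed to the host via
 * their MSRs, with KVM_ASYNC_PF_ENABLED / KVM_MSR_ENABLED in the low
 * bits of the physical address acting as the on switch.
 */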
void __cpuinit kvm_guest_cpu_init(void)
{
	if (!kvm_para_available())
		return;

	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
		u64 pa = __pa(&__get_cpu_var(apf_reason));

#ifdef CONFIG_PREEMPT
		pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
		__get_cpu_var(apf_reason).enabled = 1;
		printk(KERN_INFO"KVM setup async PF for cpu %d\n",
		       smp_processor_id());
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
		unsigned long pa;
		/* Size alignment is implied but just to make it explicit. */
		BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
		__get_cpu_var(kvm_apic_eoi) = 0;
		pa = __pa(&__get_cpu_var(kvm_apic_eoi)) | KVM_MSR_ENABLED;
		wrmsrl(MSR_KVM_PV_EOI_EN, pa);
	}

	if (has_steal_clock)
		kvm_register_steal_time();
}
static void kvm_pv_disable_apf(void)
{
	if (!__get_cpu_var(apf_reason).enabled)
		return;

	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
	__get_cpu_var(apf_reason).enabled = 0;

	printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
	       smp_processor_id());
}
static void kvm_pv_guest_cpu_reboot(void *unused)
{
	/*
	 * We disable PV EOI before we load a new kernel by kexec,
	 * since MSR_KVM_PV_EOI_EN stores a pointer into the old kernel's
	 * memory.  The new kernel can re-enable it when it boots.
	 */
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	kvm_disable_steal_time();
}
static int kvm_pv_reboot_notify(struct notifier_block *nb,
				unsigned long code, void *unused)
{
	if (code == SYS_RESTART)
		on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
	.notifier_call = kvm_pv_reboot_notify,
};
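/*
 * Lockless read of another CPU's steal counter.  The host increments
 * src->version before and after every update, so an odd value, or a
 * value that changed across the read, means we raced with an update
 * and must retry (the usual seqcount-style protocol).
 */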
static u64 kvm_steal_clock(int cpu)
{
	u64 steal;
	struct kvm_steal_time *src;
	int version;

	src = &per_cpu(steal_time, cpu);
	do {
		version = src->version;
		rmb();
		steal = src->steal;
		rmb();
	} while ((version & 1) || (version != src->version));

	return steal;
}
void kvm_disable_steal_time(void)
{
	if (!has_steal_clock)
		return;

	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}
#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_boot_cpu(void)
{
	WARN_ON(kvm_register_clock("primary cpu clock"));
	kvm_guest_cpu_init();
	native_smp_prepare_boot_cpu();
}

static void __cpuinit kvm_guest_cpu_online(void *dummy)
{
	kvm_guest_cpu_init();
}

static void kvm_guest_cpu_offline(void *dummy)
{
	kvm_disable_steal_time();
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	apf_task_wake_all();
}
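/*
 * CPU hotplug: arm the paravirt MSRs when a CPU comes (back) online
 * and disarm them before it goes down.  The offline call waits for
 * completion (last argument 1) so the shared areas are guaranteed to
 * be unregistered before the CPU disappears.
 */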
static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	int cpu = (unsigned long)hcpu;
	switch (action) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
	case CPU_ONLINE_FROZEN:
		smp_call_function_single(cpu, kvm_guest_cpu_online, NULL, 0);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		smp_call_function_single(cpu, kvm_guest_cpu_offline, NULL, 1);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata kvm_cpu_notifier = {
	.notifier_call	= kvm_cpu_notify,
};
#endif
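/*
 * Replace the #PF gate (vector 14) with the async-page-fault entry
 * stub so every guest page fault is routed through
 * do_async_page_fault() above.
 */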
static void __init kvm_apf_trap_init(void)
{
	set_intr_gate(14, &async_page_fault);
}
void __init kvm_guest_init(void)
{
	int i;

	if (!kvm_para_available())
		return;

	paravirt_ops_setup();
	register_reboot_notifier(&kvm_pv_reboot_nb);
	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
		spin_lock_init(&async_pf_sleepers[i].lock);
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
		x86_init.irqs.trap_init = kvm_apf_trap_init;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		has_steal_clock = 1;
		pv_time_ops.steal_clock = kvm_steal_clock;
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		apic_set_eoi_write(kvm_guest_apic_eoi_write);

#ifdef CONFIG_SMP
	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
	register_cpu_notifier(&kvm_cpu_notifier);
#else
	kvm_guest_cpu_init();
#endif
}
static bool __init kvm_detect(void)
{
	if (!kvm_para_available())
		return false;
	return true;
}

const struct hypervisor_x86 x86_hyper_kvm __refconst = {
	.name		= "KVM",
	.detect		= kvm_detect,
};
EXPORT_SYMBOL_GPL(x86_hyper_kvm);
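/*
 * Enable the scheduler's steal-time accounting keys.  This runs as an
 * arch_initcall rather than from kvm_guest_init() because patching
 * jump labels is not possible that early in boot.
 */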
static __init int activate_jump_labels(void)
{
	if (has_steal_clock) {
		static_key_slow_inc(&paravirt_steal_enabled);
		if (steal_acc)
			static_key_slow_inc(&paravirt_steal_rq_enabled);
	}

	return 0;
}
arch_initcall(activate_jump_labels);