/*  Paravirtualization interfaces
    Copyright (C) 2006 Rusty Russell IBM Corporation

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA

    2007 - x86_64 support added by Glauber de Oliveira Costa, Red Hat Inc
*/
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/efi.h>
#include <linux/bcd.h>
#include <linux/highmem.h>
#include <linux/kprobes.h>

#include <asm/bug.h>
#include <asm/paravirt.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/pgtable.h>
#include <asm/time.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/delay.h>
#include <asm/fixmap.h>
#include <asm/apic.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/special_insns.h>
#include <asm/tlb.h>
/*
 * nop stub, which must not clobber anything *including the stack* to
 * avoid confusing the entry prologues.
 */
extern void _paravirt_nop(void);
asm (".pushsection .entry.text, \"ax\"\n"
     ".global _paravirt_nop\n"
     "_paravirt_nop:\n\t"
     "ret\n\t"
     ".size _paravirt_nop, . - _paravirt_nop\n\t"
     ".type _paravirt_nop, @function\n\t"
     ".popsection");
/* identity function, which can be inlined */
u32 notrace _paravirt_ident_32(u32 x)
{
	return x;
}

u64 notrace _paravirt_ident_64(u64 x)
{
	return x;
}

void __init default_banner(void)
{
	printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
	       pv_info.name);
}
/* Undefined instruction for dealing with missing ops pointers. */
static const unsigned char ud2a[] = { 0x0f, 0x0b };

struct branch {
	unsigned char opcode;
	u32 delta;
} __attribute__((packed));
static unsigned paravirt_patch_call(void *insnbuf, const void *target,
				    unsigned long addr, unsigned len)
{
	struct branch *b = insnbuf;
	unsigned long delta = (unsigned long)target - (addr+5);

	if (len < 5) {
#ifdef CONFIG_RETPOLINE
		WARN_ONCE(1, "Failing to patch indirect CALL in %ps\n", (void *)addr);
#endif
		return len;	/* call too long for patch site */
	}

	b->opcode = 0xe8; /* call */
	b->delta = delta;
	BUILD_BUG_ON(sizeof(*b) != 5);

	return 5;
}
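/*
 * Worked example (illustration only, not part of the build): patching
 * a call site at address 0x1000 to reach a target at 0x2000 gives
 * delta = 0x2000 - (0x1000 + 5) = 0xffb, so the bytes written are
 * e8 fb 0f 00 00 -- the 5-byte x86 CALL rel32 encoding that struct
 * branch mirrors.
 */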
static unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
				   unsigned long addr, unsigned len)
{
	struct branch *b = insnbuf;
	unsigned long delta = (unsigned long)target - (addr+5);

	if (len < 5) {
#ifdef CONFIG_RETPOLINE
		WARN_ONCE(1, "Failing to patch indirect JMP in %ps\n", (void *)addr);
#endif
		return len;	/* jmp too long for patch site */
	}

	b->opcode = 0xe9;	/* jmp */
	b->delta = delta;

	return 5;
}
DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);

void __init native_pv_lock_init(void)
{
	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
		static_branch_disable(&virt_spin_lock_key);
}
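/*
 * virt_spin_lock_key defaults to true; on bare metal (no
 * X86_FEATURE_HYPERVISOR) it is disabled here so the test-and-set
 * fallback path in virt_spin_lock() is patched out and queued
 * spinlocks run unhindered.
 */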
unsigned paravirt_patch_default(u8 type, void *insnbuf,
				unsigned long addr, unsigned len)
{
	/*
	 * Neat trick to map patch type back to the call within the
	 * corresponding structure.
	 */
	void *opfunc = *((void **)&pv_ops + type);
	unsigned ret;

	if (opfunc == NULL)
		/* If there's no function, patch it with a ud2a (BUG) */
		ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
	else if (opfunc == _paravirt_nop)
		ret = 0;

	/* identity functions just return their single argument */
	else if (opfunc == _paravirt_ident_32)
		ret = paravirt_patch_ident_32(insnbuf, len);
	else if (opfunc == _paravirt_ident_64)
		ret = paravirt_patch_ident_64(insnbuf, len);

	else if (type == PARAVIRT_PATCH(cpu.iret) ||
		 type == PARAVIRT_PATCH(cpu.usergs_sysret64))
		/* If operation requires a jmp, then jmp */
		ret = paravirt_patch_jmp(insnbuf, opfunc, addr, len);
	else
		/* Otherwise call the function. */
		ret = paravirt_patch_call(insnbuf, opfunc, addr, len);

	return ret;
}
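/*
 * The "neat trick" above works because PARAVIRT_PATCH(x) expands to
 * offsetof(struct paravirt_patch_template, x) / sizeof(void *), so
 * e.g. type == PARAVIRT_PATCH(cpu.iret) indexes straight to
 * pv_ops.cpu.iret when pv_ops is viewed as a flat array of function
 * pointers.
 */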
unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
			      const char *start, const char *end)
{
	unsigned insn_len = end - start;

	if (insn_len > len || start == NULL)
		insn_len = len;
	else
		memcpy(insnbuf, start, insn_len);

	return insn_len;
}
static void native_flush_tlb(void)
{
	__native_flush_tlb();
}

/*
 * Global pages have to be flushed a bit differently. Not a real
 * performance problem because this does not happen often.
 */
static void native_flush_tlb_global(void)
{
	__native_flush_tlb_global();
}

static void native_flush_tlb_one_user(unsigned long addr)
{
	__native_flush_tlb_one_user(addr);
}
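/*
 * These thin wrappers exist because the __native_flush_tlb*() helpers
 * are inline functions: the mmu ops below need a real function address
 * to store (and to patch direct calls to).
 */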
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;

static u64 native_steal_clock(int cpu)
{
	return 0;
}

/* These are in entry.S */
extern void native_iret(void);
extern void native_usergs_sysret64(void);
static struct resource reserve_ioports = {
	.start = 0,
	.end = IO_SPACE_LIMIT,
	.name = "paravirt-ioport",
	.flags = IORESOURCE_IO | IORESOURCE_BUSY,
};

/*
 * Reserve the whole legacy IO space to prevent any legacy drivers
 * from wasting time probing for their hardware.  This is a fairly
 * brute-force approach to disabling all non-virtual drivers.
 *
 * Note that this must be called very early to have any effect.
 */
int paravirt_disable_iospace(void)
{
	return request_resource(&ioport_resource, &reserve_ioports);
}
static DEFINE_PER_CPU(enum paravirt_lazy_mode, paravirt_lazy_mode) = PARAVIRT_LAZY_NONE;

static inline void enter_lazy(enum paravirt_lazy_mode mode)
{
	BUG_ON(this_cpu_read(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);

	this_cpu_write(paravirt_lazy_mode, mode);
}

static void leave_lazy(enum paravirt_lazy_mode mode)
{
	BUG_ON(this_cpu_read(paravirt_lazy_mode) != mode);

	this_cpu_write(paravirt_lazy_mode, PARAVIRT_LAZY_NONE);
}

void paravirt_enter_lazy_mmu(void)
{
	enter_lazy(PARAVIRT_LAZY_MMU);
}

void paravirt_leave_lazy_mmu(void)
{
	leave_lazy(PARAVIRT_LAZY_MMU);
}

void paravirt_flush_lazy_mmu(void)
{
	preempt_disable();

	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
		arch_leave_lazy_mmu_mode();
		arch_enter_lazy_mmu_mode();
	}

	preempt_enable();
}
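/*
 * Sketch of how a backend uses lazy MMU mode: page-table updates made
 * between enter and leave may be queued and submitted in one batch,
 * roughly
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (; addr < end; addr += PAGE_SIZE, ptep++)
 *		set_pte_at(mm, addr, ptep, pte);   // may be queued
 *	arch_leave_lazy_mmu_mode();                // flush the batch
 *
 * On bare hardware the lazy_mode hooks further down are plain nops.
 */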
void paravirt_start_context_switch(struct task_struct *prev)
{
	BUG_ON(preemptible());

	if (this_cpu_read(paravirt_lazy_mode) == PARAVIRT_LAZY_MMU) {
		arch_leave_lazy_mmu_mode();
		set_ti_thread_flag(task_thread_info(prev), TIF_LAZY_MMU_UPDATES);
	}
	enter_lazy(PARAVIRT_LAZY_CPU);
}

void paravirt_end_context_switch(struct task_struct *next)
{
	BUG_ON(preemptible());

	leave_lazy(PARAVIRT_LAZY_CPU);

	if (test_and_clear_ti_thread_flag(task_thread_info(next), TIF_LAZY_MMU_UPDATES))
		arch_enter_lazy_mmu_mode();
}

enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
{
	if (in_interrupt())
		return PARAVIRT_LAZY_NONE;

	return this_cpu_read(paravirt_lazy_mode);
}
struct pv_info pv_info = {
	.name = "bare hardware",
#ifdef CONFIG_PARAVIRT_XXL
	.kernel_rpl = 0,
	.shared_kernel_pmd = 1,	/* Only used when CONFIG_X86_PAE is set */

#ifdef CONFIG_X86_64
	.extra_user_64bit_cs = __USER_CS,
#endif
#endif
};

#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
/* 32-bit pagetable entries */
#define PTE_IDENT	__PV_IS_CALLEE_SAVE(_paravirt_ident_32)
#else
/* 64-bit pagetable entries */
#define PTE_IDENT	__PV_IS_CALLEE_SAVE(_paravirt_ident_64)
#endif
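/*
 * PTE_IDENT routes pte_val/make_pte and friends to the identity
 * functions above; paravirt_patch_default() recognizes those and
 * patches each call site down to at most a single register move, so
 * pte conversions cost (nearly) nothing on bare hardware.
 */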
struct paravirt_patch_template pv_ops = {
	/* Init ops. */
	.init.patch		= native_patch,

	/* Time ops. */
	.time.sched_clock	= native_sched_clock,
	.time.steal_clock	= native_steal_clock,

	/* Cpu ops. */
	.cpu.cpuid		= native_cpuid,
	.cpu.get_debugreg	= native_get_debugreg,
	.cpu.set_debugreg	= native_set_debugreg,
	.cpu.read_cr0		= native_read_cr0,
	.cpu.write_cr0		= native_write_cr0,
	.cpu.write_cr4		= native_write_cr4,
#ifdef CONFIG_X86_64
	.cpu.read_cr8		= native_read_cr8,
	.cpu.write_cr8		= native_write_cr8,
#endif
	.cpu.wbinvd		= native_wbinvd,
	.cpu.read_msr		= native_read_msr,
	.cpu.write_msr		= native_write_msr,
	.cpu.read_msr_safe	= native_read_msr_safe,
	.cpu.write_msr_safe	= native_write_msr_safe,
	.cpu.read_pmc		= native_read_pmc,
	.cpu.load_tr_desc	= native_load_tr_desc,
	.cpu.set_ldt		= native_set_ldt,
	.cpu.load_gdt		= native_load_gdt,
	.cpu.load_idt		= native_load_idt,
	.cpu.store_tr		= native_store_tr,
	.cpu.load_tls		= native_load_tls,
#ifdef CONFIG_X86_64
	.cpu.load_gs_index	= native_load_gs_index,
#endif
	.cpu.write_ldt_entry	= native_write_ldt_entry,
	.cpu.write_gdt_entry	= native_write_gdt_entry,
	.cpu.write_idt_entry	= native_write_idt_entry,

	.cpu.alloc_ldt		= paravirt_nop,
	.cpu.free_ldt		= paravirt_nop,

	.cpu.load_sp0		= native_load_sp0,

#ifdef CONFIG_X86_64
	.cpu.usergs_sysret64	= native_usergs_sysret64,
#endif
	.cpu.iret		= native_iret,
	.cpu.swapgs		= native_swapgs,

	.cpu.set_iopl_mask	= native_set_iopl_mask,
	.cpu.io_delay		= native_io_delay,

	.cpu.start_context_switch	= paravirt_nop,
	.cpu.end_context_switch		= paravirt_nop,

	/* Irq ops. */
	.irq.save_fl		= __PV_IS_CALLEE_SAVE(native_save_fl),
	.irq.restore_fl		= __PV_IS_CALLEE_SAVE(native_restore_fl),
	.irq.irq_disable	= __PV_IS_CALLEE_SAVE(native_irq_disable),
	.irq.irq_enable		= __PV_IS_CALLEE_SAVE(native_irq_enable),
	.irq.safe_halt		= native_safe_halt,
	.irq.halt		= native_halt,

	/* Mmu ops. */
	.mmu.read_cr2		= native_read_cr2,
	.mmu.write_cr2		= native_write_cr2,
	.mmu.read_cr3		= __native_read_cr3,
	.mmu.write_cr3		= native_write_cr3,

	.mmu.flush_tlb_user	= native_flush_tlb,
	.mmu.flush_tlb_kernel	= native_flush_tlb_global,
	.mmu.flush_tlb_one_user	= native_flush_tlb_one_user,
	.mmu.flush_tlb_others	= native_flush_tlb_others,
	.mmu.tlb_remove_table	=
			(void (*)(struct mmu_gather *, void *))tlb_remove_page,

	.mmu.pgd_alloc		= __paravirt_pgd_alloc,
	.mmu.pgd_free		= paravirt_nop,

	.mmu.alloc_pte		= paravirt_nop,
	.mmu.alloc_pmd		= paravirt_nop,
	.mmu.alloc_pud		= paravirt_nop,
	.mmu.alloc_p4d		= paravirt_nop,
	.mmu.release_pte	= paravirt_nop,
	.mmu.release_pmd	= paravirt_nop,
	.mmu.release_pud	= paravirt_nop,
	.mmu.release_p4d	= paravirt_nop,

	.mmu.set_pte		= native_set_pte,
	.mmu.set_pte_at		= native_set_pte_at,
	.mmu.set_pmd		= native_set_pmd,

	.mmu.ptep_modify_prot_start	= __ptep_modify_prot_start,
	.mmu.ptep_modify_prot_commit	= __ptep_modify_prot_commit,

#if CONFIG_PGTABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
	.mmu.set_pte_atomic	= native_set_pte_atomic,
	.mmu.pte_clear		= native_pte_clear,
	.mmu.pmd_clear		= native_pmd_clear,
#endif
	.mmu.set_pud		= native_set_pud,

	.mmu.pmd_val		= PTE_IDENT,
	.mmu.make_pmd		= PTE_IDENT,

#if CONFIG_PGTABLE_LEVELS >= 4
	.mmu.pud_val		= PTE_IDENT,
	.mmu.make_pud		= PTE_IDENT,

	.mmu.set_p4d		= native_set_p4d,

#if CONFIG_PGTABLE_LEVELS >= 5
	.mmu.p4d_val		= PTE_IDENT,
	.mmu.make_p4d		= PTE_IDENT,

	.mmu.set_pgd		= native_set_pgd,
#endif /* CONFIG_PGTABLE_LEVELS >= 5 */
#endif /* CONFIG_PGTABLE_LEVELS >= 4 */
#endif /* CONFIG_PGTABLE_LEVELS >= 3 */

	.mmu.pte_val		= PTE_IDENT,
	.mmu.pgd_val		= PTE_IDENT,

	.mmu.make_pte		= PTE_IDENT,
	.mmu.make_pgd		= PTE_IDENT,

	.mmu.dup_mmap		= paravirt_nop,
	.mmu.exit_mmap		= paravirt_nop,
	.mmu.activate_mm	= paravirt_nop,

	.mmu.lazy_mode = {
		.enter		= paravirt_nop,
		.leave		= paravirt_nop,
		.flush		= paravirt_nop,
	},

	.mmu.set_fixmap		= native_set_fixmap,

#if defined(CONFIG_PARAVIRT_SPINLOCKS)
	/* Lock ops. */
#ifdef CONFIG_SMP
	.lock.queued_spin_lock_slowpath	= native_queued_spin_lock_slowpath,
	.lock.queued_spin_unlock	=
				PV_CALLEE_SAVE(__native_queued_spin_unlock),
	.lock.wait			= paravirt_nop,
	.lock.kick			= paravirt_nop,
	.lock.vcpu_is_preempted		=
				PV_CALLEE_SAVE(__native_vcpu_is_preempted),
#endif /* SMP */
#endif
};
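
/*
 * A hypervisor backend overrides these native defaults at boot; as a
 * rough sketch (details vary by guest type), Xen does the equivalent
 * of
 *
 *	pv_ops.mmu.set_pte = xen_set_pte;
 *	pv_ops.cpu.cpuid   = xen_cpuid;
 *
 * after which apply_paravirt() rewrites the patchable call sites.
 */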

/* At this point, native_get/set_debugreg has real function entries */
NOKPROBE_SYMBOL(native_get_debugreg);
NOKPROBE_SYMBOL(native_set_debugreg);
NOKPROBE_SYMBOL(native_load_idt);

EXPORT_SYMBOL_GPL(pv_ops);
EXPORT_SYMBOL_GPL(pv_info);