arch/x86/kernel/paravirt.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*  Paravirtualization interfaces
    Copyright (C) 2006 Rusty Russell IBM Corporation


    2007 - x86_64 support added by Glauber de Oliveira Costa, Red Hat Inc
*/

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/efi.h>
#include <linux/bcd.h>
#include <linux/highmem.h>
#include <linux/kprobes.h>
#include <linux/pgtable.h>
#include <linux/static_call.h>

#include <asm/bug.h>
#include <asm/paravirt.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/time.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/delay.h>
#include <asm/fixmap.h>
#include <asm/apic.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/special_insns.h>
#include <asm/tlb.h>
#include <asm/io_bitmap.h>
#include <asm/gsseg.h>
#include <asm/msr.h>

/* stub always returning 0. */
DEFINE_ASM_FUNC(paravirt_ret0, "xor %eax,%eax", .entry.text);

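/* Announce at boot which paravirt environment (pv_info.name) we are running on. */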
void __init default_banner(void)
{
        printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
               pv_info.name);
}

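/*
 * Native implementations of the PARAVIRT_XXL low-level hooks, emitted as
 * small asm functions so the paravirt patching machinery can replace the
 * indirect calls with these instruction sequences.  _paravirt_ident_64()
 * just returns its 64-bit argument and backs the PTE_IDENT conversions
 * further down.
 */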
#ifdef CONFIG_PARAVIRT_XXL
DEFINE_ASM_FUNC(_paravirt_ident_64, "mov %rdi, %rax", .text);
DEFINE_ASM_FUNC(pv_native_save_fl, "pushf; pop %rax", .noinstr.text);
DEFINE_ASM_FUNC(pv_native_irq_disable, "cli", .noinstr.text);
DEFINE_ASM_FUNC(pv_native_irq_enable, "sti", .noinstr.text);
DEFINE_ASM_FUNC(pv_native_read_cr2, "mov %cr2, %rax", .noinstr.text);
#endif

DEFINE_STATIC_KEY_FALSE(virt_spin_lock_key);

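/*
 * When running under a hypervisor, enable virt_spin_lock_key so the
 * qspinlock slowpath can fall back to a simple test-and-set lock: fair
 * queueing behaves badly when virtual CPUs are preempted while holding
 * or waiting for a lock.
 */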
void __init native_pv_lock_init(void)
{
        if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
                static_branch_enable(&virt_spin_lock_key);
}

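/*
 * Steal time accounting is disabled by default; hypervisor guest support
 * code enables these keys and overrides pv_steal_clock when the host
 * exposes stolen time.
 */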
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;

static u64 native_steal_clock(int cpu)
{
        return 0;
}

DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);
DEFINE_STATIC_CALL(pv_sched_clock, native_sched_clock);

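/* Let a hypervisor clock source replace native_sched_clock(). */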
void paravirt_set_sched_clock(u64 (*func)(void))
{
        static_call_update(pv_sched_clock, func);
}

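/*
 * Out-of-line, noinstr wrapper around the inline native_safe_halt() for
 * the default .irq.safe_halt hook.
 */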
static noinstr void pv_native_safe_halt(void)
{
        native_safe_halt();
}

#ifdef CONFIG_PARAVIRT_XXL
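/*
 * noinstr wrappers around the inline native CR2/CR3 and debug register
 * accessors; the PARAVIRT_XXL pv_ops entries below need real function
 * addresses that are safe to call from noinstr code.
 */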
static noinstr void pv_native_write_cr2(unsigned long val)
{
        native_write_cr2(val);
}

static noinstr unsigned long pv_native_read_cr3(void)
{
        return __native_read_cr3();
}

static noinstr void pv_native_write_cr3(unsigned long cr3)
{
        native_write_cr3(cr3);
}

static noinstr unsigned long pv_native_get_debugreg(int regno)
{
        return native_get_debugreg(regno);
}

static noinstr void pv_native_set_debugreg(int regno, unsigned long val)
{
        native_set_debugreg(regno, val);
}
#endif

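/* Description of the environment we are running on; "bare hardware" by default. */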
struct pv_info pv_info = {
        .name = "bare hardware",
#ifdef CONFIG_PARAVIRT_XXL
        .extra_user_64bit_cs = __USER_CS,
#endif
};

/* 64-bit pagetable entries */
#define PTE_IDENT       __PV_IS_CALLEE_SAVE(_paravirt_ident_64)

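/*
 * Default paravirt operations: every hook points at the native
 * implementation (or is a nop).  Hypervisor guest code (e.g. Xen PV in
 * arch/x86/xen/enlighten_pv.c) overrides individual entries early during
 * boot, before the call sites are patched.
 */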
struct paravirt_patch_template pv_ops = {
        /* Cpu ops. */
        .cpu.io_delay           = native_io_delay,

#ifdef CONFIG_PARAVIRT_XXL
        .cpu.cpuid              = native_cpuid,
        .cpu.get_debugreg       = pv_native_get_debugreg,
        .cpu.set_debugreg       = pv_native_set_debugreg,
        .cpu.read_cr0           = native_read_cr0,
        .cpu.write_cr0          = native_write_cr0,
        .cpu.write_cr4          = native_write_cr4,
        .cpu.read_msr           = native_read_msr,
        .cpu.write_msr          = native_write_msr,
        .cpu.read_msr_safe      = native_read_msr_safe,
        .cpu.write_msr_safe     = native_write_msr_safe,
        .cpu.read_pmc           = native_read_pmc,
        .cpu.load_tr_desc       = native_load_tr_desc,
        .cpu.set_ldt            = native_set_ldt,
        .cpu.load_gdt           = native_load_gdt,
        .cpu.load_idt           = native_load_idt,
        .cpu.store_tr           = native_store_tr,
        .cpu.load_tls           = native_load_tls,
        .cpu.load_gs_index      = native_load_gs_index,
        .cpu.write_ldt_entry    = native_write_ldt_entry,
        .cpu.write_gdt_entry    = native_write_gdt_entry,
        .cpu.write_idt_entry    = native_write_idt_entry,

        .cpu.alloc_ldt          = paravirt_nop,
        .cpu.free_ldt           = paravirt_nop,

        .cpu.load_sp0           = native_load_sp0,

#ifdef CONFIG_X86_IOPL_IOPERM
        .cpu.invalidate_io_bitmap       = native_tss_invalidate_io_bitmap,
        .cpu.update_io_bitmap           = native_tss_update_io_bitmap,
#endif

        .cpu.start_context_switch       = paravirt_nop,
        .cpu.end_context_switch         = paravirt_nop,

        /* Irq ops. */
        .irq.save_fl            = __PV_IS_CALLEE_SAVE(pv_native_save_fl),
        .irq.irq_disable        = __PV_IS_CALLEE_SAVE(pv_native_irq_disable),
        .irq.irq_enable         = __PV_IS_CALLEE_SAVE(pv_native_irq_enable),
#endif /* CONFIG_PARAVIRT_XXL */

        /* Irq HLT ops. */
        .irq.safe_halt          = pv_native_safe_halt,
        .irq.halt               = native_halt,

        /* Mmu ops. */
        .mmu.flush_tlb_user     = native_flush_tlb_local,
        .mmu.flush_tlb_kernel   = native_flush_tlb_global,
        .mmu.flush_tlb_one_user = native_flush_tlb_one_user,
        .mmu.flush_tlb_multi    = native_flush_tlb_multi,

        .mmu.exit_mmap          = paravirt_nop,
        .mmu.notify_page_enc_status_changed     = paravirt_nop,

#ifdef CONFIG_PARAVIRT_XXL
        .mmu.read_cr2           = __PV_IS_CALLEE_SAVE(pv_native_read_cr2),
        .mmu.write_cr2          = pv_native_write_cr2,
        .mmu.read_cr3           = pv_native_read_cr3,
        .mmu.write_cr3          = pv_native_write_cr3,

        .mmu.pgd_alloc          = __paravirt_pgd_alloc,
        .mmu.pgd_free           = paravirt_nop,

        .mmu.alloc_pte          = paravirt_nop,
        .mmu.alloc_pmd          = paravirt_nop,
        .mmu.alloc_pud          = paravirt_nop,
        .mmu.alloc_p4d          = paravirt_nop,
        .mmu.release_pte        = paravirt_nop,
        .mmu.release_pmd        = paravirt_nop,
        .mmu.release_pud        = paravirt_nop,
        .mmu.release_p4d        = paravirt_nop,

        .mmu.set_pte            = native_set_pte,
        .mmu.set_pmd            = native_set_pmd,

        .mmu.ptep_modify_prot_start     = __ptep_modify_prot_start,
        .mmu.ptep_modify_prot_commit    = __ptep_modify_prot_commit,

        .mmu.set_pud            = native_set_pud,

        .mmu.pmd_val            = PTE_IDENT,
        .mmu.make_pmd           = PTE_IDENT,

        .mmu.pud_val            = PTE_IDENT,
        .mmu.make_pud           = PTE_IDENT,

        .mmu.set_p4d            = native_set_p4d,

        .mmu.p4d_val            = PTE_IDENT,
        .mmu.make_p4d           = PTE_IDENT,

        .mmu.set_pgd            = native_set_pgd,

        .mmu.pte_val            = PTE_IDENT,
        .mmu.pgd_val            = PTE_IDENT,

        .mmu.make_pte           = PTE_IDENT,
        .mmu.make_pgd           = PTE_IDENT,

        .mmu.enter_mmap         = paravirt_nop,

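        /*
         * Lazy MMU/CPU mode is a nop natively; Xen PV uses it to batch
         * page table updates into fewer hypercalls.
         */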
        .mmu.lazy_mode = {
                .enter          = paravirt_nop,
                .leave          = paravirt_nop,
                .flush          = paravirt_nop,
        },

        .mmu.set_fixmap         = native_set_fixmap,
#endif /* CONFIG_PARAVIRT_XXL */

#if defined(CONFIG_PARAVIRT_SPINLOCKS)
        /* Lock ops. */
#ifdef CONFIG_SMP
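        /*
         * Native qspinlock slowpath; .wait/.kick stay nops because there
         * is no hypervisor to notify, and __native_vcpu_is_preempted()
         * always reports false.
         */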
        .lock.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
        .lock.queued_spin_unlock        =
                                PV_CALLEE_SAVE(__native_queued_spin_unlock),
        .lock.wait                      = paravirt_nop,
        .lock.kick                      = paravirt_nop,
        .lock.vcpu_is_preempted         =
                                PV_CALLEE_SAVE(__native_vcpu_is_preempted),
#endif /* SMP */
#endif
};

#ifdef CONFIG_PARAVIRT_XXL
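/* native_load_idt() may run in contexts that are not kprobe safe. */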
NOKPROBE_SYMBOL(native_load_idt);
#endif

EXPORT_SYMBOL(pv_ops);
EXPORT_SYMBOL_GPL(pv_info);