Commit | Line | Data |
---|---|---|
fd534e9b | 1 | // SPDX-License-Identifier: GPL-2.0-or-later |
d3561b7f RR |
2 | /* Paravirtualization interfaces |
3 | Copyright (C) 2006 Rusty Russell IBM Corporation | |
4 | ||
b1df07bd GOC |
5 | |
6 | 2007 - x86_64 support added by Glauber de Oliveira Costa, Red Hat Inc | |
d3561b7f | 7 | */ |
b1df07bd | 8 | |
d3561b7f | 9 | #include <linux/errno.h> |
186f4360 PG |
10 | #include <linux/init.h> |
11 | #include <linux/export.h> | |
d3561b7f RR |
12 | #include <linux/efi.h> |
13 | #include <linux/bcd.h> | |
ce6234b5 | 14 | #include <linux/highmem.h> |
376e2424 | 15 | #include <linux/kprobes.h> |
d3561b7f RR |
16 | |
17 | #include <asm/bug.h> | |
18 | #include <asm/paravirt.h> | |
50af5ead | 19 | #include <asm/debugreg.h> |
d3561b7f RR |
20 | #include <asm/desc.h> |
21 | #include <asm/setup.h> | |
a312b37b | 22 | #include <asm/pgtable.h> |
d3561b7f | 23 | #include <asm/time.h> |
eba0045f | 24 | #include <asm/pgalloc.h> |
d3561b7f RR |
25 | #include <asm/irq.h> |
26 | #include <asm/delay.h> | |
13623d79 RR |
27 | #include <asm/fixmap.h> |
28 | #include <asm/apic.h> | |
da181a8b | 29 | #include <asm/tlbflush.h> |
6cb9a835 | 30 | #include <asm/timer.h> |
f05e798a | 31 | #include <asm/special_insns.h> |
48a8b97c | 32 | #include <asm/tlb.h> |
99bcd4a6 | 33 | #include <asm/io_bitmap.h> |
d3561b7f | 34 | |
fc57a7c6 AL |
/*
 * nop stub, which must not clobber anything *including the stack* to
 * avoid confusing the entry prologues.
 *
 * Written in asm (a bare "ret") rather than as an empty C function so
 * the compiler cannot add any prologue/epilogue; placed in .entry.text
 * so it is a valid target for calls patched into entry code.
 */
extern void _paravirt_nop(void);
asm (".pushsection .entry.text, \"ax\"\n"
     ".global _paravirt_nop\n"
     "_paravirt_nop:\n\t"
     "ret\n\t"
     ".size _paravirt_nop, . - _paravirt_nop\n\t"
     ".type _paravirt_nop, @function\n\t"
     ".popsection");
d3561b7f | 47 | |
6f30c1ac | 48 | void __init default_banner(void) |
d3561b7f RR |
49 | { |
50 | printk(KERN_INFO "Booting paravirtualized kernel on %s\n", | |
93b1eab3 | 51 | pv_info.name); |
d3561b7f RR |
52 | } |
53 | ||
93b1eab3 JF |
/* Undefined instruction for dealing with missing ops pointers. */
static const unsigned char ud2a[] = { 0x0f, 0x0b };

/*
 * In-memory layout of a 5-byte x86 near CALL/JMP: one opcode byte
 * followed by a 32-bit relative displacement.  Packed so the struct
 * matches the instruction encoding exactly (no padding after opcode).
 */
struct branch {
	unsigned char opcode;
	u32 delta;
} __attribute__((packed));
61 | ||
1fc654cf | 62 | static unsigned paravirt_patch_call(void *insn_buff, const void *target, |
abc745f8 | 63 | unsigned long addr, unsigned len) |
63f70270 | 64 | { |
11e86dc7 | 65 | const int call_len = 5; |
1fc654cf | 66 | struct branch *b = insn_buff; |
11e86dc7 | 67 | unsigned long delta = (unsigned long)target - (addr+call_len); |
63f70270 | 68 | |
11e86dc7 IM |
69 | if (len < call_len) { |
70 | pr_warn("paravirt: Failed to patch indirect CALL at %ps\n", (void *)addr); | |
71 | /* Kernel might not be viable if patching fails, bail out: */ | |
72 | BUG_ON(1); | |
5800dc5c | 73 | } |
139ec7c4 | 74 | |
ab144f5e AK |
75 | b->opcode = 0xe8; /* call */ |
76 | b->delta = delta; | |
11e86dc7 | 77 | BUILD_BUG_ON(sizeof(*b) != call_len); |
139ec7c4 | 78 | |
11e86dc7 | 79 | return call_len; |
63f70270 JF |
80 | } |
81 | ||
9bad5658 | 82 | #ifdef CONFIG_PARAVIRT_XXL |
7847c7be JG |
/* identity function, which can be inlined */
/*
 * Returns its argument unchanged; used for pv ops that are pure
 * value pass-throughs (e.g. the PTE_IDENT make_pte/pte_val slots).
 * notrace: presumably to keep ftrace out of this patchable stub —
 * TODO(review): confirm.
 */
u64 notrace _paravirt_ident_64(u64 x)
{
	return x;
}
88 | ||
/*
 * Emit a 5-byte relative JMP to @target into @insn_buff for the patch
 * site at @addr.  Returns 5 on success; if the site is too small the
 * site is left untouched and @len is returned (worth a one-time warning
 * under RETPOLINE, since the unpatched indirect JMP then remains).
 */
static unsigned paravirt_patch_jmp(void *insn_buff, const void *target,
				   unsigned long addr, unsigned len)
{
	struct branch *b = insn_buff;
	/* Displacement is relative to the end of the 5-byte JMP. */
	unsigned long delta = (unsigned long)target - (addr+5);

	if (len < 5) {
#ifdef CONFIG_RETPOLINE
		WARN_ONCE(1, "Failing to patch indirect JMP in %ps\n", (void *)addr);
#endif
		return len;	/* call too long for patch site */
	}

	b->opcode = 0xe9;	/* jmp */
	b->delta = delta;

	return 5;
}
9bad5658 | 107 | #endif |
63f70270 | 108 | |
9043442b JG |
/* Defaults to true; disabled below when running on bare hardware. */
DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);

/*
 * Native spinlock setup: when not running under a hypervisor, disable
 * virt_spin_lock_key so the virt spinlock path is never taken.
 */
void __init native_pv_lock_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
		static_branch_disable(&virt_spin_lock_key);
}
116 | ||
/*
 * Default patcher for a pv_ops slot @type: depending on what the slot
 * points at, emit a ud2a (missing op), nothing (nop), an inlined
 * identity sequence, a direct JMP (iret/usergs_sysret64 never return
 * through the call site) or a direct CALL.  Returns the number of
 * bytes emitted into @insn_buff.
 */
unsigned paravirt_patch_default(u8 type, void *insn_buff,
				unsigned long addr, unsigned len)
{
	/*
	 * Neat trick to map patch type back to the call within the
	 * corresponding structure.
	 */
	void *opfunc = *((void **)&pv_ops + type);
	unsigned ret;

	if (opfunc == NULL)
		/* If there's no function, patch it with a ud2a (BUG) */
		ret = paravirt_patch_insns(insn_buff, len, ud2a, ud2a+sizeof(ud2a));
	else if (opfunc == _paravirt_nop)
		ret = 0;

#ifdef CONFIG_PARAVIRT_XXL
	/* identity functions just return their single argument */
	else if (opfunc == _paravirt_ident_64)
		ret = paravirt_patch_ident_64(insn_buff, len);

	else if (type == PARAVIRT_PATCH(cpu.iret) ||
		 type == PARAVIRT_PATCH(cpu.usergs_sysret64))
		/* If operation requires a jmp, then jmp */
		ret = paravirt_patch_jmp(insn_buff, opfunc, addr, len);
#endif
	else
		/* Otherwise call the function. */
		ret = paravirt_patch_call(insn_buff, opfunc, addr, len);

	return ret;
}
149 | ||
1fc654cf | 150 | unsigned paravirt_patch_insns(void *insn_buff, unsigned len, |
63f70270 JF |
151 | const char *start, const char *end) |
152 | { | |
153 | unsigned insn_len = end - start; | |
139ec7c4 | 154 | |
2777cae2 IM |
155 | /* Alternative instruction is too large for the patch site and we cannot continue: */ |
156 | BUG_ON(insn_len > len || start == NULL); | |
157 | ||
1fc654cf | 158 | memcpy(insn_buff, start, insn_len); |
139ec7c4 | 159 | |
139ec7c4 RR |
160 | return insn_len; |
161 | } | |
162 | ||
/* pv_ops wrapper: thin call-through to __native_flush_tlb(). */
static void native_flush_tlb(void)
{
	__native_flush_tlb();
}
167 | ||
/*
 * Global pages have to be flushed a bit differently. Not a real
 * performance problem because this does not happen often.
 */
/* pv_ops wrapper: thin call-through to __native_flush_tlb_global(). */
static void native_flush_tlb_global(void)
{
	__native_flush_tlb_global();
}
176 | ||
/* pv_ops wrapper: flush a single address via __native_flush_tlb_one_user(). */
static void native_flush_tlb_one_user(unsigned long addr)
{
	__native_flush_tlb_one_user(addr);
}
181 | ||
c5905afb IM |
/*
 * Static keys gating steal-time accounting; NOTE(review): presumably
 * enabled by hypervisor guest setup code — confirm at the users.
 */
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;

/* On bare hardware no hypervisor steals time, so always report zero. */
static u64 native_steal_clock(int cpu)
{
	return 0;
}
189 | ||
/* These are in entry.S (assembly kernel-exit paths, not C functions). */
extern void native_iret(void);
extern void native_usergs_sysret64(void);
d3561b7f | 193 | |
d572929c JF |
/* Resource spanning the entire legacy I/O port space. */
static struct resource reserve_ioports = {
	.start = 0,
	.end = IO_SPACE_LIMIT,
	.name = "paravirt-ioport",
	.flags = IORESOURCE_IO | IORESOURCE_BUSY,
};

/*
 * Reserve the whole legacy IO space to prevent any legacy drivers
 * from wasting time probing for their hardware. This is a fairly
 * brute-force approach to disabling all non-virtual drivers.
 *
 * Note that this must be called very early to have any effect.
 *
 * Returns request_resource()'s result: 0 on success, otherwise its
 * error code (e.g. when part of the space is already claimed).
 */
int paravirt_disable_iospace(void)
{
	return request_resource(&ioport_resource, &reserve_ioports);
}
212 | ||
8965c1c0 JF |
/* Per-CPU tracking of which lazy batching mode (if any) is active. */
static DEFINE_PER_CPU(enum paravirt_lazy_mode, paravirt_lazy_mode) = PARAVIRT_LAZY_NONE;

/* Enter lazy @mode on this CPU; lazy sections must not nest (BUG if one is active). */
static inline void enter_lazy(enum paravirt_lazy_mode mode)
{
	BUG_ON(this_cpu_read(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);

	this_cpu_write(paravirt_lazy_mode, mode);
}

/* Leave lazy @mode; BUG if this CPU is not actually in that mode. */
static void leave_lazy(enum paravirt_lazy_mode mode)
{
	BUG_ON(this_cpu_read(paravirt_lazy_mode) != mode);

	this_cpu_write(paravirt_lazy_mode, PARAVIRT_LAZY_NONE);
}

/* Begin batching MMU updates on this CPU. */
void paravirt_enter_lazy_mmu(void)
{
	enter_lazy(PARAVIRT_LAZY_MMU);
}

/* Stop batching MMU updates on this CPU. */
void paravirt_leave_lazy_mmu(void)
{
	leave_lazy(PARAVIRT_LAZY_MMU);
}
238 | ||
511ba86e BO |
/*
 * Force out any batched lazy MMU updates without permanently leaving
 * lazy MMU mode: if this CPU is in PARAVIRT_LAZY_MMU, bounce out of
 * and straight back into the mode.  Preemption is disabled so the mode
 * check and both transitions happen on the same CPU.
 */
void paravirt_flush_lazy_mmu(void)
{
	preempt_disable();

	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
		arch_leave_lazy_mmu_mode();
		arch_enter_lazy_mmu_mode();
	}

	preempt_enable();
}
250 | ||
#ifdef CONFIG_PARAVIRT_XXL
/*
 * Called (with preemption disabled) at the start of a context switch.
 * If the outgoing task @prev was batching MMU updates, flush them now
 * and record via TIF_LAZY_MMU_UPDATES that lazy MMU mode must be
 * re-entered when it runs again; then enter lazy CPU mode for the
 * duration of the switch itself.
 */
void paravirt_start_context_switch(struct task_struct *prev)
{
	BUG_ON(preemptible());

	if (this_cpu_read(paravirt_lazy_mode) == PARAVIRT_LAZY_MMU) {
		arch_leave_lazy_mmu_mode();
		set_ti_thread_flag(task_thread_info(prev), TIF_LAZY_MMU_UPDATES);
	}
	enter_lazy(PARAVIRT_LAZY_CPU);
}

/*
 * Counterpart of paravirt_start_context_switch(): leave lazy CPU mode
 * and, if the incoming task @next was interrupted mid-batch (flag set
 * above), re-enter lazy MMU mode on its behalf.
 */
void paravirt_end_context_switch(struct task_struct *next)
{
	BUG_ON(preemptible());

	leave_lazy(PARAVIRT_LAZY_CPU);

	if (test_and_clear_ti_thread_flag(task_thread_info(next), TIF_LAZY_MMU_UPDATES))
		arch_enter_lazy_mmu_mode();
}
#endif
8965c1c0 JF |
273 | |
/*
 * Report this CPU's current lazy mode.  Interrupt context always
 * reports PARAVIRT_LAZY_NONE, regardless of what the interrupted
 * task's per-CPU state says.
 */
enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
{
	if (in_interrupt())
		return PARAVIRT_LAZY_NONE;

	return this_cpu_read(paravirt_lazy_mode);
}
281 | ||
/* Platform description defaults: running on bare hardware. */
struct pv_info pv_info = {
	.name = "bare hardware",
#ifdef CONFIG_PARAVIRT_XXL
	.kernel_rpl = 0,
	.shared_kernel_pmd = 1,	/* Only used when CONFIG_X86_PAE is set */

#ifdef CONFIG_X86_64
	.extra_user_64bit_cs = __USER_CS,
#endif
#endif
};
d3561b7f | 293 | |
/* 64-bit pagetable entries */
#define PTE_IDENT	__PV_IS_CALLEE_SAVE(_paravirt_ident_64)

/*
 * Native (bare hardware) defaults for every paravirt operation.
 * Hypervisor guest code overrides individual slots during early boot;
 * slots that are pure no-ops on native hardware point at paravirt_nop
 * so the patcher can eliminate the call entirely.
 */
struct paravirt_patch_template pv_ops = {
	/* Init ops. */
	.init.patch = native_patch,

	/* Time ops. */
	.time.sched_clock = native_sched_clock,
	.time.steal_clock = native_steal_clock,

	/* Cpu ops. */
	.cpu.io_delay = native_io_delay,

#ifdef CONFIG_PARAVIRT_XXL
	.cpu.cpuid = native_cpuid,
	.cpu.get_debugreg = native_get_debugreg,
	.cpu.set_debugreg = native_set_debugreg,
	.cpu.read_cr0 = native_read_cr0,
	.cpu.write_cr0 = native_write_cr0,
	.cpu.write_cr4 = native_write_cr4,
	.cpu.wbinvd = native_wbinvd,
	.cpu.read_msr = native_read_msr,
	.cpu.write_msr = native_write_msr,
	.cpu.read_msr_safe = native_read_msr_safe,
	.cpu.write_msr_safe = native_write_msr_safe,
	.cpu.read_pmc = native_read_pmc,
	.cpu.load_tr_desc = native_load_tr_desc,
	.cpu.set_ldt = native_set_ldt,
	.cpu.load_gdt = native_load_gdt,
	.cpu.load_idt = native_load_idt,
	.cpu.store_tr = native_store_tr,
	.cpu.load_tls = native_load_tls,
#ifdef CONFIG_X86_64
	.cpu.load_gs_index = native_load_gs_index,
#endif
	.cpu.write_ldt_entry = native_write_ldt_entry,
	.cpu.write_gdt_entry = native_write_gdt_entry,
	.cpu.write_idt_entry = native_write_idt_entry,

	.cpu.alloc_ldt = paravirt_nop,
	.cpu.free_ldt = paravirt_nop,

	.cpu.load_sp0 = native_load_sp0,

#ifdef CONFIG_X86_64
	.cpu.usergs_sysret64 = native_usergs_sysret64,
#endif
	.cpu.iret = native_iret,
	.cpu.swapgs = native_swapgs,

#ifdef CONFIG_X86_IOPL_IOPERM
	.cpu.update_io_bitmap = native_tss_update_io_bitmap,
#endif

	.cpu.start_context_switch = paravirt_nop,
	.cpu.end_context_switch = paravirt_nop,

	/* Irq ops. */
	.irq.save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
	.irq.restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
	.irq.irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
	.irq.irq_enable = __PV_IS_CALLEE_SAVE(native_irq_enable),
	.irq.safe_halt = native_safe_halt,
	.irq.halt = native_halt,
#endif /* CONFIG_PARAVIRT_XXL */

	/* Mmu ops. */
	.mmu.flush_tlb_user = native_flush_tlb,
	.mmu.flush_tlb_kernel = native_flush_tlb_global,
	.mmu.flush_tlb_one_user = native_flush_tlb_one_user,
	.mmu.flush_tlb_others = native_flush_tlb_others,
	/* Cast adapts tlb_remove_page()'s page argument to the generic void *. */
	.mmu.tlb_remove_table =
			(void (*)(struct mmu_gather *, void *))tlb_remove_page,

	.mmu.exit_mmap = paravirt_nop,

#ifdef CONFIG_PARAVIRT_XXL
	.mmu.read_cr2 = __PV_IS_CALLEE_SAVE(native_read_cr2),
	.mmu.write_cr2 = native_write_cr2,
	.mmu.read_cr3 = __native_read_cr3,
	.mmu.write_cr3 = native_write_cr3,

	.mmu.pgd_alloc = __paravirt_pgd_alloc,
	.mmu.pgd_free = paravirt_nop,

	.mmu.alloc_pte = paravirt_nop,
	.mmu.alloc_pmd = paravirt_nop,
	.mmu.alloc_pud = paravirt_nop,
	.mmu.alloc_p4d = paravirt_nop,
	.mmu.release_pte = paravirt_nop,
	.mmu.release_pmd = paravirt_nop,
	.mmu.release_pud = paravirt_nop,
	.mmu.release_p4d = paravirt_nop,

	.mmu.set_pte = native_set_pte,
	.mmu.set_pte_at = native_set_pte_at,
	.mmu.set_pmd = native_set_pmd,

	.mmu.ptep_modify_prot_start = __ptep_modify_prot_start,
	.mmu.ptep_modify_prot_commit = __ptep_modify_prot_commit,

#if CONFIG_PGTABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
	.mmu.set_pte_atomic = native_set_pte_atomic,
	.mmu.pte_clear = native_pte_clear,
	.mmu.pmd_clear = native_pmd_clear,
#endif
	.mmu.set_pud = native_set_pud,

	/* Native pmd_val/make_pmd are bit-identity transforms. */
	.mmu.pmd_val = PTE_IDENT,
	.mmu.make_pmd = PTE_IDENT,

#if CONFIG_PGTABLE_LEVELS >= 4
	.mmu.pud_val = PTE_IDENT,
	.mmu.make_pud = PTE_IDENT,

	.mmu.set_p4d = native_set_p4d,

#if CONFIG_PGTABLE_LEVELS >= 5
	.mmu.p4d_val = PTE_IDENT,
	.mmu.make_p4d = PTE_IDENT,

	.mmu.set_pgd = native_set_pgd,
#endif /* CONFIG_PGTABLE_LEVELS >= 5 */
#endif /* CONFIG_PGTABLE_LEVELS >= 4 */
#endif /* CONFIG_PGTABLE_LEVELS >= 3 */

	.mmu.pte_val = PTE_IDENT,
	.mmu.pgd_val = PTE_IDENT,

	.mmu.make_pte = PTE_IDENT,
	.mmu.make_pgd = PTE_IDENT,

	.mmu.dup_mmap = paravirt_nop,
	.mmu.activate_mm = paravirt_nop,

	.mmu.lazy_mode = {
		.enter = paravirt_nop,
		.leave = paravirt_nop,
		.flush = paravirt_nop,
	},

	.mmu.set_fixmap = native_set_fixmap,
#endif /* CONFIG_PARAVIRT_XXL */

#if defined(CONFIG_PARAVIRT_SPINLOCKS)
	/* Lock ops. */
#ifdef CONFIG_SMP
	.lock.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
	.lock.queued_spin_unlock =
				PV_CALLEE_SAVE(__native_queued_spin_unlock),
	.lock.wait = paravirt_nop,
	.lock.kick = paravirt_nop,
	.lock.vcpu_is_preempted =
				PV_CALLEE_SAVE(__native_vcpu_is_preempted),
#endif /* SMP */
#endif
};
0dbe5a11 | 453 | |
#ifdef CONFIG_PARAVIRT_XXL
/* At this point, native_get/set_debugreg has real function entries */
/*
 * NOTE(review): presumably blacklisted because kprobing these would
 * recurse through the debug/IDT machinery kprobes itself relies on —
 * confirm against the kprobes documentation.
 */
NOKPROBE_SYMBOL(native_get_debugreg);
NOKPROBE_SYMBOL(native_set_debugreg);
NOKPROBE_SYMBOL(native_load_idt);
#endif

/* Exported so modules can use the paravirt call slots and platform info. */
EXPORT_SYMBOL(pv_ops);
EXPORT_SYMBOL_GPL(pv_info);