/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PARAVIRT_TYPES_H
#define _ASM_X86_PARAVIRT_TYPES_H

/* Bitmask of what can be clobbered: usually at least eax. */
#define CLBR_EAX  (1 << 0)
#define CLBR_ECX  (1 << 1)
#define CLBR_EDX  (1 << 2)
#define CLBR_EDI  (1 << 3)

#ifdef CONFIG_X86_32
/* CLBR_ANY should match all the registers the platform has. For i386, that's all of them. */
#define CLBR_ANY  ((1 << 4) - 1)

#define CLBR_ARG_REGS	(CLBR_EAX | CLBR_EDX | CLBR_ECX)
#define CLBR_RET_REG	(CLBR_EAX | CLBR_EDX)
#else
#define CLBR_RAX  CLBR_EAX
#define CLBR_RCX  CLBR_ECX
#define CLBR_RDX  CLBR_EDX
#define CLBR_RDI  CLBR_EDI
#define CLBR_RSI  (1 << 4)
#define CLBR_R8   (1 << 5)
#define CLBR_R9   (1 << 6)
#define CLBR_R10  (1 << 7)
#define CLBR_R11  (1 << 8)

#define CLBR_ANY  ((1 << 9) - 1)

#define CLBR_ARG_REGS	(CLBR_RDI | CLBR_RSI | CLBR_RDX | \
			 CLBR_RCX | CLBR_R8 | CLBR_R9)
#define CLBR_RET_REG	(CLBR_RAX)

#endif /* X86_64 */

#ifndef __ASSEMBLY__

#include <asm/desc_defs.h>
#include <asm/pgtable_types.h>
#include <asm/nospec-branch.h>

struct page;
struct thread_struct;
struct desc_ptr;
struct tss_struct;
struct mm_struct;
struct desc_struct;
struct task_struct;
struct cpumask;
struct flush_tlb_info;
struct mmu_gather;
struct vm_area_struct;

/*
 * Wrapper type for pointers to code which uses the non-standard
 * calling convention.  See PV_CALLEE_SAVE_REGS_THUNK in <asm/paravirt.h>.
 */
struct paravirt_callee_save {
	void *func;
};
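
/*
 * Example (illustrative): <asm/paravirt.h> builds these wrappers with
 * PV_CALLEE_SAVE(), pointing func at the register-saving thunk that
 * PV_CALLEE_SAVE_REGS_THUNK() emits around an ordinary C function:
 *
 *	#define PV_CALLEE_SAVE(func)					\
 *		((struct paravirt_callee_save) { __raw_callee_save_##func })
 */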

/* general info */
struct pv_info {
#ifdef CONFIG_PARAVIRT_XXL
	u16 extra_user_64bit_cs;  /* __USER_CS if none */
#endif

	const char *name;
};

struct pv_init_ops {
	/*
	 * Patch may replace one of the defined code sequences with
	 * arbitrary code, subject to the same register constraints.
	 * This generally means the code is not free to clobber any
	 * registers other than EAX.  The patch function should return
	 * the number of bytes of code generated, as we nop-pad the
	 * rest in generic code.
	 */
	unsigned (*patch)(u8 type, void *insn_buff,
			  unsigned long addr, unsigned len);
} __no_randomize_layout;
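
/*
 * Example (illustrative): a minimal backend patch hook can defer
 * everything to the generic patcher, which converts the indirect call
 * into a direct call (or inline code) where possible; "my_patch" is a
 * hypothetical name:
 *
 *	unsigned my_patch(u8 type, void *insn_buff,
 *			  unsigned long addr, unsigned len)
 *	{
 *		return paravirt_patch_default(type, insn_buff, addr, len);
 *	}
 */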

#ifdef CONFIG_PARAVIRT_XXL
struct pv_lazy_ops {
	/* Set deferred update mode, used for batching operations. */
	void (*enter)(void);
	void (*leave)(void);
	void (*flush)(void);
} __no_randomize_layout;
#endif

struct pv_cpu_ops {
	/* hooks for various privileged instructions */
	void (*io_delay)(void);

#ifdef CONFIG_PARAVIRT_XXL
	unsigned long (*get_debugreg)(int regno);
	void (*set_debugreg)(int regno, unsigned long value);

	unsigned long (*read_cr0)(void);
	void (*write_cr0)(unsigned long);

	void (*write_cr4)(unsigned long);

	/* Segment descriptor handling */
	void (*load_tr_desc)(void);
	void (*load_gdt)(const struct desc_ptr *);
	void (*load_idt)(const struct desc_ptr *);
	void (*set_ldt)(const void *desc, unsigned entries);
	unsigned long (*store_tr)(void);
	void (*load_tls)(struct thread_struct *t, unsigned int cpu);
	void (*load_gs_index)(unsigned int idx);
	void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
				const void *desc);
	void (*write_gdt_entry)(struct desc_struct *,
				int entrynum, const void *desc, int size);
	void (*write_idt_entry)(gate_desc *,
				int entrynum, const gate_desc *gate);
	void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
	void (*free_ldt)(struct desc_struct *ldt, unsigned entries);

	void (*load_sp0)(unsigned long sp0);

#ifdef CONFIG_X86_IOPL_IOPERM
	void (*invalidate_io_bitmap)(void);
	void (*update_io_bitmap)(void);
#endif

	void (*wbinvd)(void);

	/* cpuid emulation, mostly so that caps bits can be disabled */
	void (*cpuid)(unsigned int *eax, unsigned int *ebx,
		      unsigned int *ecx, unsigned int *edx);

	/* Unsafe MSR operations.  These will warn or panic on failure. */
	u64 (*read_msr)(unsigned int msr);
	void (*write_msr)(unsigned int msr, unsigned low, unsigned high);

	/*
	 * Safe MSR operations.
	 * read sets err to 0 or -EIO.  write returns 0 or -EIO.
	 */
	u64 (*read_msr_safe)(unsigned int msr, int *err);
	int (*write_msr_safe)(unsigned int msr, unsigned low, unsigned high);

	u64 (*read_pmc)(int counter);

	void (*start_context_switch)(struct task_struct *prev);
	void (*end_context_switch)(struct task_struct *next);
#endif
} __no_randomize_layout;
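
/*
 * Example (illustrative, hypothetical function names): a hypervisor
 * backend overrides individual entries at boot rather than replacing
 * the whole structure:
 *
 *	pv_ops.cpu.io_delay = my_hv_io_delay;
 *	pv_ops.cpu.cpuid    = my_hv_cpuid;
 */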

struct pv_irq_ops {
#ifdef CONFIG_PARAVIRT_XXL
	/*
	 * Get/set interrupt state.  save_fl is expected to use X86_EFLAGS_IF;
	 * all other bits returned from save_fl are undefined.
	 *
	 * NOTE: Callers of these functions expect the callee to preserve
	 * more registers than the standard C calling convention does.
	 */
	struct paravirt_callee_save save_fl;
	struct paravirt_callee_save irq_disable;
	struct paravirt_callee_save irq_enable;

	void (*safe_halt)(void);
	void (*halt)(void);
#endif
} __no_randomize_layout;
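
/*
 * Example (illustrative): these slots are filled through the
 * callee-save wrappers from <asm/paravirt.h>; the native setup uses
 * e.g.
 *
 *	.irq.save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
 */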

struct pv_mmu_ops {
	/* TLB operations */
	void (*flush_tlb_user)(void);
	void (*flush_tlb_kernel)(void);
	void (*flush_tlb_one_user)(unsigned long addr);
	void (*flush_tlb_others)(const struct cpumask *cpus,
				 const struct flush_tlb_info *info);

	void (*tlb_remove_table)(struct mmu_gather *tlb, void *table);

	/* Hook for intercepting the destruction of an mm_struct. */
	void (*exit_mmap)(struct mm_struct *mm);

#ifdef CONFIG_PARAVIRT_XXL
	struct paravirt_callee_save read_cr2;
	void (*write_cr2)(unsigned long);

	unsigned long (*read_cr3)(void);
	void (*write_cr3)(unsigned long);

	/* Hooks for intercepting the creation/use of an mm_struct. */
	void (*activate_mm)(struct mm_struct *prev,
			    struct mm_struct *next);
	void (*dup_mmap)(struct mm_struct *oldmm,
			 struct mm_struct *mm);

	/* Hooks for allocating and freeing a pagetable top-level */
	int  (*pgd_alloc)(struct mm_struct *mm);
	void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);

	/*
	 * Hooks for allocating/releasing pagetable pages when they're
	 * attached to a pagetable
	 */
	void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_p4d)(struct mm_struct *mm, unsigned long pfn);
	void (*release_pte)(unsigned long pfn);
	void (*release_pmd)(unsigned long pfn);
	void (*release_pud)(unsigned long pfn);
	void (*release_p4d)(unsigned long pfn);

	/* Pagetable manipulation functions */
	void (*set_pte)(pte_t *ptep, pte_t pteval);
	void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);

	pte_t (*ptep_modify_prot_start)(struct vm_area_struct *vma, unsigned long addr,
					pte_t *ptep);
	void (*ptep_modify_prot_commit)(struct vm_area_struct *vma, unsigned long addr,
					pte_t *ptep, pte_t pte);

	struct paravirt_callee_save pte_val;
	struct paravirt_callee_save make_pte;

	struct paravirt_callee_save pgd_val;
	struct paravirt_callee_save make_pgd;

	void (*set_pud)(pud_t *pudp, pud_t pudval);

	struct paravirt_callee_save pmd_val;
	struct paravirt_callee_save make_pmd;

	struct paravirt_callee_save pud_val;
	struct paravirt_callee_save make_pud;

	void (*set_p4d)(p4d_t *p4dp, p4d_t p4dval);

#if CONFIG_PGTABLE_LEVELS >= 5
	struct paravirt_callee_save p4d_val;
	struct paravirt_callee_save make_p4d;

	void (*set_pgd)(pgd_t *pgdp, pgd_t pgdval);
#endif	/* CONFIG_PGTABLE_LEVELS >= 5 */

	struct pv_lazy_ops lazy_mode;

	/* dom0 ops */

	/* Sometimes the physical address is a pfn, and sometimes it's
	   an mfn.  We can tell which is which from the index. */
	void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
			   phys_addr_t phys, pgprot_t flags);
#endif
} __no_randomize_layout;

struct arch_spinlock;
#ifdef CONFIG_SMP
#include <asm/spinlock_types.h>
#endif

struct qspinlock;

struct pv_lock_ops {
	void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
	struct paravirt_callee_save queued_spin_unlock;

	void (*wait)(u8 *ptr, u8 val);
	void (*kick)(int cpu);

	struct paravirt_callee_save vcpu_is_preempted;
} __no_randomize_layout;
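
/*
 * Example (illustrative): a KVM guest wires up the paravirt spinlock
 * hooks roughly like
 *
 *	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
 *	pv_ops.lock.queued_spin_unlock =
 *		PV_CALLEE_SAVE(__pv_queued_spin_unlock);
 */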

/* This contains all the paravirt structures: we get a convenient
 * number for each function using the offset which we use to indicate
 * what to patch. */
struct paravirt_patch_template {
	struct pv_init_ops	init;
	struct pv_cpu_ops	cpu;
	struct pv_irq_ops	irq;
	struct pv_mmu_ops	mmu;
	struct pv_lock_ops	lock;
} __no_randomize_layout;

extern struct pv_info pv_info;
extern struct paravirt_patch_template pv_ops;
extern void (*paravirt_iret)(void);

#define PARAVIRT_PATCH(x)					\
	(offsetof(struct paravirt_patch_template, x) / sizeof(void *))

#define paravirt_type(op)				\
	[paravirt_typenum] "i" (PARAVIRT_PATCH(op)),	\
	[paravirt_opptr] "i" (&(pv_ops.op))
#define paravirt_clobber(clobber)		\
	[paravirt_clobber] "i" (clobber)

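/*
 * Worked example (illustrative): PARAVIRT_PATCH() turns a member name
 * into a slot number by dividing its byte offset by the pointer size.
 * If, on some 64-bit config, pv_ops.mmu.write_cr3 sat at byte offset
 * 368 (a made-up value), then
 *
 *	PARAVIRT_PATCH(mmu.write_cr3) == 368 / 8 == 46
 */
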
/*
 * Generate some code, and mark it as patchable by the
 * apply_paravirt() alternate instruction patcher.
 */
#define _paravirt_alt(insn_string, type, clobber)	\
	"771:\n\t" insn_string "\n" "772:\n"		\
	".pushsection .parainstructions,\"a\"\n"	\
	_ASM_ALIGN "\n"					\
	_ASM_PTR " 771b\n"				\
	" .byte " type "\n"				\
	" .byte 772b-771b\n"				\
	" .short " clobber "\n"				\
	".popsection\n"

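/*
 * Illustrative note: each record emitted into .parainstructions above
 * roughly mirrors struct paravirt_patch_site at the end of this
 * header: the address of label 771 (the patchable instructions), the
 * op type byte, and the length byte 772b-771b.
 */
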
/* Generate patchable code, with the default asm parameters. */
#define paravirt_alt(insn_string)					\
	_paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")

/* Simple instruction patching code. */
#define NATIVE_LABEL(a,x,b) "\n\t.globl " a #x "_" #b "\n" a #x "_" #b ":\n\t"

unsigned paravirt_patch_default(u8 type, void *insn_buff, unsigned long addr, unsigned len);
unsigned paravirt_patch_insns(void *insn_buff, unsigned len, const char *start, const char *end);

unsigned native_patch(u8 type, void *insn_buff, unsigned long addr, unsigned len);

int paravirt_disable_iospace(void);

/*
 * This generates an indirect call based on the operation type number.
 * The type number, computed in PARAVIRT_PATCH, is derived from the
 * offset into the paravirt_patch_template structure, and can therefore be
 * freely converted back into a structure offset.
 */
#define PARAVIRT_CALL					\
	ANNOTATE_RETPOLINE_SAFE				\
	"call *%c[paravirt_opptr];"

/*
 * These macros are intended to wrap calls through one of the paravirt
 * ops structs, so that they can be later identified and patched at
 * runtime.
 *
 * Normally, a call to a pv_op function is a simple indirect call:
 * (pv_op_struct.operations)(args...).
 *
 * Unfortunately, this is a relatively slow operation for modern CPUs,
 * because the CPU cannot necessarily determine what the destination
 * address is.  In this case, the address is a runtime constant, so at
 * the very least we can patch the call to be a simple direct call, or,
 * ideally, patch an inline implementation into the callsite.  (Direct
 * calls are essentially free, because the call and return addresses
 * are completely predictable.)
 *
 * For i386, these macros rely on the standard gcc "regparm(3)" calling
 * convention, in which the first three arguments are placed in %eax,
 * %edx, %ecx (in that order), and the remaining arguments are placed
 * on the stack.  All caller-save registers (eax,edx,ecx) are expected
 * to be modified (either clobbered or used for return values).
 * x86_64, on the other hand, already specifies a register-based calling
 * convention, returning in %rax, with parameters going in %rdi, %rsi,
 * %rdx, and %rcx.  Note that for this reason, x86_64 does not need any
 * special handling for dealing with 4 arguments, unlike i386.
 * However, x86_64 also has to clobber all caller-saved registers, which
 * unfortunately are quite a few (r8-r11).
 *
 * The call instruction itself is marked by placing its start address
 * and size into the .parainstructions section, so that
 * apply_paravirt() in arch/x86/kernel/alternative.c can do the
 * appropriate patching under the control of the backend pv_init_ops
 * implementation.
 *
 * Unfortunately there's no way to get gcc to generate the args setup
 * for the call, and then allow the call itself to be generated by an
 * inline asm.  Because of this, we must do the complete arg setup and
 * return value handling from within these macros.  This is fairly
 * cumbersome.
 *
 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
 * It could be extended to more arguments, but there would be little
 * to be gained from that.  For each number of arguments, there are
 * the two VCALL and CALL variants for void and non-void functions.
 *
 * When there is a return value, the invoker of the macro must specify
 * the return type.  The macro then uses sizeof() on that type to
 * determine whether it's a 32- or 64-bit value, and places the return
 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
 * 64-bit).  For x86_64 machines, it just returns in %rax regardless of
 * the return value size.
 *
 * 64-bit arguments are passed as a pair of adjacent 32-bit arguments;
 * i386 passes them in low,high order.
 *
 * Small structures are passed and returned in registers.  The macro
 * calling convention can't directly deal with this, so the wrapper
 * functions must do this.
 *
 * These PVOP_* macros are only defined within this header.  This
 * means that all uses must be wrapped in inline functions.  This also
 * makes sure the incoming and outgoing types are always correct.
 */
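
/*
 * Example (illustrative): a typical wrapper in <asm/paravirt.h> hides
 * exactly one PVOP_* invocation behind an inline function:
 *
 *	static inline void write_cr3(unsigned long x)
 *	{
 *		PVOP_VCALL1(mmu.write_cr3, x);
 *	}
 */
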
#ifdef CONFIG_X86_32
#define PVOP_CALL_ARGS						\
	unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx;

#define PVOP_CALL_ARG1(x)		"a" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)		"d" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)		"c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS		"=a" (__eax), "=d" (__edx),	\
					"=c" (__ecx)
#define PVOP_CALL_CLOBBERS		PVOP_VCALL_CLOBBERS

#define PVOP_VCALLEE_CLOBBERS		"=a" (__eax), "=d" (__edx)
#define PVOP_CALLEE_CLOBBERS		PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS
#define VEXTRA_CLOBBERS
#else  /* CONFIG_X86_64 */
/* [re]ax isn't an arg, but the return val */
#define PVOP_CALL_ARGS						\
	unsigned long __edi = __edi, __esi = __esi,		\
		      __edx = __edx, __ecx = __ecx, __eax = __eax;

#define PVOP_CALL_ARG1(x)		"D" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)		"S" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)		"d" ((unsigned long)(x))
#define PVOP_CALL_ARG4(x)		"c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS	"=D" (__edi),			\
				"=S" (__esi), "=d" (__edx),	\
				"=c" (__ecx)
#define PVOP_CALL_CLOBBERS	PVOP_VCALL_CLOBBERS, "=a" (__eax)

/* void functions are still allowed [re]ax for scratch */
#define PVOP_VCALLEE_CLOBBERS	"=a" (__eax)
#define PVOP_CALLEE_CLOBBERS	PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS	 , "r8", "r9", "r10", "r11"
#define VEXTRA_CLOBBERS	 , "rax", "r8", "r9", "r10", "r11"
#endif	/* CONFIG_X86_32 */

#ifdef CONFIG_PARAVIRT_DEBUG
#define PVOP_TEST_NULL(op)	BUG_ON(pv_ops.op == NULL)
#else
#define PVOP_TEST_NULL(op)	((void)pv_ops.op)
#endif

#define PVOP_RETVAL(rettype)						\
	({	unsigned long __mask = ~0UL;				\
		BUILD_BUG_ON(sizeof(rettype) > sizeof(unsigned long));	\
		switch (sizeof(rettype)) {				\
		case 1: __mask =       0xffUL; break;			\
		case 2: __mask =     0xffffUL; break;			\
		case 4: __mask = 0xffffffffUL; break;			\
		default: break;						\
		}							\
		__mask & __eax;						\
	})

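/*
 * Worked example (illustrative): for a one-byte return type, sizeof()
 * is 1, so __mask becomes 0xff and PVOP_RETVAL(u8) evaluates to
 * (0xff & __eax), keeping only the low byte the callee left in [re]ax.
 */
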
#define ____PVOP_CALL(ret, op, clbr, call_clbr, extra_clbr, ...)	\
	({								\
		PVOP_CALL_ARGS;						\
		PVOP_TEST_NULL(op);					\
		asm volatile(paravirt_alt(PARAVIRT_CALL)		\
			     : call_clbr, ASM_CALL_CONSTRAINT		\
			     : paravirt_type(op),			\
			       paravirt_clobber(clbr),			\
			       ##__VA_ARGS__				\
			     : "memory", "cc" extra_clbr);		\
		ret;							\
	})

#define ____PVOP_ALT_CALL(ret, op, alt, cond, clbr, call_clbr,		\
			  extra_clbr, ...)				\
	({								\
		PVOP_CALL_ARGS;						\
		PVOP_TEST_NULL(op);					\
		asm volatile(ALTERNATIVE(paravirt_alt(PARAVIRT_CALL),	\
					 alt, cond)			\
			     : call_clbr, ASM_CALL_CONSTRAINT		\
			     : paravirt_type(op),			\
			       paravirt_clobber(clbr),			\
			       ##__VA_ARGS__				\
			     : "memory", "cc" extra_clbr);		\
		ret;							\
	})

#define __PVOP_CALL(rettype, op, ...)					\
	____PVOP_CALL(PVOP_RETVAL(rettype), op, CLBR_ANY,		\
		      PVOP_CALL_CLOBBERS, EXTRA_CLOBBERS, ##__VA_ARGS__)

#define __PVOP_ALT_CALL(rettype, op, alt, cond, ...)			\
	____PVOP_ALT_CALL(PVOP_RETVAL(rettype), op, alt, cond, CLBR_ANY,\
			  PVOP_CALL_CLOBBERS, EXTRA_CLOBBERS,		\
			  ##__VA_ARGS__)

#define __PVOP_CALLEESAVE(rettype, op, ...)				\
	____PVOP_CALL(PVOP_RETVAL(rettype), op.func, CLBR_RET_REG,	\
		      PVOP_CALLEE_CLOBBERS, , ##__VA_ARGS__)

#define __PVOP_ALT_CALLEESAVE(rettype, op, alt, cond, ...)		\
	____PVOP_ALT_CALL(PVOP_RETVAL(rettype), op.func, alt, cond,	\
			  CLBR_RET_REG, PVOP_CALLEE_CLOBBERS, , ##__VA_ARGS__)


#define __PVOP_VCALL(op, ...)						\
	(void)____PVOP_CALL(, op, CLBR_ANY, PVOP_VCALL_CLOBBERS,	\
			    VEXTRA_CLOBBERS, ##__VA_ARGS__)

#define __PVOP_ALT_VCALL(op, alt, cond, ...)				\
	(void)____PVOP_ALT_CALL(, op, alt, cond, CLBR_ANY,		\
				PVOP_VCALL_CLOBBERS, VEXTRA_CLOBBERS,	\
				##__VA_ARGS__)

#define __PVOP_VCALLEESAVE(op, ...)					\
	(void)____PVOP_CALL(, op.func, CLBR_RET_REG,			\
			    PVOP_VCALLEE_CLOBBERS, , ##__VA_ARGS__)

#define __PVOP_ALT_VCALLEESAVE(op, alt, cond, ...)			\
	(void)____PVOP_ALT_CALL(, op.func, alt, cond, CLBR_RET_REG,	\
				PVOP_VCALLEE_CLOBBERS, , ##__VA_ARGS__)


#define PVOP_CALL0(rettype, op)						\
	__PVOP_CALL(rettype, op)
#define PVOP_VCALL0(op)							\
	__PVOP_VCALL(op)
#define PVOP_ALT_CALL0(rettype, op, alt, cond)				\
	__PVOP_ALT_CALL(rettype, op, alt, cond)
#define PVOP_ALT_VCALL0(op, alt, cond)					\
	__PVOP_ALT_VCALL(op, alt, cond)

#define PVOP_CALLEE0(rettype, op)					\
	__PVOP_CALLEESAVE(rettype, op)
#define PVOP_VCALLEE0(op)						\
	__PVOP_VCALLEESAVE(op)
#define PVOP_ALT_CALLEE0(rettype, op, alt, cond)			\
	__PVOP_ALT_CALLEESAVE(rettype, op, alt, cond)
#define PVOP_ALT_VCALLEE0(op, alt, cond)				\
	__PVOP_ALT_VCALLEESAVE(op, alt, cond)

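/*
 * Example (illustrative, following the ALTERNATIVE conversion): the
 * ALT variants let a caller supply raw replacement code plus a CPU
 * feature condition, e.g. replacing the save_fl call with a pushf/pop
 * whenever the kernel is not running as a Xen PV guest:
 *
 *	PVOP_ALT_CALLEE0(unsigned long, irq.save_fl,
 *			 "pushf; pop %%rax;", ALT_NOT(X86_FEATURE_XENPV));
 */
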
#define PVOP_CALL1(rettype, op, arg1)					\
	__PVOP_CALL(rettype, op, PVOP_CALL_ARG1(arg1))
#define PVOP_VCALL1(op, arg1)						\
	__PVOP_VCALL(op, PVOP_CALL_ARG1(arg1))
#define PVOP_ALT_VCALL1(op, arg1, alt, cond)				\
	__PVOP_ALT_VCALL(op, alt, cond, PVOP_CALL_ARG1(arg1))

#define PVOP_CALLEE1(rettype, op, arg1)					\
	__PVOP_CALLEESAVE(rettype, op, PVOP_CALL_ARG1(arg1))
#define PVOP_VCALLEE1(op, arg1)						\
	__PVOP_VCALLEESAVE(op, PVOP_CALL_ARG1(arg1))
#define PVOP_ALT_CALLEE1(rettype, op, arg1, alt, cond)			\
	__PVOP_ALT_CALLEESAVE(rettype, op, alt, cond, PVOP_CALL_ARG1(arg1))
#define PVOP_ALT_VCALLEE1(op, arg1, alt, cond)				\
	__PVOP_ALT_VCALLEESAVE(op, alt, cond, PVOP_CALL_ARG1(arg1))


#define PVOP_CALL2(rettype, op, arg1, arg2)				\
	__PVOP_CALL(rettype, op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2))
#define PVOP_VCALL2(op, arg1, arg2)					\
	__PVOP_VCALL(op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2))

#define PVOP_CALL3(rettype, op, arg1, arg2, arg3)			\
	__PVOP_CALL(rettype, op, PVOP_CALL_ARG1(arg1),			\
		    PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
#define PVOP_VCALL3(op, arg1, arg2, arg3)				\
	__PVOP_VCALL(op, PVOP_CALL_ARG1(arg1),				\
		     PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))

#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
	__PVOP_CALL(rettype, op,					\
		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
		    PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
	__PVOP_VCALL(op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),	\
		     PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))

/* Lazy mode for batching updates / context switch */
enum paravirt_lazy_mode {
	PARAVIRT_LAZY_NONE,
	PARAVIRT_LAZY_MMU,
	PARAVIRT_LAZY_CPU,
};

enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
void paravirt_start_context_switch(struct task_struct *prev);
void paravirt_end_context_switch(struct task_struct *next);

void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);
void paravirt_flush_lazy_mmu(void);
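
/*
 * Example (illustrative): lazy MMU mode brackets a batch of page-table
 * updates so a backend can queue them and issue them in one go; calls
 * such as set_pte() made between enter and leave may be deferred until
 * paravirt_leave_lazy_mmu() flushes them:
 *
 *	paravirt_enter_lazy_mmu();
 *	set_pte(ptep, pteval);
 *	paravirt_leave_lazy_mmu();
 */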

void _paravirt_nop(void);
u64 _paravirt_ident_64(u64);

#define paravirt_nop	((void *)_paravirt_nop)

/* These all sit in the .parainstructions section to tell us what to patch. */
struct paravirt_patch_site {
	u8 *instr;	/* original instructions */
	u8 type;	/* type of this instruction */
	u8 len;		/* length of original instruction */
};

extern struct paravirt_patch_site __parainstructions[],
	__parainstructions_end[];
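
/*
 * Example (illustrative): the patcher walks these records at boot,
 * roughly what apply_paravirt() in arch/x86/kernel/alternative.c does;
 * insn_buff is a local scratch buffer:
 *
 *	struct paravirt_patch_site *p;
 *
 *	for (p = __parainstructions; p < __parainstructions_end; p++) {
 *		unsigned used = pv_ops.init.patch(p->type, insn_buff,
 *						  (unsigned long)p->instr,
 *						  p->len);
 *		// then nop-pad used..p->len and copy over p->instr
 *	}
 */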

#endif	/* __ASSEMBLY__ */

#endif	/* _ASM_X86_PARAVIRT_TYPES_H */