x86/paravirt: fix missing callee-save call on pud_val
arch/x86/include/asm/paravirt.h [linux-2.6-block.git]

#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT
#include <asm/page.h>
#include <asm/asm.h>

/* Bitmask of what can be clobbered: usually at least eax. */
#define CLBR_NONE 0
#define CLBR_EAX  (1 << 0)
#define CLBR_ECX  (1 << 1)
#define CLBR_EDX  (1 << 2)
#define CLBR_EDI  (1 << 3)

#ifdef CONFIG_X86_32
/* CLBR_ANY should match all regs platform has. For i386, that's just it */
#define CLBR_ANY  ((1 << 4) - 1)

#define CLBR_ARG_REGS	(CLBR_EAX | CLBR_EDX | CLBR_ECX)
#define CLBR_RET_REG	(CLBR_EAX | CLBR_EDX)
#define CLBR_SCRATCH	(0)
#else
#define CLBR_RAX  CLBR_EAX
#define CLBR_RCX  CLBR_ECX
#define CLBR_RDX  CLBR_EDX
#define CLBR_RDI  CLBR_EDI
#define CLBR_RSI  (1 << 4)
#define CLBR_R8   (1 << 5)
#define CLBR_R9   (1 << 6)
#define CLBR_R10  (1 << 7)
#define CLBR_R11  (1 << 8)

#define CLBR_ANY  ((1 << 9) - 1)

#define CLBR_ARG_REGS	(CLBR_RDI | CLBR_RSI | CLBR_RDX | \
			 CLBR_RCX | CLBR_R8 | CLBR_R9)
#define CLBR_RET_REG	(CLBR_RAX)
#define CLBR_SCRATCH	(CLBR_R10 | CLBR_R11)

#include <asm/desc_defs.h>
#endif /* X86_64 */

#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)

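/*
 * Worked example (not in the original source): with the definitions
 * above, CLBR_CALLEE_SAVE evaluates to just CLBR_ECX on 32-bit (EAX
 * and EDX are return registers), and on 64-bit to CLBR_RDI|CLBR_RSI|
 * CLBR_RDX|CLBR_RCX|CLBR_R8|CLBR_R9|CLBR_R10|CLBR_R11, i.e. every
 * argument and scratch register except the return register %rax.
 */
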
#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/cpumask.h>
#include <asm/kmap_types.h>
#include <asm/desc_defs.h>

struct page;
struct thread_struct;
struct desc_ptr;
struct tss_struct;
struct mm_struct;
struct desc_struct;

/*
 * Wrapper type for pointers to code which uses the non-standard
 * calling convention.  See PV_CALLEE_SAVE_REGS_THUNK below.
 */
struct paravirt_callee_save {
	void *func;
};

/* general info */
struct pv_info {
	unsigned int kernel_rpl;
	int shared_kernel_pmd;
	int paravirt_enabled;
	const char *name;
};

struct pv_init_ops {
	/*
	 * Patch may replace one of the defined code sequences with
	 * arbitrary code, subject to the same register constraints.
	 * This generally means the code is not free to clobber any
	 * registers other than EAX.  The patch function should return
	 * the number of bytes of code generated, as we nop pad the
	 * rest in generic code.
	 */
	unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
			  unsigned long addr, unsigned len);

	/* Basic arch-specific setup */
	void (*arch_setup)(void);
	char *(*memory_setup)(void);
	void (*post_allocator_init)(void);

	/* Print a banner to identify the environment */
	void (*banner)(void);
};

struct pv_lazy_ops {
	/* Set deferred update mode, used for batching operations. */
	void (*enter)(void);
	void (*leave)(void);
};

struct pv_time_ops {
	void (*time_init)(void);

	/* Get and set time of day */
	unsigned long (*get_wallclock)(void);
	int (*set_wallclock)(unsigned long);

	unsigned long long (*sched_clock)(void);
	unsigned long (*get_tsc_khz)(void);
};

struct pv_cpu_ops {
	/* hooks for various privileged instructions */
	unsigned long (*get_debugreg)(int regno);
	void (*set_debugreg)(int regno, unsigned long value);

	void (*clts)(void);

	unsigned long (*read_cr0)(void);
	void (*write_cr0)(unsigned long);

	unsigned long (*read_cr4_safe)(void);
	unsigned long (*read_cr4)(void);
	void (*write_cr4)(unsigned long);

#ifdef CONFIG_X86_64
	unsigned long (*read_cr8)(void);
	void (*write_cr8)(unsigned long);
#endif

	/* Segment descriptor handling */
	void (*load_tr_desc)(void);
	void (*load_gdt)(const struct desc_ptr *);
	void (*load_idt)(const struct desc_ptr *);
	void (*store_gdt)(struct desc_ptr *);
	void (*store_idt)(struct desc_ptr *);
	void (*set_ldt)(const void *desc, unsigned entries);
	unsigned long (*store_tr)(void);
	void (*load_tls)(struct thread_struct *t, unsigned int cpu);
#ifdef CONFIG_X86_64
	void (*load_gs_index)(unsigned int idx);
#endif
	void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
				const void *desc);
	void (*write_gdt_entry)(struct desc_struct *,
				int entrynum, const void *desc, int size);
	void (*write_idt_entry)(gate_desc *,
				int entrynum, const gate_desc *gate);
	void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
	void (*free_ldt)(struct desc_struct *ldt, unsigned entries);

	void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);

	void (*set_iopl_mask)(unsigned mask);

	void (*wbinvd)(void);
	void (*io_delay)(void);

	/* cpuid emulation, mostly so that caps bits can be disabled */
	void (*cpuid)(unsigned int *eax, unsigned int *ebx,
		      unsigned int *ecx, unsigned int *edx);

	/* MSR, PMC and TSC operations.
	   err = 0/-EFAULT.  wrmsr returns 0/-EFAULT. */
	u64 (*read_msr_amd)(unsigned int msr, int *err);
	u64 (*read_msr)(unsigned int msr, int *err);
	int (*write_msr)(unsigned int msr, unsigned low, unsigned high);

	u64 (*read_tsc)(void);
	u64 (*read_pmc)(int counter);
	unsigned long long (*read_tscp)(unsigned int *aux);

	/*
	 * Atomically enable interrupts and return to userspace.  This
	 * is only ever used to return to 32-bit processes; in a
	 * 64-bit kernel, it's used for 32-on-64 compat processes, but
	 * never native 64-bit processes.  (Jump, not call.)
	 */
	void (*irq_enable_sysexit)(void);

	/*
	 * Switch to usermode gs and return to 64-bit usermode using
	 * sysret.  Only used in 64-bit kernels to return to 64-bit
	 * processes.  Usermode register state, including %rsp, must
	 * already be restored.
	 */
	void (*usergs_sysret64)(void);

	/*
	 * Switch to usermode gs and return to 32-bit usermode using
	 * sysret.  Used to return to 32-on-64 compat processes.
	 * Other usermode register state, including %esp, must already
	 * be restored.
	 */
	void (*usergs_sysret32)(void);

	/* Normal iret.  Jump to this with the standard iret stack
	   frame set up. */
	void (*iret)(void);

	void (*swapgs)(void);

	struct pv_lazy_ops lazy_mode;
};

struct pv_irq_ops {
	void (*init_IRQ)(void);

	/*
	 * Get/set interrupt state.  save_fl and restore_fl are only
	 * expected to use X86_EFLAGS_IF; all other bits
	 * returned from save_fl are undefined, and may be ignored by
	 * restore_fl.
	 *
	 * NOTE: Callers of these functions expect the callee to
	 * preserve more registers than the standard C calling
	 * convention.
	 */
	struct paravirt_callee_save save_fl;
	struct paravirt_callee_save restore_fl;
	struct paravirt_callee_save irq_disable;
	struct paravirt_callee_save irq_enable;

	void (*safe_halt)(void);
	void (*halt)(void);

#ifdef CONFIG_X86_64
	void (*adjust_exception_frame)(void);
#endif
};

struct pv_apic_ops {
#ifdef CONFIG_X86_LOCAL_APIC
	void (*setup_boot_clock)(void);
	void (*setup_secondary_clock)(void);

	void (*startup_ipi_hook)(int phys_apicid,
				 unsigned long start_eip,
				 unsigned long start_esp);
#endif
};

struct pv_mmu_ops {
	/*
	 * Called before/after init_mm pagetable setup. setup_start
	 * may reset %cr3, and may pre-install parts of the pagetable;
	 * pagetable setup is expected to preserve any existing
	 * mapping.
	 */
	void (*pagetable_setup_start)(pgd_t *pgd_base);
	void (*pagetable_setup_done)(pgd_t *pgd_base);

	unsigned long (*read_cr2)(void);
	void (*write_cr2)(unsigned long);

	unsigned long (*read_cr3)(void);
	void (*write_cr3)(unsigned long);

	/*
	 * Hooks for intercepting the creation/use/destruction of an
	 * mm_struct.
	 */
	void (*activate_mm)(struct mm_struct *prev,
			    struct mm_struct *next);
	void (*dup_mmap)(struct mm_struct *oldmm,
			 struct mm_struct *mm);
	void (*exit_mmap)(struct mm_struct *mm);

	/* TLB operations */
	void (*flush_tlb_user)(void);
	void (*flush_tlb_kernel)(void);
	void (*flush_tlb_single)(unsigned long addr);
	void (*flush_tlb_others)(const struct cpumask *cpus,
				 struct mm_struct *mm,
				 unsigned long va);

	/* Hooks for allocating and freeing a pagetable top-level */
	int  (*pgd_alloc)(struct mm_struct *mm);
	void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);

	/*
	 * Hooks for allocating/releasing pagetable pages when they're
	 * attached to a pagetable
	 */
	void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_pmd_clone)(unsigned long pfn, unsigned long clonepfn,
				unsigned long start, unsigned long count);
	void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
	void (*release_pte)(unsigned long pfn);
	void (*release_pmd)(unsigned long pfn);
	void (*release_pud)(unsigned long pfn);

	/* Pagetable manipulation functions */
	void (*set_pte)(pte_t *ptep, pte_t pteval);
	void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, pte_t pteval);
	void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
	void (*pte_update)(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep);
	void (*pte_update_defer)(struct mm_struct *mm,
				 unsigned long addr, pte_t *ptep);

	pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep);
	void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte);

	struct paravirt_callee_save pte_val;
	struct paravirt_callee_save make_pte;

	struct paravirt_callee_save pgd_val;
	struct paravirt_callee_save make_pgd;

#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
	void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
	void (*set_pte_present)(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte);
	void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
			  pte_t *ptep);
	void (*pmd_clear)(pmd_t *pmdp);
#endif	/* CONFIG_X86_PAE */

	void (*set_pud)(pud_t *pudp, pud_t pudval);

	struct paravirt_callee_save pmd_val;
	struct paravirt_callee_save make_pmd;

#if PAGETABLE_LEVELS == 4
	struct paravirt_callee_save pud_val;
	struct paravirt_callee_save make_pud;

	void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
#endif	/* PAGETABLE_LEVELS == 4 */
#endif	/* PAGETABLE_LEVELS >= 3 */

#ifdef CONFIG_HIGHPTE
	void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
#endif

	struct pv_lazy_ops lazy_mode;

	/* dom0 ops */

	/* Sometimes the physical address is a pfn, and sometimes it's
	   an mfn.  We can tell which is which from the index. */
	void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
			   unsigned long phys, pgprot_t flags);
};

struct raw_spinlock;
struct pv_lock_ops {
	int (*spin_is_locked)(struct raw_spinlock *lock);
	int (*spin_is_contended)(struct raw_spinlock *lock);
	void (*spin_lock)(struct raw_spinlock *lock);
	void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
	int (*spin_trylock)(struct raw_spinlock *lock);
	void (*spin_unlock)(struct raw_spinlock *lock);
};

/* This contains all the paravirt structures: we get a convenient
 * number for each function using the offset which we use to indicate
 * what to patch. */
struct paravirt_patch_template {
	struct pv_init_ops pv_init_ops;
	struct pv_time_ops pv_time_ops;
	struct pv_cpu_ops pv_cpu_ops;
	struct pv_irq_ops pv_irq_ops;
	struct pv_apic_ops pv_apic_ops;
	struct pv_mmu_ops pv_mmu_ops;
	struct pv_lock_ops pv_lock_ops;
};

extern struct pv_info pv_info;
extern struct pv_init_ops pv_init_ops;
extern struct pv_time_ops pv_time_ops;
extern struct pv_cpu_ops pv_cpu_ops;
extern struct pv_irq_ops pv_irq_ops;
extern struct pv_apic_ops pv_apic_ops;
extern struct pv_mmu_ops pv_mmu_ops;
extern struct pv_lock_ops pv_lock_ops;

#define PARAVIRT_PATCH(x)					\
	(offsetof(struct paravirt_patch_template, x) / sizeof(void *))

#define paravirt_type(op)				\
	[paravirt_typenum] "i" (PARAVIRT_PATCH(op)),	\
	[paravirt_opptr] "m" (op)
#define paravirt_clobber(clobber)		\
	[paravirt_clobber] "i" (clobber)

/*
 * Generate some code, and mark it as patchable by the
 * apply_paravirt() alternate instruction patcher.
 */
#define _paravirt_alt(insn_string, type, clobber)	\
	"771:\n\t" insn_string "\n" "772:\n"		\
	".pushsection .parainstructions,\"a\"\n"	\
	_ASM_ALIGN "\n"					\
	_ASM_PTR " 771b\n"				\
	" .byte " type "\n"				\
	" .byte 772b-771b\n"				\
	" .short " clobber "\n"				\
	".popsection\n"

/* Generate patchable code, with the default asm parameters. */
#define paravirt_alt(insn_string)					\
	_paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")

/* Simple instruction patching code. */
#define DEF_NATIVE(ops, name, code)					\
	extern const char start_##ops##_##name[], end_##ops##_##name[]; \
	asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")

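/*
 * Example usage, a sketch based on the instances in
 * arch/x86/kernel/paravirt_patch_32.c and paravirt_patch_64.c:
 *
 *	DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
 *	DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
 *
 * Each use emits start_/end_ labels around the native instruction so
 * that native_patch() can copy it over the call site with
 * paravirt_patch_insns().
 */
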
unsigned paravirt_patch_nop(void);
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
unsigned paravirt_patch_ignore(unsigned len);
unsigned paravirt_patch_call(void *insnbuf,
			     const void *target, u16 tgt_clobbers,
			     unsigned long addr, u16 site_clobbers,
			     unsigned len);
unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
			    unsigned long addr, unsigned len);
unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
				unsigned long addr, unsigned len);

unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
			      const char *start, const char *end);

unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
		      unsigned long addr, unsigned len);

int paravirt_disable_iospace(void);

/*
 * This generates an indirect call based on the operation type number.
 * The type number, computed in PARAVIRT_PATCH, is derived from the
 * offset into the paravirt_patch_template structure, and can therefore be
 * freely converted back into a structure offset.
 */
#define PARAVIRT_CALL	"call *%[paravirt_opptr];"
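/*
 * For illustration (an assumption about the generated code, not text
 * from the original source): PVOP_VCALL0(pv_irq_ops.irq_disable)
 * emits roughly "call *pv_irq_ops+<offsetof irq_disable>", bracketed
 * by the 771:/772: labels from _paravirt_alt() so apply_paravirt()
 * can find and patch the site later.
 */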

/*
 * These macros are intended to wrap calls through one of the paravirt
 * ops structs, so that they can be later identified and patched at
 * runtime.
 *
 * Normally, a call to a pv_op function is a simple indirect call:
 * (pv_op_struct.operations)(args...).
 *
 * Unfortunately, this is a relatively slow operation for modern CPUs,
 * because it cannot necessarily determine what the destination
 * address is.  In this case, the address is a runtime constant, so at
 * the very least we can patch the call to be a simple direct call, or
 * ideally, patch an inline implementation into the callsite.  (Direct
 * calls are essentially free, because the call and return addresses
 * are completely predictable.)
 *
 * For i386, these macros rely on the standard gcc "regparm(3)" calling
 * convention, in which the first three arguments are placed in %eax,
 * %edx, %ecx (in that order), and the remaining arguments are placed
 * on the stack.  All caller-save registers (eax,edx,ecx) are expected
 * to be modified (either clobbered or used for return values).
 * X86_64, on the other hand, already specifies a register-based calling
 * convention, returning in %rax, with parameters going in %rdi, %rsi,
 * %rdx, and %rcx.  Note that for this reason, x86_64 does not need any
 * special handling for dealing with 4 arguments, unlike i386.
 * However, x86_64 also has to clobber all caller-saved registers, which
 * unfortunately are quite a few (r8 - r11).
 *
 * The call instruction itself is marked by placing its start address
 * and size into the .parainstructions section, so that
 * apply_paravirt() in arch/x86/kernel/alternative.c can do the
 * appropriate patching under the control of the backend pv_init_ops
 * implementation.
 *
 * Unfortunately there's no way to get gcc to generate the args setup
 * for the call, and then allow the call itself to be generated by an
 * inline asm.  Because of this, we must do the complete arg setup and
 * return value handling from within these macros.  This is fairly
 * cumbersome.
 *
 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
 * It could be extended to more arguments, but there would be little
 * to be gained from that.  For each number of arguments, there are
 * the two VCALL and CALL variants for void and non-void functions.
 *
 * When there is a return value, the invoker of the macro must specify
 * the return type.  The macro then uses sizeof() on that type to
 * determine whether it's a 32 or 64 bit value, and places the return
 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
 * 64-bit).  For x86_64 machines, it just returns in %rax regardless of
 * the return value size.
 *
 * 64-bit arguments are passed as a pair of adjacent 32-bit arguments;
 * i386 passes them in low,high order.
 *
 * Small structures are passed and returned in registers.  The macro
 * calling convention can't directly deal with this, so the wrapper
 * functions must do this.
 *
 * These PVOP_* macros are only defined within this header.  This
 * means that all uses must be wrapped in inline functions.  This also
 * makes sure the incoming and outgoing types are always correct.
 */
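/*
 * Usage sketch (matches the wrappers defined later in this header):
 *
 *	static inline unsigned long read_cr2(void)
 *	{
 *		return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
 *	}
 *
 * expands to a single patchable indirect call through
 * pv_mmu_ops.read_cr2, with the result left in %eax/%rax.
 */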
#ifdef CONFIG_X86_32
#define PVOP_VCALL_ARGS				\
	unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx
#define PVOP_CALL_ARGS			PVOP_VCALL_ARGS

#define PVOP_CALL_ARG1(x)		"a" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)		"d" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)		"c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS		"=a" (__eax), "=d" (__edx),	\
					"=c" (__ecx)
#define PVOP_CALL_CLOBBERS		PVOP_VCALL_CLOBBERS

#define PVOP_VCALLEE_CLOBBERS		"=a" (__eax)
#define PVOP_CALLEE_CLOBBERS		PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS
#define VEXTRA_CLOBBERS
#else  /* CONFIG_X86_64 */
#define PVOP_VCALL_ARGS					\
	unsigned long __edi = __edi, __esi = __esi,	\
		__edx = __edx, __ecx = __ecx
#define PVOP_CALL_ARGS		PVOP_VCALL_ARGS, __eax

#define PVOP_CALL_ARG1(x)		"D" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)		"S" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)		"d" ((unsigned long)(x))
#define PVOP_CALL_ARG4(x)		"c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS	"=D" (__edi),				\
				"=S" (__esi), "=d" (__edx),		\
				"=c" (__ecx)
#define PVOP_CALL_CLOBBERS	PVOP_VCALL_CLOBBERS, "=a" (__eax)

#define PVOP_VCALLEE_CLOBBERS	"=a" (__eax)
#define PVOP_CALLEE_CLOBBERS	PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS	 , "r8", "r9", "r10", "r11"
#define VEXTRA_CLOBBERS	 , "rax", "r8", "r9", "r10", "r11"
#endif	/* CONFIG_X86_32 */

#ifdef CONFIG_PARAVIRT_DEBUG
#define PVOP_TEST_NULL(op)	BUG_ON(op == NULL)
#else
#define PVOP_TEST_NULL(op)	((void)op)
#endif

#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr,	\
		      pre, post, ...)					\
	({								\
		rettype __ret;						\
		PVOP_CALL_ARGS;						\
		PVOP_TEST_NULL(op);					\
		/* This is 32-bit specific, but is okay in 64-bit */	\
		/* since this condition will never hold */		\
		if (sizeof(rettype) > sizeof(unsigned long)) {		\
			asm volatile(pre				\
				     paravirt_alt(PARAVIRT_CALL)	\
				     post				\
				     : call_clbr			\
				     : paravirt_type(op),		\
				       paravirt_clobber(clbr),		\
				       ##__VA_ARGS__			\
				     : "memory", "cc" extra_clbr);	\
			__ret = (rettype)((((u64)__edx) << 32) | __eax); \
		} else {						\
			asm volatile(pre				\
				     paravirt_alt(PARAVIRT_CALL)	\
				     post				\
				     : call_clbr			\
				     : paravirt_type(op),		\
				       paravirt_clobber(clbr),		\
				       ##__VA_ARGS__			\
				     : "memory", "cc" extra_clbr);	\
			__ret = (rettype)__eax;				\
		}							\
		__ret;							\
	})

#define __PVOP_CALL(rettype, op, pre, post, ...)			\
	____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS,	\
		      EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)

#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...)			\
	____PVOP_CALL(rettype, op.func, CLBR_RET_REG,			\
		      PVOP_CALLEE_CLOBBERS, ,				\
		      pre, post, ##__VA_ARGS__)

#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...) \
	({								\
		PVOP_VCALL_ARGS;					\
		PVOP_TEST_NULL(op);					\
		asm volatile(pre					\
			     paravirt_alt(PARAVIRT_CALL)		\
			     post					\
			     : call_clbr				\
			     : paravirt_type(op),			\
			       paravirt_clobber(clbr),			\
			       ##__VA_ARGS__				\
			     : "memory", "cc" extra_clbr);		\
	})

#define __PVOP_VCALL(op, pre, post, ...)				\
	____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS,		\
		       VEXTRA_CLOBBERS,					\
		       pre, post, ##__VA_ARGS__)

#define __PVOP_VCALLEESAVE(op, pre, post, ...)				\
	____PVOP_VCALL(op.func, CLBR_RET_REG,				\
		       PVOP_VCALLEE_CLOBBERS, ,				\
		       pre, post, ##__VA_ARGS__)

#define PVOP_CALL0(rettype, op)						\
	__PVOP_CALL(rettype, op, "", "")
#define PVOP_VCALL0(op)							\
	__PVOP_VCALL(op, "", "")

#define PVOP_CALLEE0(rettype, op)					\
	__PVOP_CALLEESAVE(rettype, op, "", "")
#define PVOP_VCALLEE0(op)						\
	__PVOP_VCALLEESAVE(op, "", "")


#define PVOP_CALL1(rettype, op, arg1)					\
	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_VCALL1(op, arg1)						\
	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))

#define PVOP_CALLEE1(rettype, op, arg1)					\
	__PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_VCALLEE1(op, arg1)						\
	__PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))


#define PVOP_CALL2(rettype, op, arg1, arg2)				\
	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),		\
		    PVOP_CALL_ARG2(arg2))
#define PVOP_VCALL2(op, arg1, arg2)					\
	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),			\
		     PVOP_CALL_ARG2(arg2))

#define PVOP_CALLEE2(rettype, op, arg1, arg2)				\
	__PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1),	\
			  PVOP_CALL_ARG2(arg2))
#define PVOP_VCALLEE2(op, arg1, arg2)					\
	__PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1),		\
			   PVOP_CALL_ARG2(arg2))


#define PVOP_CALL3(rettype, op, arg1, arg2, arg3)			\
	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),		\
		    PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
#define PVOP_VCALL3(op, arg1, arg2, arg3)				\
	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),			\
		     PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))

/* This is the only difference in x86_64. We can make it much simpler */
#ifdef CONFIG_X86_32
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
	__PVOP_CALL(rettype, op,					\
		    "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
		    PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
	__PVOP_VCALL(op,						\
		     "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
		     "0" ((u32)(arg1)), "1" ((u32)(arg2)),		\
		     "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#else
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
	__PVOP_CALL(rettype, op, "", "",				\
		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
		    PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
	__PVOP_VCALL(op, "", "",					\
		     PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),	\
		     PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#endif

static inline int paravirt_enabled(void)
{
	return pv_info.paravirt_enabled;
}

static inline void load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
}

#define ARCH_SETUP			pv_init_ops.arch_setup();
static inline unsigned long get_wallclock(void)
{
	return PVOP_CALL0(unsigned long, pv_time_ops.get_wallclock);
}

static inline int set_wallclock(unsigned long nowtime)
{
	return PVOP_CALL1(int, pv_time_ops.set_wallclock, nowtime);
}

static inline void (*choose_time_init(void))(void)
{
	return pv_time_ops.time_init;
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
			   unsigned int *ecx, unsigned int *edx)
{
	PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
}

/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
	return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
	PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
}

static inline void clts(void)
{
	PVOP_VCALL0(pv_cpu_ops.clts);
}

static inline unsigned long read_cr0(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
}

static inline unsigned long read_cr3(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}

static inline unsigned long read_cr4(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
}
static inline unsigned long read_cr4_safe(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
}

static inline void write_cr4(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_cr8(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
}

static inline void write_cr8(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
}
#endif

static inline void raw_safe_halt(void)
{
	PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void halt(void)
{
	PVOP_VCALL0(pv_irq_ops.halt);
}

static inline void wbinvd(void)
{
	PVOP_VCALL0(pv_cpu_ops.wbinvd);
}

#define get_kernel_rpl()  (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr, int *err)
{
	return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
}
static inline u64 paravirt_read_msr_amd(unsigned msr, int *err)
{
	return PVOP_CALL2(u64, pv_cpu_ops.read_msr_amd, msr, err);
}
static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
{
	return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
}

/* These should all do BUG_ON(_err), but our headers are too tangled. */
#define rdmsr(msr, val1, val2)			\
do {						\
	int _err;				\
	u64 _l = paravirt_read_msr(msr, &_err);	\
	val1 = (u32)_l;				\
	val2 = _l >> 32;			\
} while (0)

#define wrmsr(msr, val1, val2)			\
do {						\
	paravirt_write_msr(msr, val1, val2);	\
} while (0)

#define rdmsrl(msr, val)			\
do {						\
	int _err;				\
	val = paravirt_read_msr(msr, &_err);	\
} while (0)

#define wrmsrl(msr, val)	wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
#define wrmsr_safe(msr, a, b)	paravirt_write_msr(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)			\
({						\
	int _err;				\
	u64 _l = paravirt_read_msr(msr, &_err);	\
	(*a) = (u32)_l;				\
	(*b) = _l >> 32;			\
	_err;					\
})
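/*
 * Usage sketch (illustrative, not from the original source):
 *
 *	u32 lo, hi;
 *	if (rdmsr_safe(MSR_IA32_APICBASE, &lo, &hi) == 0)
 *		;	(read succeeded; lo/hi hold the value)
 *
 * The statement expression evaluates to the error code, so callers
 * can probe MSRs that may #GP instead of oopsing on them.
 */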

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = paravirt_read_msr(msr, &err);
	return err;
}
static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = paravirt_read_msr_amd(msr, &err);
	return err;
}

static inline u64 paravirt_read_tsc(void)
{
	return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
}

#define rdtscl(low)				\
do {						\
	u64 _l = paravirt_read_tsc();		\
	low = (int)_l;				\
} while (0)

#define rdtscll(val) (val = paravirt_read_tsc())

static inline unsigned long long paravirt_sched_clock(void)
{
	return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}
#define calibrate_tsc() (pv_time_ops.get_tsc_khz())

static inline unsigned long long paravirt_read_pmc(int counter)
{
	return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}

#define rdpmc(counter, low, high)		\
do {						\
	u64 _l = paravirt_read_pmc(counter);	\
	low = (u32)_l;				\
	high = _l >> 32;			\
} while (0)

static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
{
	return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
}

#define rdtscp(low, high, aux)				\
do {							\
	int __aux;					\
	unsigned long __val = paravirt_rdtscp(&__aux);	\
	(low) = (u32)__val;				\
	(high) = (u32)(__val >> 32);			\
	(aux) = __aux;					\
} while (0)

#define rdtscpll(val, aux)				\
do {							\
	unsigned long __aux;				\
	val = paravirt_rdtscp(&__aux);			\
	(aux) = __aux;					\
} while (0)

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
	PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
}
static inline void store_gdt(struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
}
static inline void store_idt(struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
}
static inline unsigned long paravirt_store_tr(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
}
#define store_tr(tr)	((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
	PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
}

#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
	PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
}
#endif

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
				   const void *desc)
{
	PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
				   void *desc, int type)
{
	PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
	PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
}
static inline void set_iopl_mask(unsigned mask)
{
	PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
}

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
	pv_cpu_ops.io_delay();
#ifdef REALLY_SLOW_IO
	pv_cpu_ops.io_delay();
	pv_cpu_ops.io_delay();
	pv_cpu_ops.io_delay();
#endif
}

#ifdef CONFIG_X86_LOCAL_APIC
static inline void setup_boot_clock(void)
{
	PVOP_VCALL0(pv_apic_ops.setup_boot_clock);
}

static inline void setup_secondary_clock(void)
{
	PVOP_VCALL0(pv_apic_ops.setup_secondary_clock);
}
#endif

static inline void paravirt_post_allocator_init(void)
{
	if (pv_init_ops.post_allocator_init)
		(*pv_init_ops.post_allocator_init)();
}

static inline void paravirt_pagetable_setup_start(pgd_t *base)
{
	(*pv_mmu_ops.pagetable_setup_start)(base);
}

static inline void paravirt_pagetable_setup_done(pgd_t *base)
{
	(*pv_mmu_ops.pagetable_setup_done)(base);
}

#ifdef CONFIG_SMP
static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
				    unsigned long start_esp)
{
	PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
		    phys_apicid, start_eip, start_esp);
}
#endif

static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
	PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
	PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
}

static inline void __flush_tlb(void)
{
	PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
}
static inline void __flush_tlb_global(void)
{
	PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
}
static inline void __flush_tlb_single(unsigned long addr)
{
	PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}

static inline void flush_tlb_others(const struct cpumask *cpumask,
				    struct mm_struct *mm,
				    unsigned long va)
{
	PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, cpumask, mm, va);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
	return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
}

static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
					    unsigned long start, unsigned long count)
{
	PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count);
}
static inline void paravirt_release_pmd(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}

#ifdef CONFIG_HIGHPTE
static inline void *kmap_atomic_pte(struct page *page, enum km_type type)
{
	unsigned long ret;
	ret = PVOP_CALL2(unsigned long, pv_mmu_ops.kmap_atomic_pte, page, type);
	return (void *)ret;
}
#endif

static inline void pte_update(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
}

static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
}

static inline pte_t __pte(pteval_t val)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pteval_t,
				   pv_mmu_ops.make_pte,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pteval_t,
				   pv_mmu_ops.make_pte,
				   val);

	return (pte_t) { .pte = ret };
}

static inline pteval_t pte_val(pte_t pte)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
				   pte.pte, (u64)pte.pte >> 32);
	else
		ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
				   pte.pte);

	return ret;
}

static inline pgd_t __pgd(pgdval_t val)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
				   val);

	return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
				   pgd.pgd, (u64)pgd.pgd >> 32);
	else
		ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
				   pgd.pgd);

	return ret;
}

#define  __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep)
{
	pteval_t ret;

	ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
			 mm, addr, ptep);

	return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
			    mm, addr, ptep, pte.pte);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
			    pte.pte, (u64)pte.pte >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
			    pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	pmdval_t val = native_pmd_val(pmd);

	if (sizeof(pmdval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
}

#if PAGETABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
				   val);

	return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
				   pmd.pmd, (u64)pmd.pmd >> 32);
	else
		ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
				   pmd.pmd);

	return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	pudval_t val = native_pud_val(pud);

	if (sizeof(pudval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
			    val);
}
#if PAGETABLE_LEVELS == 4
static inline pud_t __pud(pudval_t val)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
				   val);

	return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
				   pud.pud, (u64)pud.pud >> 32);
	else
		ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
				   pud.pud);

	return ret;
}

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	pgdval_t val = native_pgd_val(pgd);

	if (sizeof(pgdval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
			    val);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pgd(pgdp, __pgd(0));
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

#endif	/* PAGETABLE_LEVELS == 4 */

#endif	/* PAGETABLE_LEVELS >= 3 */

#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
   64-bit pte atomically */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
		    pte.pte, pte.pte >> 32);
}

static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	/* 5 arg words */
	pv_mmu_ops.set_pte_present(mm, addr, ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

/* Lazy mode for batching updates / context switch */
enum paravirt_lazy_mode {
	PARAVIRT_LAZY_NONE,
	PARAVIRT_LAZY_MMU,
	PARAVIRT_LAZY_CPU,
};

enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
void paravirt_enter_lazy_cpu(void);
void paravirt_leave_lazy_cpu(void);
void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);
void paravirt_leave_lazy(enum paravirt_lazy_mode mode);

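/*
 * Usage sketch (illustrative; the real callers are the generic
 * pagetable and context-switch paths):
 *
 *	arch_enter_lazy_mmu_mode();
 *	for (; addr < end; addr += PAGE_SIZE, ptep++)
 *		set_pte_at(mm, addr, ptep, pte);    (may be queued)
 *	arch_leave_lazy_mmu_mode();                 (flushes the batch)
 *
 * A hypervisor backend can buffer the intervening updates and issue
 * them as one batched hypercall when the lazy mode is left.
 */
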
#define  __HAVE_ARCH_ENTER_LAZY_CPU_MODE
static inline void arch_enter_lazy_cpu_mode(void)
{
	PVOP_VCALL0(pv_cpu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_cpu_mode(void)
{
	PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave);
}

static inline void arch_flush_lazy_cpu_mode(void)
{
	if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)) {
		arch_leave_lazy_cpu_mode();
		arch_enter_lazy_cpu_mode();
	}
}


#define  __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
	if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU)) {
		arch_leave_lazy_mmu_mode();
		arch_enter_lazy_mmu_mode();
	}
}

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
				unsigned long phys, pgprot_t flags)
{
	pv_mmu_ops.set_fixmap(idx, phys, flags);
}

void _paravirt_nop(void);
u32 _paravirt_ident_32(u32);
u64 _paravirt_ident_64(u64);

#define paravirt_nop	((void *)_paravirt_nop)

void paravirt_use_bytelocks(void);

#ifdef CONFIG_SMP

static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
{
	return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
}

static inline int __raw_spin_is_contended(struct raw_spinlock *lock)
{
	return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
}

static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
{
	PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
}

static __always_inline void __raw_spin_lock_flags(struct raw_spinlock *lock,
						  unsigned long flags)
{
	PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
}

static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock)
{
	return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
}

static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
{
	PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
}

#endif

/* These all sit in the .parainstructions section to tell us what to patch. */
struct paravirt_patch_site {
	u8 *instr;		/* original instructions */
	u8 instrtype;		/* type of this instruction */
	u8 len;			/* length of original instruction */
	u16 clobbers;		/* what registers you may clobber */
};
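/*
 * For illustration: each record written by _paravirt_alt() above maps
 * onto this struct: _ASM_PTR 771b fills ->instr, the two .byte fields
 * fill ->instrtype and ->len, and the .short fills ->clobbers.
 */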

extern struct paravirt_patch_site __parainstructions[],
	__parainstructions_end[];

#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"

/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS		PV_SAVE_REGS
#define PV_RESTORE_ALL_CALLER_REGS	PV_RESTORE_REGS

#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS						\
	"push %rcx;"							\
	"push %rdx;"							\
	"push %rsi;"							\
	"push %rdi;"							\
	"push %r8;"							\
	"push %r9;"							\
	"push %r10;"							\
	"push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS					\
	"pop %r11;"							\
	"pop %r10;"							\
	"pop %r9;"							\
	"pop %r8;"							\
	"pop %rdi;"							\
	"pop %rsi;"							\
	"pop %rdx;"							\
	"pop %rcx;"

/* We save some registers, but not all of them; saving every register
 * would be too much.  We clobber all caller-saved registers except
 * the argument register. */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value.  This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available.  It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call.  The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_CALLEE_SAVE_REGS_THUNK(func)					\
	extern typeof(func) __raw_callee_save_##func;			\
	static void *__##func##__ __used = func;			\
									\
	asm(".pushsection .text;"					\
	    "__raw_callee_save_" #func ": "				\
	    PV_SAVE_ALL_CALLER_REGS					\
	    "call " #func ";"						\
	    PV_RESTORE_ALL_CALLER_REGS					\
	    "ret;"							\
	    ".popsection")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)						\
	((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)			\
	((struct paravirt_callee_save) { func })

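/*
 * Usage sketch (hypothetical backend, for illustration only):
 *
 *	static unsigned long my_save_fl(void) { ... }
 *	PV_CALLEE_SAVE_REGS_THUNK(my_save_fl);
 *	...
 *	pv_irq_ops.save_fl = PV_CALLEE_SAVE(my_save_fl);
 *
 * An asm implementation that already preserves the extra registers
 * would be installed with __PV_IS_CALLEE_SAVE(func) instead.
 */
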
static inline unsigned long __raw_local_save_flags(void)
{
	unsigned long f;

	asm volatile(paravirt_alt(PARAVIRT_CALL)
		     : "=a"(f)
		     : paravirt_type(pv_irq_ops.save_fl),
		       paravirt_clobber(CLBR_EAX)
		     : "memory", "cc");
	return f;
}

static inline void raw_local_irq_restore(unsigned long f)
{
	asm volatile(paravirt_alt(PARAVIRT_CALL)
		     : "=a"(f)
		     : PV_FLAGS_ARG(f),
		       paravirt_type(pv_irq_ops.restore_fl),
		       paravirt_clobber(CLBR_EAX)
		     : "memory", "cc");
}

static inline void raw_local_irq_disable(void)
{
	asm volatile(paravirt_alt(PARAVIRT_CALL)
		     :
		     : paravirt_type(pv_irq_ops.irq_disable),
		       paravirt_clobber(CLBR_EAX)
		     : "memory", "eax", "cc");
}

static inline void raw_local_irq_enable(void)
{
	asm volatile(paravirt_alt(PARAVIRT_CALL)
		     :
		     : paravirt_type(pv_irq_ops.irq_enable),
		       paravirt_clobber(CLBR_EAX)
		     : "memory", "eax", "cc");
}

static inline unsigned long __raw_local_irq_save(void)
{
	unsigned long f;

	f = __raw_local_save_flags();
	raw_local_irq_disable();
	return f;
}

/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

#else  /* __ASSEMBLY__ */

#define _PVSITE(ptype, clobbers, ops, word, algn)	\
771:;							\
	ops;						\
772:;							\
	.pushsection .parainstructions,"a";		\
	 .align	algn;					\
	 word 771b;					\
	 .byte ptype;					\
	 .byte 772b-771b;				\
	 .short clobbers;				\
	.popsection


#define COND_PUSH(set, mask, reg)			\
	.if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg)			\
	.if ((~(set)) & mask); pop %reg; .endif

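/*
 * Worked example (not from the original source): the set names the
 * registers a call site may clobber, so those are *not* saved.
 * PV_SAVE_REGS(CLBR_NONE) pushes every register listed below, while
 * DISABLE_INTERRUPTS(CLBR_ANY) expands its
 * PV_SAVE_REGS(CLBR_ANY | CLBR_CALLEE_SAVE) to no pushes at all.
 */
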
#ifdef CONFIG_X86_64

#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_RAX, rax);		\
	COND_PUSH(set, CLBR_RCX, rcx);		\
	COND_PUSH(set, CLBR_RDX, rdx);		\
	COND_PUSH(set, CLBR_RSI, rsi);		\
	COND_PUSH(set, CLBR_RDI, rdi);		\
	COND_PUSH(set, CLBR_R8, r8);		\
	COND_PUSH(set, CLBR_R9, r9);		\
	COND_PUSH(set, CLBR_R10, r10);		\
	COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_R11, r11);		\
	COND_POP(set, CLBR_R10, r10);		\
	COND_POP(set, CLBR_R9, r9);		\
	COND_POP(set, CLBR_R8, r8);		\
	COND_POP(set, CLBR_RDI, rdi);		\
	COND_POP(set, CLBR_RSI, rsi);		\
	COND_POP(set, CLBR_RDX, rdx);		\
	COND_POP(set, CLBR_RCX, rcx);		\
	COND_POP(set, CLBR_RAX, rax)

#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
#define PARA_INDIRECT(addr)	*addr(%rip)
#else
#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_EAX, eax);		\
	COND_PUSH(set, CLBR_EDI, edi);		\
	COND_PUSH(set, CLBR_ECX, ecx);		\
	COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_EDX, edx);		\
	COND_POP(set, CLBR_ECX, ecx);		\
	COND_POP(set, CLBR_EDI, edi);		\
	COND_POP(set, CLBR_EAX, eax)

#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
#define PARA_INDIRECT(addr)	*%cs:addr
#endif

#define INTERRUPT_RETURN						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,	\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))

#define DISABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define ENABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,	\
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define USERGS_SYSRET32							\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))

#ifdef CONFIG_X86_32
#define GET_CR0_INTO_EAX				\
	push %ecx; push %edx;				\
	call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);	\
	pop %edx; pop %ecx

#define ENABLE_INTERRUPTS_SYSEXIT					\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))


#else	/* !CONFIG_X86_32 */

/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop.  The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */
#define SWAPGS_UNSAFE_STACK						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  swapgs)

/*
 * Note: swapgs is very special, and in practice is either going to be
 * implemented with a single "swapgs" instruction or something very
 * special.  Either way, we don't need to save any registers for
 * it.
 */
#define SWAPGS								\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs)		\
		 )

#define GET_CR2_INTO_RCX				\
	call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2);	\
	movq %rax, %rcx;				\
	xorq %rax, %rax;

#define PARAVIRT_ADJUST_EXCEPTION_FRAME					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
		  CLBR_NONE,						\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))

#define USERGS_SYSRET64							\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))

#define ENABLE_INTERRUPTS_SYSEXIT32					\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
#endif	/* CONFIG_X86_32 */

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */
#endif /* _ASM_X86_PARAVIRT_H */