#ifndef _ASM_X86_PARAVIRT_TYPES_H
#define _ASM_X86_PARAVIRT_TYPES_H

/* Bitmask of what can be clobbered: usually at least eax. */
#define CLBR_NONE 0
#define CLBR_EAX  (1 << 0)
#define CLBR_ECX  (1 << 1)
#define CLBR_EDX  (1 << 2)
#define CLBR_EDI  (1 << 3)

#ifdef CONFIG_X86_32
/* CLBR_ANY should match all the registers the platform has; for i386 that is all of them. */
#define CLBR_ANY  ((1 << 4) - 1)

#define CLBR_ARG_REGS	(CLBR_EAX | CLBR_EDX | CLBR_ECX)
#define CLBR_RET_REG	(CLBR_EAX | CLBR_EDX)
#define CLBR_SCRATCH	(0)
#else
#define CLBR_RAX  CLBR_EAX
#define CLBR_RCX  CLBR_ECX
#define CLBR_RDX  CLBR_EDX
#define CLBR_RDI  CLBR_EDI
#define CLBR_RSI  (1 << 4)
#define CLBR_R8   (1 << 5)
#define CLBR_R9   (1 << 6)
#define CLBR_R10  (1 << 7)
#define CLBR_R11  (1 << 8)

#define CLBR_ANY  ((1 << 9) - 1)

#define CLBR_ARG_REGS	(CLBR_RDI | CLBR_RSI | CLBR_RDX | \
			 CLBR_RCX | CLBR_R8 | CLBR_R9)
#define CLBR_RET_REG	(CLBR_RAX)
#define CLBR_SCRATCH	(CLBR_R10 | CLBR_R11)

#endif /* X86_64 */

#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)
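
/*
 * Worked example (derived from the masks above): on x86_64,
 * CLBR_ARG_REGS | CLBR_SCRATCH covers rdi, rsi, rdx, rcx and r8-r11, and
 * already excludes rax, so CLBR_CALLEE_SAVE is bits 1-8 (0x1fe).  On i386
 * it reduces to just CLBR_ECX, since eax and edx are the return registers.
 */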

#ifndef __ASSEMBLY__

#include <asm/desc_defs.h>
#include <asm/kmap_types.h>
#include <asm/pgtable_types.h>

struct page;
struct thread_struct;
struct desc_ptr;
struct tss_struct;
struct mm_struct;
struct desc_struct;
struct task_struct;
struct cpumask;

/*
 * Wrapper type for pointers to code which uses the non-standard
 * calling convention.  See PV_CALLEE_SAVE_REGS_THUNK in asm/paravirt.h.
 */
struct paravirt_callee_save {
	void *func;
};
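
/*
 * Illustrative sketch (not part of this header): asm/paravirt.h provides
 * PV_CALLEE_SAVE()/PV_CALLEE_SAVE_REGS_THUNK() for building these, so a
 * backend can register an ordinary C function, roughly like:
 *
 *	static unsigned long my_save_fl(void);	// hypothetical backend hook
 *	PV_CALLEE_SAVE_REGS_THUNK(my_save_fl);
 *	...
 *	pv_irq_ops.save_fl = PV_CALLEE_SAVE(my_save_fl);
 */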

/* general info */
struct pv_info {
	unsigned int kernel_rpl;
	int shared_kernel_pmd;

#ifdef CONFIG_X86_64
	u16 extra_user_64bit_cs;  /* __USER_CS if none */
#endif

	const char *name;
};

struct pv_init_ops {
	/*
	 * Patch may replace one of the defined code sequences with
	 * arbitrary code, subject to the same register constraints.
	 * This generally means the code is not free to clobber any
	 * registers other than EAX.  The patch function should return
	 * the number of bytes of code it generated, as the generic
	 * code NOP-pads the rest.
	 */
	unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
			  unsigned long addr, unsigned len);
};
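
/*
 * A minimal sketch (an assumption, not kernel code): a backend that wants
 * no special patching can simply defer to the default patcher declared
 * later in this header:
 *
 *	static unsigned my_patch(u8 type, u16 clobbers, void *insnbuf,
 *				 unsigned long addr, unsigned len)
 *	{
 *		return paravirt_patch_default(type, clobbers, insnbuf,
 *					      addr, len);
 *	}
 */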

struct pv_lazy_ops {
	/* Set deferred update mode, used for batching operations. */
	void (*enter)(void);
	void (*leave)(void);
	void (*flush)(void);
};
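
/*
 * Typical batching pattern (a sketch; the generic pagetable code drives
 * this through the arch_enter/leave_lazy_mmu_mode() wrappers):
 *
 *	paravirt_enter_lazy_mmu();	// queue instead of issuing hypercalls
 *	...many set_pte()/set_pmd() updates...
 *	paravirt_leave_lazy_mmu();	// flush the queued updates at once
 */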

struct pv_time_ops {
	unsigned long long (*sched_clock)(void);
	unsigned long long (*steal_clock)(int cpu);
};

struct pv_cpu_ops {
	/* hooks for various privileged instructions */
	unsigned long (*get_debugreg)(int regno);
	void (*set_debugreg)(int regno, unsigned long value);

	void (*clts)(void);

	unsigned long (*read_cr0)(void);
	void (*write_cr0)(unsigned long);

	unsigned long (*read_cr4_safe)(void);
	unsigned long (*read_cr4)(void);
	void (*write_cr4)(unsigned long);

#ifdef CONFIG_X86_64
	unsigned long (*read_cr8)(void);
	void (*write_cr8)(unsigned long);
#endif

	/* Segment descriptor handling */
	void (*load_tr_desc)(void);
	void (*load_gdt)(const struct desc_ptr *);
	void (*load_idt)(const struct desc_ptr *);
	/* store_gdt has been removed. */
	void (*store_idt)(struct desc_ptr *);
	void (*set_ldt)(const void *desc, unsigned entries);
	unsigned long (*store_tr)(void);
	void (*load_tls)(struct thread_struct *t, unsigned int cpu);
#ifdef CONFIG_X86_64
	void (*load_gs_index)(unsigned int idx);
#endif
	void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
				const void *desc);
	void (*write_gdt_entry)(struct desc_struct *,
				int entrynum, const void *desc, int size);
	void (*write_idt_entry)(gate_desc *,
				int entrynum, const gate_desc *gate);
	void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
	void (*free_ldt)(struct desc_struct *ldt, unsigned entries);

	void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);

	void (*set_iopl_mask)(unsigned mask);

	void (*wbinvd)(void);
	void (*io_delay)(void);

	/* cpuid emulation, mostly so that caps bits can be disabled */
	void (*cpuid)(unsigned int *eax, unsigned int *ebx,
		      unsigned int *ecx, unsigned int *edx);

	/* Unsafe MSR operations.  These will warn or panic on failure. */
	u64 (*read_msr)(unsigned int msr);
	void (*write_msr)(unsigned int msr, unsigned low, unsigned high);

	/*
	 * Safe MSR operations.
	 * read sets err to 0 or -EIO.  write returns 0 or -EIO.
	 */
	u64 (*read_msr_safe)(unsigned int msr, int *err);
	int (*write_msr_safe)(unsigned int msr, unsigned low, unsigned high);

	u64 (*read_pmc)(int counter);

	/*
	 * Switch to usermode gs and return to 64-bit usermode using
	 * sysret.  Only used in 64-bit kernels to return to 64-bit
	 * processes.  Usermode register state, including %rsp, must
	 * already be restored.
	 */
	void (*usergs_sysret64)(void);

	/* Normal iret.  Jump to this with the standard iret stack
	   frame set up. */
	void (*iret)(void);

	void (*swapgs)(void);

	void (*start_context_switch)(struct task_struct *prev);
	void (*end_context_switch)(struct task_struct *next);
};

struct pv_irq_ops {
	/*
	 * Get/set interrupt state.  save_fl and restore_fl are only
	 * expected to use X86_EFLAGS_IF; all other bits
	 * returned from save_fl are undefined, and may be ignored by
	 * restore_fl.
	 *
	 * NOTE: the callers of these functions expect the callee to
	 * preserve more registers than the standard C calling
	 * convention does.
	 */
	struct paravirt_callee_save save_fl;
	struct paravirt_callee_save restore_fl;
	struct paravirt_callee_save irq_disable;
	struct paravirt_callee_save irq_enable;

	void (*safe_halt)(void);
	void (*halt)(void);

#ifdef CONFIG_X86_64
	void (*adjust_exception_frame)(void);
#endif
};
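
/*
 * For example, asm/paravirt.h wraps save_fl roughly like this (using the
 * PVOP_CALLEE0() macro defined later in this header):
 *
 *	static inline unsigned long arch_local_save_flags(void)
 *	{
 *		return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
 *	}
 */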

struct pv_mmu_ops {
	unsigned long (*read_cr2)(void);
	void (*write_cr2)(unsigned long);

	unsigned long (*read_cr3)(void);
	void (*write_cr3)(unsigned long);

	/*
	 * Hooks for intercepting the creation/use/destruction of an
	 * mm_struct.
	 */
	void (*activate_mm)(struct mm_struct *prev,
			    struct mm_struct *next);
	void (*dup_mmap)(struct mm_struct *oldmm,
			 struct mm_struct *mm);
	void (*exit_mmap)(struct mm_struct *mm);


	/* TLB operations */
	void (*flush_tlb_user)(void);
	void (*flush_tlb_kernel)(void);
	void (*flush_tlb_single)(unsigned long addr);
	void (*flush_tlb_others)(const struct cpumask *cpus,
				 struct mm_struct *mm,
				 unsigned long start,
				 unsigned long end);

	/* Hooks for allocating and freeing a pagetable top-level */
	int  (*pgd_alloc)(struct mm_struct *mm);
	void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);

	/*
	 * Hooks for allocating/releasing pagetable pages when they're
	 * attached to a pagetable
	 */
	void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
	void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
	void (*release_pte)(unsigned long pfn);
	void (*release_pmd)(unsigned long pfn);
	void (*release_pud)(unsigned long pfn);

	/* Pagetable manipulation functions */
	void (*set_pte)(pte_t *ptep, pte_t pteval);
	void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, pte_t pteval);
	void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
	void (*set_pmd_at)(struct mm_struct *mm, unsigned long addr,
			   pmd_t *pmdp, pmd_t pmdval);
	void (*pte_update)(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep);

	pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep);
	void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte);

	struct paravirt_callee_save pte_val;
	struct paravirt_callee_save make_pte;

	struct paravirt_callee_save pgd_val;
	struct paravirt_callee_save make_pgd;

#if CONFIG_PGTABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
	void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
	void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
			  pte_t *ptep);
	void (*pmd_clear)(pmd_t *pmdp);

#endif	/* CONFIG_X86_PAE */

	void (*set_pud)(pud_t *pudp, pud_t pudval);

	struct paravirt_callee_save pmd_val;
	struct paravirt_callee_save make_pmd;

#if CONFIG_PGTABLE_LEVELS == 4
	struct paravirt_callee_save pud_val;
	struct paravirt_callee_save make_pud;

	void (*set_pgd)(pgd_t *pgdp, pgd_t pgdval);
#endif	/* CONFIG_PGTABLE_LEVELS == 4 */
#endif	/* CONFIG_PGTABLE_LEVELS >= 3 */

	struct pv_lazy_ops lazy_mode;

	/* dom0 ops */

	/* Sometimes the physical address is a pfn, and sometimes it's
	   an mfn.  We can tell which is which from the index. */
	void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
			   phys_addr_t phys, pgprot_t flags);
};

struct arch_spinlock;
#ifdef CONFIG_SMP
#include <asm/spinlock_types.h>
#else
typedef u16 __ticket_t;
#endif

struct qspinlock;

struct pv_lock_ops {
#ifdef CONFIG_QUEUED_SPINLOCKS
	void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
	struct paravirt_callee_save queued_spin_unlock;

	void (*wait)(u8 *ptr, u8 val);
	void (*kick)(int cpu);
#else /* !CONFIG_QUEUED_SPINLOCKS */
	struct paravirt_callee_save lock_spinning;
	void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
#endif /* !CONFIG_QUEUED_SPINLOCKS */
};
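
/*
 * Sketch of how a hypervisor backend typically uses wait/kick (modeled
 * loosely on the KVM implementation in arch/x86/kernel/kvm.c; the names
 * below are hypothetical):
 *
 *	static void my_wait(u8 *ptr, u8 val)
 *	{
 *		// block this vCPU (e.g. halt) while *ptr still equals val
 *	}
 *	static void my_kick(int cpu)
 *	{
 *		// wake the vCPU that is spinning on the lock
 *	}
 */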

/* This contains all the paravirt structures: we get a convenient
 * number for each function using the offset which we use to indicate
 * what to patch. */
struct paravirt_patch_template {
	struct pv_init_ops pv_init_ops;
	struct pv_time_ops pv_time_ops;
	struct pv_cpu_ops pv_cpu_ops;
	struct pv_irq_ops pv_irq_ops;
	struct pv_mmu_ops pv_mmu_ops;
	struct pv_lock_ops pv_lock_ops;
};

extern struct pv_info pv_info;
extern struct pv_init_ops pv_init_ops;
extern struct pv_time_ops pv_time_ops;
extern struct pv_cpu_ops pv_cpu_ops;
extern struct pv_irq_ops pv_irq_ops;
extern struct pv_mmu_ops pv_mmu_ops;
extern struct pv_lock_ops pv_lock_ops;

#define PARAVIRT_PATCH(x)					\
	(offsetof(struct paravirt_patch_template, x) / sizeof(void *))
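
/*
 * Worked example (illustrative): pv_init_ops.patch is the first pointer in
 * the template, so PARAVIRT_PATCH(pv_init_ops.patch) is 0; each later op
 * gets the next integer, and the patcher can map the number back to the
 * structure slot it came from.
 */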

#define paravirt_type(op)				\
	[paravirt_typenum] "i" (PARAVIRT_PATCH(op)),	\
	[paravirt_opptr] "i" (&(op))
#define paravirt_clobber(clobber)		\
	[paravirt_clobber] "i" (clobber)

/*
 * Generate some code, and mark it as patchable by the
 * apply_paravirt() alternate instruction patcher.
 */
#define _paravirt_alt(insn_string, type, clobber)	\
	"771:\n\t" insn_string "\n" "772:\n"		\
	".pushsection .parainstructions,\"a\"\n"	\
	_ASM_ALIGN "\n"					\
	_ASM_PTR " 771b\n"				\
	" .byte " type "\n"				\
	" .byte 772b-771b\n"				\
	" .short " clobber "\n"				\
	".popsection\n"

/* Generate patchable code, with the default asm parameters. */
#define paravirt_alt(insn_string)					\
	_paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")

/* Simple instruction patching code. */
#define NATIVE_LABEL(a,x,b) "\n\t.globl " a #x "_" #b "\n" a #x "_" #b ":\n\t"

#define DEF_NATIVE(ops, name, code)					\
	__visible extern const char start_##ops##_##name[], end_##ops##_##name[];	\
	asm(NATIVE_LABEL("start_", ops, name) code NATIVE_LABEL("end_", ops, name))
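
/*
 * For example, the native patch tables (arch/x86/kernel/paravirt_patch_*.c)
 * define the raw instruction sequences this way:
 *
 *	DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
 *	DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
 *
 * which yields start_pv_irq_ops_irq_disable[]/end_pv_irq_ops_irq_disable[]
 * bracketing a single "cli" instruction.
 */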

unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
unsigned paravirt_patch_call(void *insnbuf,
			     const void *target, u16 tgt_clobbers,
			     unsigned long addr, u16 site_clobbers,
			     unsigned len);
unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
			    unsigned long addr, unsigned len);
unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
				unsigned long addr, unsigned len);

unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
			      const char *start, const char *end);

unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
		      unsigned long addr, unsigned len);

int paravirt_disable_iospace(void);

/*
 * This generates an indirect call based on the operation type number.
 * The type number, computed in PARAVIRT_PATCH, is derived from the
 * offset into the paravirt_patch_template structure, and can therefore be
 * freely converted back into a structure offset.
 */
#define PARAVIRT_CALL	"call *%c[paravirt_opptr];"

/*
 * These macros are intended to wrap calls through one of the paravirt
 * ops structs, so that they can be later identified and patched at
 * runtime.
 *
 * Normally, a call to a pv_op function is a simple indirect call:
 * (pv_op_struct.operations)(args...).
 *
 * Unfortunately, this is a relatively slow operation for modern CPUs,
 * because it cannot necessarily determine what the destination
 * address is.  In this case, the address is a runtime constant, so at
 * the very least we can patch the call to be a simple direct call, or,
 * ideally, patch an inline implementation into the callsite.  (Direct
 * calls are essentially free, because the call and return addresses
 * are completely predictable.)
 *
 * For i386, these macros rely on the standard gcc "regparm(3)" calling
 * convention, in which the first three arguments are placed in %eax,
 * %edx, %ecx (in that order), and the remaining arguments are placed
 * on the stack.  All caller-save registers (eax,edx,ecx) are expected
 * to be modified (either clobbered or used for return values).
 * X86_64, on the other hand, already specifies a register-based calling
 * convention, returning in %rax, with parameters going in %rdi, %rsi,
 * %rdx, and %rcx.  Note that for this reason, x86_64 does not need any
 * special handling for dealing with 4 arguments, unlike i386.
 * However, x86_64 also has to clobber all caller-saved registers, which
 * unfortunately are quite a few (r8-r11).
 *
 * The call instruction itself is marked by placing its start address
 * and size into the .parainstructions section, so that
 * apply_paravirt() in arch/x86/kernel/alternative.c can do the
 * appropriate patching under the control of the backend pv_init_ops
 * implementation.
 *
 * Unfortunately there's no way to get gcc to generate the args setup
 * for the call, and then allow the call itself to be generated by an
 * inline asm.  Because of this, we must do the complete arg setup and
 * return value handling from within these macros.  This is fairly
 * cumbersome.
 *
 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
 * It could be extended to more arguments, but there would be little
 * to be gained from that.  For each number of arguments, there are
 * the two VCALL and CALL variants for void and non-void functions.
 *
 * When there is a return value, the invoker of the macro must specify
 * the return type.  The macro then uses sizeof() on that type to
 * determine whether it's a 32- or 64-bit value, and places the return
 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
 * 64-bit).  On x86_64 the return is always in %rax, regardless of
 * the return value size.
 *
 * On i386, 64-bit arguments are passed as a pair of adjacent 32-bit
 * arguments, in low,high order.
 *
 * Small structures are passed and returned in registers.  The macro
 * calling convention can't directly deal with this, so the wrapper
 * functions must do this.
 *
 * These PVOP_* macros are only defined within this header.  This
 * means that all uses must be wrapped in inline functions.  This also
 * makes sure the incoming and outgoing types are always correct.
 */
#ifdef CONFIG_X86_32
#define PVOP_VCALL_ARGS							\
	unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx;	\
	register void *__sp asm("esp")
#define PVOP_CALL_ARGS			PVOP_VCALL_ARGS

#define PVOP_CALL_ARG1(x)		"a" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)		"d" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)		"c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS		"=a" (__eax), "=d" (__edx),	\
					"=c" (__ecx)
#define PVOP_CALL_CLOBBERS		PVOP_VCALL_CLOBBERS

#define PVOP_VCALLEE_CLOBBERS		"=a" (__eax), "=d" (__edx)
#define PVOP_CALLEE_CLOBBERS		PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS
#define VEXTRA_CLOBBERS
#else  /* CONFIG_X86_64 */
/* [re]ax isn't an arg, but the return val */
#define PVOP_VCALL_ARGS						\
	unsigned long __edi = __edi, __esi = __esi,		\
		__edx = __edx, __ecx = __ecx, __eax = __eax;	\
	register void *__sp asm("rsp")
#define PVOP_CALL_ARGS		PVOP_VCALL_ARGS

#define PVOP_CALL_ARG1(x)		"D" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)		"S" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)		"d" ((unsigned long)(x))
#define PVOP_CALL_ARG4(x)		"c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS	"=D" (__edi),				\
				"=S" (__esi), "=d" (__edx),		\
				"=c" (__ecx)
#define PVOP_CALL_CLOBBERS	PVOP_VCALL_CLOBBERS, "=a" (__eax)

/* void functions are still allowed [re]ax for scratch */
#define PVOP_VCALLEE_CLOBBERS	"=a" (__eax)
#define PVOP_CALLEE_CLOBBERS	PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS	 , "r8", "r9", "r10", "r11"
#define VEXTRA_CLOBBERS	 , "rax", "r8", "r9", "r10", "r11"
#endif	/* CONFIG_X86_32 */

#ifdef CONFIG_PARAVIRT_DEBUG
#define PVOP_TEST_NULL(op)	BUG_ON(op == NULL)
#else
#define PVOP_TEST_NULL(op)	((void)op)
#endif

#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr,		\
		      pre, post, ...)					\
	({								\
		rettype __ret;						\
		PVOP_CALL_ARGS;						\
		PVOP_TEST_NULL(op);					\
		/* This is 32-bit specific, but is okay in 64-bit */	\
		/* since this condition will never hold */		\
		if (sizeof(rettype) > sizeof(unsigned long)) {		\
			asm volatile(pre				\
				     paravirt_alt(PARAVIRT_CALL)	\
				     post				\
				     : call_clbr, "+r" (__sp)		\
				     : paravirt_type(op),		\
				       paravirt_clobber(clbr),		\
				       ##__VA_ARGS__			\
				     : "memory", "cc" extra_clbr);	\
			__ret = (rettype)((((u64)__edx) << 32) | __eax); \
		} else {						\
			asm volatile(pre				\
				     paravirt_alt(PARAVIRT_CALL)	\
				     post				\
				     : call_clbr, "+r" (__sp)		\
				     : paravirt_type(op),		\
				       paravirt_clobber(clbr),		\
				       ##__VA_ARGS__			\
				     : "memory", "cc" extra_clbr);	\
			__ret = (rettype)__eax;				\
		}							\
		__ret;							\
	})

#define __PVOP_CALL(rettype, op, pre, post, ...)			\
	____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS,	\
		      EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)

#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...)			\
	____PVOP_CALL(rettype, op.func, CLBR_RET_REG,			\
		      PVOP_CALLEE_CLOBBERS, ,				\
		      pre, post, ##__VA_ARGS__)


#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...)	\
	({								\
		PVOP_VCALL_ARGS;					\
		PVOP_TEST_NULL(op);					\
		asm volatile(pre					\
			     paravirt_alt(PARAVIRT_CALL)		\
			     post					\
			     : call_clbr, "+r" (__sp)			\
			     : paravirt_type(op),			\
			       paravirt_clobber(clbr),			\
			       ##__VA_ARGS__				\
			     : "memory", "cc" extra_clbr);		\
	})

#define __PVOP_VCALL(op, pre, post, ...)				\
	____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS,		\
		       VEXTRA_CLOBBERS,					\
		       pre, post, ##__VA_ARGS__)

#define __PVOP_VCALLEESAVE(op, pre, post, ...)				\
	____PVOP_VCALL(op.func, CLBR_RET_REG,				\
		       PVOP_VCALLEE_CLOBBERS, ,				\
		       pre, post, ##__VA_ARGS__)


#define PVOP_CALL0(rettype, op)						\
	__PVOP_CALL(rettype, op, "", "")
#define PVOP_VCALL0(op)							\
	__PVOP_VCALL(op, "", "")

#define PVOP_CALLEE0(rettype, op)					\
	__PVOP_CALLEESAVE(rettype, op, "", "")
#define PVOP_VCALLEE0(op)						\
	__PVOP_VCALLEESAVE(op, "", "")


#define PVOP_CALL1(rettype, op, arg1)					\
	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_VCALL1(op, arg1)						\
	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))

#define PVOP_CALLEE1(rettype, op, arg1)					\
	__PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_VCALLEE1(op, arg1)						\
	__PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))


#define PVOP_CALL2(rettype, op, arg1, arg2)				\
	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),		\
		    PVOP_CALL_ARG2(arg2))
#define PVOP_VCALL2(op, arg1, arg2)					\
	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),			\
		     PVOP_CALL_ARG2(arg2))

#define PVOP_CALLEE2(rettype, op, arg1, arg2)				\
	__PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1),	\
			  PVOP_CALL_ARG2(arg2))
#define PVOP_VCALLEE2(op, arg1, arg2)					\
	__PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1),		\
			   PVOP_CALL_ARG2(arg2))


#define PVOP_CALL3(rettype, op, arg1, arg2, arg3)			\
	__PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),		\
		    PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
#define PVOP_VCALL3(op, arg1, arg2, arg3)				\
	__PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),			\
		     PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))

/* This is the only difference in x86_64. We can make it much simpler */
#ifdef CONFIG_X86_32
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
	__PVOP_CALL(rettype, op,					\
		    "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
		    PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
	__PVOP_VCALL(op,						\
		     "push %[_arg4];", "lea 4(%%esp),%%esp;",		\
		     "0" ((u32)(arg1)), "1" ((u32)(arg2)),		\
		     "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#else
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)			\
	__PVOP_CALL(rettype, op, "", "",				\
		    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),		\
		    PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)				\
	__PVOP_VCALL(op, "", "",					\
		     PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),	\
		     PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#endif
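
/*
 * asm/paravirt.h wraps each op in an inline function built from these
 * macros; for example, roughly:
 *
 *	static inline unsigned long read_cr2(void)
 *	{
 *		return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
 *	}
 */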

/* Lazy mode for batching updates / context switch */
enum paravirt_lazy_mode {
	PARAVIRT_LAZY_NONE,
	PARAVIRT_LAZY_MMU,
	PARAVIRT_LAZY_CPU,
};

enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
void paravirt_start_context_switch(struct task_struct *prev);
void paravirt_end_context_switch(struct task_struct *next);

void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);
void paravirt_flush_lazy_mmu(void);

void _paravirt_nop(void);
u32 _paravirt_ident_32(u32);
u64 _paravirt_ident_64(u64);

#define paravirt_nop	((void *)_paravirt_nop)

/* These all sit in the .parainstructions section to tell us what to patch. */
struct paravirt_patch_site {
	u8 *instr;		/* original instructions */
	u8 instrtype;		/* type of this instruction */
	u8 len;			/* length of original instruction */
	u16 clobbers;		/* what registers you may clobber */
};
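
/*
 * Sketch (an assumption, simplified from arch/x86/kernel/alternative.c) of
 * how apply_paravirt() consumes these records:
 *
 *	struct paravirt_patch_site *p;
 *	for (p = start; p < end; p++) {
 *		unsigned used = pv_init_ops.patch(p->instrtype, p->clobbers,
 *						  insnbuf,
 *						  (unsigned long)p->instr,
 *						  p->len);
 *		// NOP-pad from used..p->len, then copy insnbuf over the site
 *	}
 */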

extern struct paravirt_patch_site __parainstructions[],
	__parainstructions_end[];

#endif	/* __ASSEMBLY__ */

#endif	/* _ASM_X86_PARAVIRT_TYPES_H */