/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>
#include <asm/nospec-branch.h>

#include <asm/paravirt_types.h>

#ifndef __ASSEMBLY__
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/cpumask.h>
#include <asm/frame.h>

static inline unsigned long long paravirt_sched_clock(void)
{
	return PVOP_CALL0(unsigned long long, time.sched_clock);
}

struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;

__visible void __native_queued_spin_unlock(struct qspinlock *lock);
bool pv_is_native_spin_unlock(void);
__visible bool __native_vcpu_is_preempted(long cpu);
bool pv_is_native_vcpu_is_preempted(void);

static inline u64 paravirt_steal_clock(int cpu)
{
	return PVOP_CALL1(u64, time.steal_clock, cpu);
}

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
	pv_ops.cpu.io_delay();
#ifdef REALLY_SLOW_IO
	pv_ops.cpu.io_delay();
	pv_ops.cpu.io_delay();
	pv_ops.cpu.io_delay();
#endif
}
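/*
 * Illustrative note (a sketch, not part of this header): the usual consumer
 * of slow_down_io() is the family of "_p" port accessors, which pair a port
 * access with the paravirtualized delay, roughly:
 *
 *	static inline void outb_p(unsigned char value, unsigned short port)
 *	{
 *		outb(value, port);
 *		slow_down_io();
 *	}
 */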

static inline void __flush_tlb(void)
{
	PVOP_VCALL0(mmu.flush_tlb_user);
}

static inline void __flush_tlb_global(void)
{
	PVOP_VCALL0(mmu.flush_tlb_kernel);
}

static inline void __flush_tlb_one_user(unsigned long addr)
{
	PVOP_VCALL1(mmu.flush_tlb_one_user, addr);
}

static inline void flush_tlb_others(const struct cpumask *cpumask,
				    const struct flush_tlb_info *info)
{
	PVOP_VCALL2(mmu.flush_tlb_others, cpumask, info);
}

static inline void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	PVOP_VCALL2(mmu.tlb_remove_table, tlb, table);
}

static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
	PVOP_VCALL1(mmu.exit_mmap, mm);
}

#ifdef CONFIG_PARAVIRT_XXL
static inline void load_sp0(unsigned long sp0)
{
	PVOP_VCALL1(cpu.load_sp0, sp0);
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
			   unsigned int *ecx, unsigned int *edx)
{
	PVOP_VCALL4(cpu.cpuid, eax, ebx, ecx, edx);
}

/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
	return PVOP_CALL1(unsigned long, cpu.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
	PVOP_VCALL2(cpu.set_debugreg, reg, val);
}
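/*
 * Illustrative use of the accessors above (a sketch; dr7 is just a local
 * variable in the example):
 *
 *	unsigned long dr7;
 *
 *	get_debugreg(dr7, 7);	reads DR7 through pv_ops.cpu.get_debugreg
 *	set_debugreg(dr7, 7);	writes it back through pv_ops.cpu.set_debugreg
 */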

static inline unsigned long read_cr0(void)
{
	return PVOP_CALL0(unsigned long, cpu.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
	PVOP_VCALL1(cpu.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
	return PVOP_CALLEE0(unsigned long, mmu.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
	PVOP_VCALL1(mmu.write_cr2, x);
}

static inline unsigned long __read_cr3(void)
{
	return PVOP_CALL0(unsigned long, mmu.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
	PVOP_VCALL1(mmu.write_cr3, x);
}

static inline void __write_cr4(unsigned long x)
{
	PVOP_VCALL1(cpu.write_cr4, x);
}

static inline void arch_safe_halt(void)
{
	PVOP_VCALL0(irq.safe_halt);
}

static inline void halt(void)
{
	PVOP_VCALL0(irq.halt);
}

static inline void wbinvd(void)
{
	PVOP_VCALL0(cpu.wbinvd);
}

#define get_kernel_rpl()  (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr)
{
	return PVOP_CALL1(u64, cpu.read_msr, msr);
}

static inline void paravirt_write_msr(unsigned msr,
				      unsigned low, unsigned high)
{
	PVOP_VCALL3(cpu.write_msr, msr, low, high);
}

static inline u64 paravirt_read_msr_safe(unsigned msr, int *err)
{
	return PVOP_CALL2(u64, cpu.read_msr_safe, msr, err);
}

static inline int paravirt_write_msr_safe(unsigned msr,
					  unsigned low, unsigned high)
{
	return PVOP_CALL3(int, cpu.write_msr_safe, msr, low, high);
}

#define rdmsr(msr, val1, val2)			\
do {						\
	u64 _l = paravirt_read_msr(msr);	\
	val1 = (u32)_l;				\
	val2 = _l >> 32;			\
} while (0)

#define wrmsr(msr, val1, val2)			\
do {						\
	paravirt_write_msr(msr, val1, val2);	\
} while (0)

#define rdmsrl(msr, val)			\
do {						\
	val = paravirt_read_msr(msr);		\
} while (0)

static inline void wrmsrl(unsigned msr, u64 val)
{
	wrmsr(msr, (u32)val, (u32)(val>>32));
}

#define wrmsr_safe(msr, a, b)	paravirt_write_msr_safe(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)				\
({							\
	int _err;					\
	u64 _l = paravirt_read_msr_safe(msr, &_err);	\
	(*a) = (u32)_l;					\
	(*b) = _l >> 32;				\
	_err;						\
})

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = paravirt_read_msr_safe(msr, &err);
	return err;
}
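/*
 * Illustrative use of the _safe variants (a sketch; MSR_EFER is only an
 * example register): unlike rdmsrl()/wrmsrl(), these return an error code
 * instead of faulting, so callers can probe MSRs that may not exist:
 *
 *	u64 efer;
 *
 *	if (rdmsrl_safe(MSR_EFER, &efer))
 *		return -EIO;	the MSR is not implemented here
 */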

static inline unsigned long long paravirt_read_pmc(int counter)
{
	return PVOP_CALL1(u64, cpu.read_pmc, counter);
}

#define rdpmc(counter, low, high)		\
do {						\
	u64 _l = paravirt_read_pmc(counter);	\
	low = (u32)_l;				\
	high = _l >> 32;			\
} while (0)

#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(cpu.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(cpu.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
	PVOP_VCALL0(cpu.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(cpu.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(cpu.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
	PVOP_VCALL2(cpu.set_ldt, addr, entries);
}
static inline unsigned long paravirt_store_tr(void)
{
	return PVOP_CALL0(unsigned long, cpu.store_tr);
}

#define store_tr(tr)	((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
	PVOP_VCALL2(cpu.load_tls, t, cpu);
}

#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
	PVOP_VCALL1(cpu.load_gs_index, gs);
}
#endif

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
				   const void *desc)
{
	PVOP_VCALL3(cpu.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
				   void *desc, int type)
{
	PVOP_VCALL4(cpu.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
	PVOP_VCALL3(cpu.write_idt_entry, dt, entry, g);
}
static inline void set_iopl_mask(unsigned mask)
{
	PVOP_VCALL1(cpu.set_iopl_mask, mask);
}

static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
	PVOP_VCALL2(mmu.activate_mm, prev, next);
}

static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
					  struct mm_struct *mm)
{
	PVOP_VCALL2(mmu.dup_mmap, oldmm, mm);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
	return PVOP_CALL1(int, mmu.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	PVOP_VCALL2(mmu.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_pmd, mm, pfn);
}

static inline void paravirt_release_pmd(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_pud, pfn);
}

static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_p4d, mm, pfn);
}

static inline void paravirt_release_p4d(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_p4d, pfn);
}

static inline pte_t __pte(pteval_t val)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pteval_t, mmu.make_pte, val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pteval_t, mmu.make_pte, val);

	return (pte_t) { .pte = ret };
}

static inline pteval_t pte_val(pte_t pte)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pteval_t, mmu.pte_val,
				   pte.pte, (u64)pte.pte >> 32);
	else
		ret = PVOP_CALLEE1(pteval_t, mmu.pte_val, pte.pte);

	return ret;
}

static inline pgd_t __pgd(pgdval_t val)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pgdval_t, mmu.make_pgd, val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pgdval_t, mmu.make_pgd, val);

	return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pgdval_t, mmu.pgd_val,
				   pgd.pgd, (u64)pgd.pgd >> 32);
	else
		ret = PVOP_CALLEE1(pgdval_t, mmu.pgd_val, pgd.pgd);

	return ret;
}

#define  __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
					   pte_t *ptep)
{
	pteval_t ret;

	ret = PVOP_CALL3(pteval_t, mmu.ptep_modify_prot_start, vma, addr, ptep);

	return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
					   pte_t *ptep, pte_t old_pte, pte_t pte)
{

	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_ops.mmu.ptep_modify_prot_commit(vma, addr, ptep, pte);
	else
		PVOP_VCALL4(mmu.ptep_modify_prot_commit,
			    vma, addr, ptep, pte.pte);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		PVOP_VCALL3(mmu.set_pte, ptep, pte.pte, (u64)pte.pte >> 32);
	else
		PVOP_VCALL2(mmu.set_pte, ptep, pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_ops.mmu.set_pte_at(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(mmu.set_pte_at, mm, addr, ptep, pte.pte);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	pmdval_t val = native_pmd_val(pmd);

	if (sizeof(pmdval_t) > sizeof(long))
		PVOP_VCALL3(mmu.set_pmd, pmdp, val, (u64)val >> 32);
	else
		PVOP_VCALL2(mmu.set_pmd, pmdp, val);
}

#if CONFIG_PGTABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pmdval_t, mmu.make_pmd, val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pmdval_t, mmu.make_pmd, val);

	return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pmdval_t, mmu.pmd_val,
				   pmd.pmd, (u64)pmd.pmd >> 32);
	else
		ret = PVOP_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd);

	return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	pudval_t val = native_pud_val(pud);

	if (sizeof(pudval_t) > sizeof(long))
		PVOP_VCALL3(mmu.set_pud, pudp, val, (u64)val >> 32);
	else
		PVOP_VCALL2(mmu.set_pud, pudp, val);
}
#if CONFIG_PGTABLE_LEVELS >= 4
static inline pud_t __pud(pudval_t val)
{
	pudval_t ret;

	ret = PVOP_CALLEE1(pudval_t, mmu.make_pud, val);

	return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
	return PVOP_CALLEE1(pudval_t, mmu.pud_val, pud.pud);
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	p4dval_t val = native_p4d_val(p4d);

	PVOP_VCALL2(mmu.set_p4d, p4dp, val);
}

#if CONFIG_PGTABLE_LEVELS >= 5

static inline p4d_t __p4d(p4dval_t val)
{
	p4dval_t ret = PVOP_CALLEE1(p4dval_t, mmu.make_p4d, val);

	return (p4d_t) { ret };
}

static inline p4dval_t p4d_val(p4d_t p4d)
{
	return PVOP_CALLEE1(p4dval_t, mmu.p4d_val, p4d.p4d);
}

static inline void __set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	PVOP_VCALL2(mmu.set_pgd, pgdp, native_pgd_val(pgd));
}

#define set_pgd(pgdp, pgdval) do {					\
	if (pgtable_l5_enabled())					\
		__set_pgd(pgdp, pgdval);				\
	else								\
		set_p4d((p4d_t *)(pgdp), (p4d_t) { (pgdval).pgd });	\
} while (0)

#define pgd_clear(pgdp) do {						\
	if (pgtable_l5_enabled())					\
		set_pgd(pgdp, __pgd(0));				\
} while (0)

#endif  /* CONFIG_PGTABLE_LEVELS == 5 */

static inline void p4d_clear(p4d_t *p4dp)
{
	set_p4d(p4dp, __p4d(0));
}

#endif	/* CONFIG_PGTABLE_LEVELS == 4 */

#endif	/* CONFIG_PGTABLE_LEVELS >= 3 */

#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
   64-bit pte atomically */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	PVOP_VCALL3(mmu.set_pte_atomic, ptep, pte.pte, pte.pte >> 32);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	PVOP_VCALL3(mmu.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	PVOP_VCALL1(mmu.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

#define  __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
	PVOP_VCALL1(cpu.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
	PVOP_VCALL1(cpu.end_context_switch, next);
}

#define  __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
	PVOP_VCALL0(mmu.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	PVOP_VCALL0(mmu.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
	PVOP_VCALL0(mmu.lazy_mode.flush);
}

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
				phys_addr_t phys, pgprot_t flags)
{
	pv_ops.mmu.set_fixmap(idx, phys, flags);
}
#endif

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
							 u32 val)
{
	PVOP_VCALL2(lock.queued_spin_lock_slowpath, lock, val);
}

static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
{
	PVOP_VCALLEE1(lock.queued_spin_unlock, lock);
}

static __always_inline void pv_wait(u8 *ptr, u8 val)
{
	PVOP_VCALL2(lock.wait, ptr, val);
}

static __always_inline void pv_kick(int cpu)
{
	PVOP_VCALL1(lock.kick, cpu);
}

static __always_inline bool pv_vcpu_is_preempted(long cpu)
{
	return PVOP_CALLEE1(bool, lock.vcpu_is_preempted, cpu);
}

void __raw_callee_save___native_queued_spin_unlock(struct qspinlock *lock);
bool __raw_callee_save___native_vcpu_is_preempted(long cpu);

#endif /* SMP && PARAVIRT_SPINLOCKS */
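/*
 * Sketch of how a hypervisor backend typically fills in the lock hooks above
 * (illustrative only; hv_wait/hv_kick/send_wakeup_ipi are hypothetical):
 * pv_wait() blocks a vCPU until its lock byte changes and pv_kick() wakes
 * the waiter, e.g.
 *
 *	static void hv_wait(u8 *ptr, u8 val)
 *	{
 *		if (READ_ONCE(*ptr) == val)
 *			halt();			wait until kicked or interrupted
 *	}
 *
 *	static void hv_kick(int cpu)
 *	{
 *		send_wakeup_ipi(cpu);		hypervisor-specific wakeup
 *	}
 */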

#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"

/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS		"pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS	"popl  %ecx;"

#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS						\
	"push %rcx;"							\
	"push %rdx;"							\
	"push %rsi;"							\
	"push %rdi;"							\
	"push %r8;"							\
	"push %r9;"							\
	"push %r10;"							\
	"push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS					\
	"pop %r11;"							\
	"pop %r10;"							\
	"pop %r9;"							\
	"pop %r8;"							\
	"pop %rdi;"							\
	"pop %rsi;"							\
	"pop %rdx;"							\
	"pop %rcx;"

/* Saving every register would be overkill: we push only the argument
 * register and declare the remaining caller-saved registers as clobbers. */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value. This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available. It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call. The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_THUNK_NAME(func) "__raw_callee_save_" #func
#define PV_CALLEE_SAVE_REGS_THUNK(func)					\
	extern typeof(func) __raw_callee_save_##func;			\
									\
	asm(".pushsection .text;"					\
	    ".globl " PV_THUNK_NAME(func) ";"				\
	    ".type " PV_THUNK_NAME(func) ", @function;"			\
	    PV_THUNK_NAME(func) ":"					\
	    FRAME_BEGIN							\
	    PV_SAVE_ALL_CALLER_REGS					\
	    "call " #func ";"						\
	    PV_RESTORE_ALL_CALLER_REGS					\
	    FRAME_END							\
	    "ret;"							\
	    ".size " PV_THUNK_NAME(func) ", .-" PV_THUNK_NAME(func) ";"	\
	    ".popsection")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)						\
	((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)			\
	((struct paravirt_callee_save) { func })
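/*
 * Illustrative pairing of the three macros above (a sketch; my_unlock is a
 * hypothetical C implementation of a pv op):
 *
 *	PV_CALLEE_SAVE_REGS_THUNK(my_unlock);	emits __raw_callee_save_my_unlock
 *
 *	pv_ops.lock.queued_spin_unlock = PV_CALLEE_SAVE(my_unlock);
 *
 * A function that already follows the callee-save convention (e.g. one
 * written in assembly) is installed with __PV_IS_CALLEE_SAVE(func) instead.
 */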

#ifdef CONFIG_PARAVIRT_XXL
static inline notrace unsigned long arch_local_save_flags(void)
{
	return PVOP_CALLEE0(unsigned long, irq.save_fl);
}

static inline notrace void arch_local_irq_restore(unsigned long f)
{
	PVOP_VCALLEE1(irq.restore_fl, f);
}

static inline notrace void arch_local_irq_disable(void)
{
	PVOP_VCALLEE0(irq.irq_disable);
}

static inline notrace void arch_local_irq_enable(void)
{
	PVOP_VCALLEE0(irq.irq_enable);
}

static inline notrace unsigned long arch_local_irq_save(void)
{
	unsigned long f;

	f = arch_local_save_flags();
	arch_local_irq_disable();
	return f;
}
#endif


/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

extern void default_banner(void);

#else  /* __ASSEMBLY__ */

#define _PVSITE(ptype, ops, word, algn)		\
771:;						\
	ops;					\
772:;						\
	.pushsection .parainstructions,"a";	\
	 .align	algn;				\
	 word 771b;				\
	 .byte ptype;				\
	 .byte 772b-771b;			\
	.popsection


#define COND_PUSH(set, mask, reg)			\
	.if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg)			\
	.if ((~(set)) & mask); pop %reg; .endif

#ifdef CONFIG_X86_64

#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_RAX, rax);		\
	COND_PUSH(set, CLBR_RCX, rcx);		\
	COND_PUSH(set, CLBR_RDX, rdx);		\
	COND_PUSH(set, CLBR_RSI, rsi);		\
	COND_PUSH(set, CLBR_RDI, rdi);		\
	COND_PUSH(set, CLBR_R8, r8);		\
	COND_PUSH(set, CLBR_R9, r9);		\
	COND_PUSH(set, CLBR_R10, r10);		\
	COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_R11, r11);		\
	COND_POP(set, CLBR_R10, r10);		\
	COND_POP(set, CLBR_R9, r9);		\
	COND_POP(set, CLBR_R8, r8);		\
	COND_POP(set, CLBR_RDI, rdi);		\
	COND_POP(set, CLBR_RSI, rsi);		\
	COND_POP(set, CLBR_RDX, rdx);		\
	COND_POP(set, CLBR_RCX, rcx);		\
	COND_POP(set, CLBR_RAX, rax)

#define PARA_PATCH(off)		((off) / 8)
#define PARA_SITE(ptype, ops)	_PVSITE(ptype, ops, .quad, 8)
#define PARA_INDIRECT(addr)	*addr(%rip)
#else
#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_EAX, eax);		\
	COND_PUSH(set, CLBR_EDI, edi);		\
	COND_PUSH(set, CLBR_ECX, ecx);		\
	COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_EDX, edx);		\
	COND_POP(set, CLBR_ECX, ecx);		\
	COND_POP(set, CLBR_EDI, edi);		\
	COND_POP(set, CLBR_EAX, eax)

#define PARA_PATCH(off)		((off) / 4)
#define PARA_SITE(ptype, ops)	_PVSITE(ptype, ops, .long, 4)
#define PARA_INDIRECT(addr)	*%cs:addr
#endif

#ifdef CONFIG_PARAVIRT_XXL
#define INTERRUPT_RETURN						\
	PARA_SITE(PARA_PATCH(PV_CPU_iret),				\
		  ANNOTATE_RETPOLINE_SAFE;				\
		  jmp PARA_INDIRECT(pv_ops+PV_CPU_iret);)

#define DISABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(PV_IRQ_irq_disable),			\
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  ANNOTATE_RETPOLINE_SAFE;				\
		  call PARA_INDIRECT(pv_ops+PV_IRQ_irq_disable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define ENABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(PV_IRQ_irq_enable),			\
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  ANNOTATE_RETPOLINE_SAFE;				\
		  call PARA_INDIRECT(pv_ops+PV_IRQ_irq_enable);		\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
#endif

#ifdef CONFIG_X86_64
#ifdef CONFIG_PARAVIRT_XXL
/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop.  The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */
#define SWAPGS_UNSAFE_STACK						\
	PARA_SITE(PARA_PATCH(PV_CPU_swapgs), swapgs)

/*
 * Note: swapgs is very special, and in practice is either going to be
 * implemented with a single "swapgs" instruction or something very
 * special.  Either way, we don't need to save any registers for
 * it.
 */
#define SWAPGS								\
	PARA_SITE(PARA_PATCH(PV_CPU_swapgs),				\
		  ANNOTATE_RETPOLINE_SAFE;				\
		  call PARA_INDIRECT(pv_ops+PV_CPU_swapgs);		\
		 )

#define USERGS_SYSRET64							\
	PARA_SITE(PARA_PATCH(PV_CPU_usergs_sysret64),			\
		  ANNOTATE_RETPOLINE_SAFE;				\
		  jmp PARA_INDIRECT(pv_ops+PV_CPU_usergs_sysret64);)

#ifdef CONFIG_DEBUG_ENTRY
#define SAVE_FLAGS(clobbers)						\
	PARA_SITE(PARA_PATCH(PV_IRQ_save_fl),				\
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  ANNOTATE_RETPOLINE_SAFE;				\
		  call PARA_INDIRECT(pv_ops+PV_IRQ_save_fl);		\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
#endif
#endif /* CONFIG_PARAVIRT_XXL */
#endif /* CONFIG_X86_64 */

#ifdef CONFIG_PARAVIRT_XXL

#define GET_CR2_INTO_AX						\
	PARA_SITE(PARA_PATCH(PV_MMU_read_cr2),			\
		  ANNOTATE_RETPOLINE_SAFE;			\
		  call PARA_INDIRECT(pv_ops+PV_MMU_read_cr2);	\
		 )

#endif /* CONFIG_PARAVIRT_XXL */


#endif /* __ASSEMBLY__ */
#else  /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
#endif /* !CONFIG_PARAVIRT */

#ifndef __ASSEMBLY__
#ifndef CONFIG_PARAVIRT_XXL
static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
					  struct mm_struct *mm)
{
}
#endif

#ifndef CONFIG_PARAVIRT
static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
}
#endif
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_PARAVIRT_H */