/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>
#include <asm/nospec-branch.h>

#include <asm/paravirt_types.h>

#ifndef __ASSEMBLY__
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/cpumask.h>
#include <asm/frame.h>

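/*
 * Each PVOP_CALL*()/PVOP_VCALL*() wrapper below expands (roughly) to an
 * indirect call through the matching member of the pv_ops structure, e.g.
 *
 *	PVOP_CALL0(unsigned long long, time.sched_clock);
 *
 * behaves like pv_ops.time.sched_clock().  The call site is also recorded
 * in .parainstructions so that it can later be patched with the native
 * code or a direct call to the hypervisor's implementation.
 */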
static inline unsigned long long paravirt_sched_clock(void)
{
	return PVOP_CALL0(unsigned long long, time.sched_clock);
}

struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;

__visible void __native_queued_spin_unlock(struct qspinlock *lock);
bool pv_is_native_spin_unlock(void);
__visible bool __native_vcpu_is_preempted(long cpu);
bool pv_is_native_vcpu_is_preempted(void);

static inline u64 paravirt_steal_clock(int cpu)
{
	return PVOP_CALL1(u64, time.steal_clock, cpu);
}

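/*
 * A hypervisor guest (KVM, Xen, ...) typically replaces sched_clock and
 * steal_clock with its own implementations so the scheduler gets a stable
 * clock and can account time the vCPU spent stolen by the host.
 */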
/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
	pv_ops.cpu.io_delay();
#ifdef REALLY_SLOW_IO
	pv_ops.cpu.io_delay();
	pv_ops.cpu.io_delay();
	pv_ops.cpu.io_delay();
#endif
}

static inline void __flush_tlb(void)
{
	PVOP_VCALL0(mmu.flush_tlb_user);
}

static inline void __flush_tlb_global(void)
{
	PVOP_VCALL0(mmu.flush_tlb_kernel);
}

static inline void __flush_tlb_one_user(unsigned long addr)
{
	PVOP_VCALL1(mmu.flush_tlb_one_user, addr);
}

static inline void flush_tlb_others(const struct cpumask *cpumask,
				    const struct flush_tlb_info *info)
{
	PVOP_VCALL2(mmu.flush_tlb_others, cpumask, info);
}

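/*
 * The TLB-flush hooks above back the arch flush_tlb_*() helpers; under a
 * hypervisor they can be replaced with batched or paravirtualized remote
 * flushes (Xen, or KVM's PV TLB flushing), while the native versions map
 * to the usual CR3-reload/INVLPG sequences.
 */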
static inline void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	PVOP_VCALL2(mmu.tlb_remove_table, tlb, table);
}

static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
	PVOP_VCALL1(mmu.exit_mmap, mm);
}

#ifdef CONFIG_PARAVIRT_XXL
static inline void load_sp0(unsigned long sp0)
{
	PVOP_VCALL1(cpu.load_sp0, sp0);
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
			   unsigned int *ecx, unsigned int *edx)
{
	PVOP_VCALL4(cpu.cpuid, eax, ebx, ecx, edx);
}

/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
	return PVOP_CALL1(unsigned long, cpu.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
	PVOP_VCALL2(cpu.set_debugreg, reg, val);
}

static inline unsigned long read_cr0(void)
{
	return PVOP_CALL0(unsigned long, cpu.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
	PVOP_VCALL1(cpu.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
	return PVOP_CALLEE0(unsigned long, mmu.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
	PVOP_VCALL1(mmu.write_cr2, x);
}

static inline unsigned long __read_cr3(void)
{
	return PVOP_CALL0(unsigned long, mmu.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
	PVOP_VCALL1(mmu.write_cr3, x);
}

static inline void __write_cr4(unsigned long x)
{
	PVOP_VCALL1(cpu.write_cr4, x);
}

static inline void arch_safe_halt(void)
{
	PVOP_VCALL0(irq.safe_halt);
}

static inline void halt(void)
{
	PVOP_VCALL0(irq.halt);
}

static inline void wbinvd(void)
{
	PVOP_VCALL0(cpu.wbinvd);
}

#define get_kernel_rpl()	(pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr)
{
	return PVOP_CALL1(u64, cpu.read_msr, msr);
}

static inline void paravirt_write_msr(unsigned msr,
				      unsigned low, unsigned high)
{
	PVOP_VCALL3(cpu.write_msr, msr, low, high);
}

static inline u64 paravirt_read_msr_safe(unsigned msr, int *err)
{
	return PVOP_CALL2(u64, cpu.read_msr_safe, msr, err);
}

static inline int paravirt_write_msr_safe(unsigned msr,
					  unsigned low, unsigned high)
{
	return PVOP_CALL3(int, cpu.write_msr_safe, msr, low, high);
}

#define rdmsr(msr, val1, val2)			\
do {						\
	u64 _l = paravirt_read_msr(msr);	\
	val1 = (u32)_l;				\
	val2 = _l >> 32;			\
} while (0)

#define wrmsr(msr, val1, val2)			\
do {						\
	paravirt_write_msr(msr, val1, val2);	\
} while (0)

#define rdmsrl(msr, val)			\
do {						\
	val = paravirt_read_msr(msr);		\
} while (0)

static inline void wrmsrl(unsigned msr, u64 val)
{
	wrmsr(msr, (u32)val, (u32)(val>>32));
}

#define wrmsr_safe(msr, a, b)	paravirt_write_msr_safe(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)				\
({							\
	int _err;					\
	u64 _l = paravirt_read_msr_safe(msr, &_err);	\
	(*a) = (u32)_l;					\
	(*b) = _l >> 32;				\
	_err;						\
})

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = paravirt_read_msr_safe(msr, &err);
	return err;
}

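/*
 * Typical use of the _safe variants (illustrative sketch only): probe an
 * MSR that may not be implemented on the current CPU without risking a
 * #GP-induced crash.
 *
 *	u32 lo, hi;
 *	int err = rdmsr_safe(MSR_IA32_PLATFORM_ID, &lo, &hi);
 *
 * err is zero and hi:lo holds the value if the MSR exists; it is non-zero
 * if the access faulted and was fixed up.
 */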
static inline unsigned long long paravirt_read_pmc(int counter)
{
	return PVOP_CALL1(u64, cpu.read_pmc, counter);
}

#define rdpmc(counter, low, high)		\
do {						\
	u64 _l = paravirt_read_pmc(counter);	\
	low = (u32)_l;				\
	high = _l >> 32;			\
} while (0)

#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(cpu.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(cpu.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
	PVOP_VCALL0(cpu.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(cpu.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(cpu.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
	PVOP_VCALL2(cpu.set_ldt, addr, entries);
}
static inline unsigned long paravirt_store_tr(void)
{
	return PVOP_CALL0(unsigned long, cpu.store_tr);
}

#define store_tr(tr)	((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
	PVOP_VCALL2(cpu.load_tls, t, cpu);
}

#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
	PVOP_VCALL1(cpu.load_gs_index, gs);
}
#endif

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
				   const void *desc)
{
	PVOP_VCALL3(cpu.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
				   void *desc, int type)
{
	PVOP_VCALL4(cpu.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
	PVOP_VCALL3(cpu.write_idt_entry, dt, entry, g);
}

#ifdef CONFIG_X86_IOPL_IOPERM
static inline void tss_update_io_bitmap(void)
{
	PVOP_VCALL0(cpu.update_io_bitmap);
}
#endif

static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
	PVOP_VCALL2(mmu.activate_mm, prev, next);
}

static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
					  struct mm_struct *mm)
{
	PVOP_VCALL2(mmu.dup_mmap, oldmm, mm);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
	return PVOP_CALL1(int, mmu.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	PVOP_VCALL2(mmu.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_pmd, mm, pfn);
}

static inline void paravirt_release_pmd(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_pud, pfn);
}

static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(mmu.alloc_p4d, mm, pfn);
}

static inline void paravirt_release_p4d(unsigned long pfn)
{
	PVOP_VCALL1(mmu.release_p4d, pfn);
}

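/*
 * The page-table accessors below handle the 32-bit PAE case, where
 * pte/pmd values are 64 bits wide but a long is only 32: the value is
 * split into low and high words and passed as two arguments to the pvop,
 * which is what the sizeof(pteval_t) > sizeof(long) checks select.
 */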
static inline pte_t __pte(pteval_t val)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pteval_t, mmu.make_pte, val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pteval_t, mmu.make_pte, val);

	return (pte_t) { .pte = ret };
}

static inline pteval_t pte_val(pte_t pte)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pteval_t, mmu.pte_val,
				   pte.pte, (u64)pte.pte >> 32);
	else
		ret = PVOP_CALLEE1(pteval_t, mmu.pte_val, pte.pte);

	return ret;
}

static inline pgd_t __pgd(pgdval_t val)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pgdval_t, mmu.make_pgd, val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pgdval_t, mmu.make_pgd, val);

	return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pgdval_t, mmu.pgd_val,
				   pgd.pgd, (u64)pgd.pgd >> 32);
	else
		ret = PVOP_CALLEE1(pgdval_t, mmu.pgd_val, pgd.pgd);

	return ret;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
					   pte_t *ptep)
{
	pteval_t ret;

	ret = PVOP_CALL3(pteval_t, mmu.ptep_modify_prot_start, vma, addr, ptep);

	return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
					   pte_t *ptep, pte_t old_pte, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_ops.mmu.ptep_modify_prot_commit(vma, addr, ptep, pte);
	else
		PVOP_VCALL4(mmu.ptep_modify_prot_commit,
			    vma, addr, ptep, pte.pte);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		PVOP_VCALL3(mmu.set_pte, ptep, pte.pte, (u64)pte.pte >> 32);
	else
		PVOP_VCALL2(mmu.set_pte, ptep, pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_ops.mmu.set_pte_at(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(mmu.set_pte_at, mm, addr, ptep, pte.pte);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	pmdval_t val = native_pmd_val(pmd);

	if (sizeof(pmdval_t) > sizeof(long))
		PVOP_VCALL3(mmu.set_pmd, pmdp, val, (u64)val >> 32);
	else
		PVOP_VCALL2(mmu.set_pmd, pmdp, val);
}

#if CONFIG_PGTABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pmdval_t, mmu.make_pmd, val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pmdval_t, mmu.make_pmd, val);

	return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pmdval_t, mmu.pmd_val,
				   pmd.pmd, (u64)pmd.pmd >> 32);
	else
		ret = PVOP_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd);

	return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	pudval_t val = native_pud_val(pud);

	if (sizeof(pudval_t) > sizeof(long))
		PVOP_VCALL3(mmu.set_pud, pudp, val, (u64)val >> 32);
	else
		PVOP_VCALL2(mmu.set_pud, pudp, val);
}
#if CONFIG_PGTABLE_LEVELS >= 4
static inline pud_t __pud(pudval_t val)
{
	pudval_t ret;

	ret = PVOP_CALLEE1(pudval_t, mmu.make_pud, val);

	return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
	return PVOP_CALLEE1(pudval_t, mmu.pud_val, pud.pud);
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	p4dval_t val = native_p4d_val(p4d);

	PVOP_VCALL2(mmu.set_p4d, p4dp, val);
}

#if CONFIG_PGTABLE_LEVELS >= 5

static inline p4d_t __p4d(p4dval_t val)
{
	p4dval_t ret = PVOP_CALLEE1(p4dval_t, mmu.make_p4d, val);

	return (p4d_t) { ret };
}

static inline p4dval_t p4d_val(p4d_t p4d)
{
	return PVOP_CALLEE1(p4dval_t, mmu.p4d_val, p4d.p4d);
}

static inline void __set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	PVOP_VCALL2(mmu.set_pgd, pgdp, native_pgd_val(pgd));
}

#define set_pgd(pgdp, pgdval) do {					\
	if (pgtable_l5_enabled())					\
		__set_pgd(pgdp, pgdval);				\
	else								\
		set_p4d((p4d_t *)(pgdp), (p4d_t) { (pgdval).pgd });	\
} while (0)

#define pgd_clear(pgdp) do {						\
	if (pgtable_l5_enabled())					\
		set_pgd(pgdp, __pgd(0));				\
} while (0)

#endif  /* CONFIG_PGTABLE_LEVELS == 5 */

static inline void p4d_clear(p4d_t *p4dp)
{
	set_p4d(p4dp, __p4d(0));
}

#endif	/* CONFIG_PGTABLE_LEVELS == 4 */

#endif	/* CONFIG_PGTABLE_LEVELS >= 3 */

#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
   64-bit pte atomically */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	PVOP_VCALL3(mmu.set_pte_atomic, ptep, pte.pte, pte.pte >> 32);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	PVOP_VCALL3(mmu.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	PVOP_VCALL1(mmu.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

#define __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
	PVOP_VCALL1(cpu.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
	PVOP_VCALL1(cpu.end_context_switch, next);
}

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
	PVOP_VCALL0(mmu.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	PVOP_VCALL0(mmu.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
	PVOP_VCALL0(mmu.lazy_mode.flush);
}

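/*
 * The lazy-MMU hooks bracket regions where a batch of page-table updates
 * is about to be issued; a hypervisor backend (Xen, for instance) may
 * queue the updates and flush them in a single hypercall when lazy mode
 * is left, while the native implementation treats them as no-ops.
 */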
static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
				phys_addr_t phys, pgprot_t flags)
{
	pv_ops.mmu.set_fixmap(idx, phys, flags);
}
#endif

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
							  u32 val)
{
	PVOP_VCALL2(lock.queued_spin_lock_slowpath, lock, val);
}

static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
{
	PVOP_VCALLEE1(lock.queued_spin_unlock, lock);
}

static __always_inline void pv_wait(u8 *ptr, u8 val)
{
	PVOP_VCALL2(lock.wait, ptr, val);
}

static __always_inline void pv_kick(int cpu)
{
	PVOP_VCALL1(lock.kick, cpu);
}

static __always_inline bool pv_vcpu_is_preempted(long cpu)
{
	return PVOP_CALLEE1(bool, lock.vcpu_is_preempted, cpu);
}

void __raw_callee_save___native_queued_spin_unlock(struct qspinlock *lock);
bool __raw_callee_save___native_vcpu_is_preempted(long cpu);

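/*
 * With paravirt spinlocks, a vCPU that would otherwise spin can block in
 * pv_wait() until the lock holder (or the hypervisor) wakes it with
 * pv_kick(); pv_vcpu_is_preempted() lets the scheduler and the lock
 * slowpaths avoid busy-waiting on a vCPU that is not currently running.
 */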
#endif /* SMP && PARAVIRT_SPINLOCKS */

#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"

/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS		"pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS	"popl  %ecx;"

#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS						\
	"push %rcx;"							\
	"push %rdx;"							\
	"push %rsi;"							\
	"push %rdi;"							\
	"push %r8;"							\
	"push %r9;"							\
	"push %r10;"							\
	"push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS					\
	"pop %r11;"							\
	"pop %r10;"							\
	"pop %r9;"							\
	"pop %r8;"							\
	"pop %rdi;"							\
	"pop %rsi;"							\
	"pop %rdx;"							\
	"pop %rcx;"

/* We save some registers, but not all of them; that would be too much.
 * We clobber all caller-saved registers except the argument register. */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value. This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available. It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call. The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_THUNK_NAME(func) "__raw_callee_save_" #func
#define PV_CALLEE_SAVE_REGS_THUNK(func)					\
	extern typeof(func) __raw_callee_save_##func;			\
									\
	asm(".pushsection .text;"					\
	    ".globl " PV_THUNK_NAME(func) ";"				\
	    ".type " PV_THUNK_NAME(func) ", @function;"			\
	    PV_THUNK_NAME(func) ":"					\
	    FRAME_BEGIN							\
	    PV_SAVE_ALL_CALLER_REGS					\
	    "call " #func ";"						\
	    PV_RESTORE_ALL_CALLER_REGS					\
	    FRAME_END							\
	    "ret;"							\
	    ".size " PV_THUNK_NAME(func) ", .-" PV_THUNK_NAME(func) ";"	\
	    ".popsection")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)						\
	((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)			\
	((struct paravirt_callee_save) { func })

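/*
 * Rough usage sketch (modelled on how the Xen guest code wires up a
 * callee-save pvop; names are illustrative, not defined here):
 *
 *	asmlinkage __visible unsigned long xen_save_fl(void);
 *	PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl);
 *	...
 *	pv_ops.irq.save_fl = PV_CALLEE_SAVE(xen_save_fl);
 *
 * The thunk preserves all caller-save registers, so the resulting pvop can
 * be called from contexts where only the return register may be clobbered.
 */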
#ifdef CONFIG_PARAVIRT_XXL
static inline notrace unsigned long arch_local_save_flags(void)
{
	return PVOP_CALLEE0(unsigned long, irq.save_fl);
}

static inline notrace void arch_local_irq_restore(unsigned long f)
{
	PVOP_VCALLEE1(irq.restore_fl, f);
}

static inline notrace void arch_local_irq_disable(void)
{
	PVOP_VCALLEE0(irq.irq_disable);
}

static inline notrace void arch_local_irq_enable(void)
{
	PVOP_VCALLEE0(irq.irq_enable);
}

static inline notrace unsigned long arch_local_irq_save(void)
{
	unsigned long f;

	f = arch_local_save_flags();
	arch_local_irq_disable();
	return f;
}
#endif

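/*
 * The arch_local_*() helpers above are what the generic local_irq_save()/
 * local_irq_restore() macros in <linux/irqflags.h> end up calling, so a
 * paravirt guest can virtualize the interrupt flag without trapping on
 * every pushf/popf/cli/sti.
 */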

/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

extern void default_banner(void);

#else  /* __ASSEMBLY__ */

#define _PVSITE(ptype, ops, word, algn)		\
771:;						\
	ops;					\
772:;						\
	.pushsection .parainstructions,"a";	\
	 .align	algn;				\
	 word 771b;				\
	 .byte ptype;				\
	 .byte 772b-771b;			\
	.popsection

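/*
 * _PVSITE emits the patch site itself (the 771:/772: labels bracket the
 * instructions) plus a .parainstructions record describing it: the site
 * address, the paravirt op type and the site length, which is what the
 * boot-time patching code uses to rewrite the site with the final code.
 */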
#define COND_PUSH(set, mask, reg)			\
	.if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg)			\
	.if ((~(set)) & mask); pop %reg; .endif

#ifdef CONFIG_X86_64

#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_RAX, rax);		\
	COND_PUSH(set, CLBR_RCX, rcx);		\
	COND_PUSH(set, CLBR_RDX, rdx);		\
	COND_PUSH(set, CLBR_RSI, rsi);		\
	COND_PUSH(set, CLBR_RDI, rdi);		\
	COND_PUSH(set, CLBR_R8, r8);		\
	COND_PUSH(set, CLBR_R9, r9);		\
	COND_PUSH(set, CLBR_R10, r10);		\
	COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_R11, r11);		\
	COND_POP(set, CLBR_R10, r10);		\
	COND_POP(set, CLBR_R9, r9);		\
	COND_POP(set, CLBR_R8, r8);		\
	COND_POP(set, CLBR_RDI, rdi);		\
	COND_POP(set, CLBR_RSI, rsi);		\
	COND_POP(set, CLBR_RDX, rdx);		\
	COND_POP(set, CLBR_RCX, rcx);		\
	COND_POP(set, CLBR_RAX, rax)

#define PARA_PATCH(off)		((off) / 8)
#define PARA_SITE(ptype, ops)	_PVSITE(ptype, ops, .quad, 8)
#define PARA_INDIRECT(addr)	*addr(%rip)
#else
#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_EAX, eax);		\
	COND_PUSH(set, CLBR_EDI, edi);		\
	COND_PUSH(set, CLBR_ECX, ecx);		\
	COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_EDX, edx);		\
	COND_POP(set, CLBR_ECX, ecx);		\
	COND_POP(set, CLBR_EDI, edi);		\
	COND_POP(set, CLBR_EAX, eax)

#define PARA_PATCH(off)		((off) / 4)
#define PARA_SITE(ptype, ops)	_PVSITE(ptype, ops, .long, 4)
#define PARA_INDIRECT(addr)	*%cs:addr
#endif

#ifdef CONFIG_PARAVIRT_XXL
#define INTERRUPT_RETURN						\
	PARA_SITE(PARA_PATCH(PV_CPU_iret),				\
		  ANNOTATE_RETPOLINE_SAFE;				\
		  jmp PARA_INDIRECT(pv_ops+PV_CPU_iret);)

#define DISABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(PV_IRQ_irq_disable),			\
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  ANNOTATE_RETPOLINE_SAFE;				\
		  call PARA_INDIRECT(pv_ops+PV_IRQ_irq_disable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define ENABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(PV_IRQ_irq_enable),			\
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  ANNOTATE_RETPOLINE_SAFE;				\
		  call PARA_INDIRECT(pv_ops+PV_IRQ_irq_enable);		\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
#endif

#ifdef CONFIG_X86_64
#ifdef CONFIG_PARAVIRT_XXL
/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop.  The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */
#define SWAPGS_UNSAFE_STACK						\
	PARA_SITE(PARA_PATCH(PV_CPU_swapgs), swapgs)

/*
 * Note: swapgs is very special, and in practice is either going to be
 * implemented with a single "swapgs" instruction or something very
 * special.  Either way, we don't need to save any registers for
 * it.
 */
#define SWAPGS								\
	PARA_SITE(PARA_PATCH(PV_CPU_swapgs),				\
		  ANNOTATE_RETPOLINE_SAFE;				\
		  call PARA_INDIRECT(pv_ops+PV_CPU_swapgs);		\
		 )

#define USERGS_SYSRET64							\
	PARA_SITE(PARA_PATCH(PV_CPU_usergs_sysret64),			\
		  ANNOTATE_RETPOLINE_SAFE;				\
		  jmp PARA_INDIRECT(pv_ops+PV_CPU_usergs_sysret64);)

#ifdef CONFIG_DEBUG_ENTRY
#define SAVE_FLAGS(clobbers)						\
	PARA_SITE(PARA_PATCH(PV_IRQ_save_fl),				\
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  ANNOTATE_RETPOLINE_SAFE;				\
		  call PARA_INDIRECT(pv_ops+PV_IRQ_save_fl);		\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
#endif
#endif /* CONFIG_PARAVIRT_XXL */
#endif /* CONFIG_X86_64 */

#ifdef CONFIG_PARAVIRT_XXL

#define GET_CR2_INTO_AX						\
	PARA_SITE(PARA_PATCH(PV_MMU_read_cr2),			\
		  ANNOTATE_RETPOLINE_SAFE;			\
		  call PARA_INDIRECT(pv_ops+PV_MMU_read_cr2);	\
		 )

#endif /* CONFIG_PARAVIRT_XXL */


#endif /* __ASSEMBLY__ */
#else  /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
#endif /* !CONFIG_PARAVIRT */

#ifndef __ASSEMBLY__
#ifndef CONFIG_PARAVIRT_XXL
static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
					  struct mm_struct *mm)
{
}
#endif

#ifndef CONFIG_PARAVIRT
static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
}
#endif
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_PARAVIRT_H */