/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/*
 * Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here.
 */

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>
#include <asm/nospec-branch.h>

#include <asm/paravirt_types.h>

#ifndef __ASSEMBLY__
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/cpumask.h>
#include <asm/frame.h>

static inline unsigned long long paravirt_sched_clock(void)
{
        return PVOP_CALL0(unsigned long long, time.sched_clock);
}

struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;

__visible void __native_queued_spin_unlock(struct qspinlock *lock);
bool pv_is_native_spin_unlock(void);
__visible bool __native_vcpu_is_preempted(long cpu);
bool pv_is_native_vcpu_is_preempted(void);

static inline u64 paravirt_steal_clock(int cpu)
{
        return PVOP_CALL1(u64, time.steal_clock, cpu);
}

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
        pv_ops.cpu.io_delay();
#ifdef REALLY_SLOW_IO
        pv_ops.cpu.io_delay();
        pv_ops.cpu.io_delay();
        pv_ops.cpu.io_delay();
#endif
}
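/*
 * Illustrative use (a sketch, not part of this header): a driver doing
 * a port access to slow legacy hardware pairs it with the
 * paravirtualized delay, e.g. (outb() is assumed from <asm/io.h>):
 *
 *      outb(val, port);
 *      slow_down_io();
 *
 * On bare metal io_delay is a short dummy port write; a hypervisor can
 * patch cpu.io_delay to a cheap no-op instead of trapping four dummy
 * port accesses when REALLY_SLOW_IO is defined.
 */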

void native_flush_tlb_local(void);
void native_flush_tlb_global(void);
void native_flush_tlb_one_user(unsigned long addr);
void native_flush_tlb_others(const struct cpumask *cpumask,
                             const struct flush_tlb_info *info);

static inline void __flush_tlb_local(void)
{
        PVOP_VCALL0(mmu.flush_tlb_user);
}

static inline void __flush_tlb_global(void)
{
        PVOP_VCALL0(mmu.flush_tlb_kernel);
}

static inline void __flush_tlb_one_user(unsigned long addr)
{
        PVOP_VCALL1(mmu.flush_tlb_one_user, addr);
}

static inline void __flush_tlb_others(const struct cpumask *cpumask,
                                      const struct flush_tlb_info *info)
{
        PVOP_VCALL2(mmu.flush_tlb_others, cpumask, info);
}

static inline void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
{
        PVOP_VCALL2(mmu.tlb_remove_table, tlb, table);
}

static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
        PVOP_VCALL1(mmu.exit_mmap, mm);
}

#ifdef CONFIG_PARAVIRT_XXL
static inline void load_sp0(unsigned long sp0)
{
        PVOP_VCALL1(cpu.load_sp0, sp0);
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
                           unsigned int *ecx, unsigned int *edx)
{
        PVOP_VCALL4(cpu.cpuid, eax, ebx, ecx, edx);
}

/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
        return PVOP_CALL1(unsigned long, cpu.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
        PVOP_VCALL2(cpu.set_debugreg, reg, val);
}
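/*
 * Example (illustrative only; the pv_example_* name is hypothetical):
 * get_debugreg() is a statement macro that assigns into its first
 * argument, so it pairs with set_debugreg() like this.
 */
static inline void pv_example_copy_dr7(void)
{
        unsigned long dr7;

        get_debugreg(dr7, 7);   /* dr7 = current %db7 via the pv hook */
        set_debugreg(dr7, 7);   /* write the same value straight back */
}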

static inline unsigned long read_cr0(void)
{
        return PVOP_CALL0(unsigned long, cpu.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
        PVOP_VCALL1(cpu.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
        return PVOP_CALLEE0(unsigned long, mmu.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
        PVOP_VCALL1(mmu.write_cr2, x);
}

static inline unsigned long __read_cr3(void)
{
        return PVOP_CALL0(unsigned long, mmu.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
        PVOP_VCALL1(mmu.write_cr3, x);
}

static inline void __write_cr4(unsigned long x)
{
        PVOP_VCALL1(cpu.write_cr4, x);
}

static inline void arch_safe_halt(void)
{
        PVOP_VCALL0(irq.safe_halt);
}

static inline void halt(void)
{
        PVOP_VCALL0(irq.halt);
}

static inline void wbinvd(void)
{
        PVOP_VCALL0(cpu.wbinvd);
}

#define get_kernel_rpl()  (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr)
{
        return PVOP_CALL1(u64, cpu.read_msr, msr);
}

static inline void paravirt_write_msr(unsigned msr,
                                      unsigned low, unsigned high)
{
        PVOP_VCALL3(cpu.write_msr, msr, low, high);
}

static inline u64 paravirt_read_msr_safe(unsigned msr, int *err)
{
        return PVOP_CALL2(u64, cpu.read_msr_safe, msr, err);
}

static inline int paravirt_write_msr_safe(unsigned msr,
                                          unsigned low, unsigned high)
{
        return PVOP_CALL3(int, cpu.write_msr_safe, msr, low, high);
}

#define rdmsr(msr, val1, val2)                  \
do {                                            \
        u64 _l = paravirt_read_msr(msr);        \
        val1 = (u32)_l;                         \
        val2 = _l >> 32;                        \
} while (0)

#define wrmsr(msr, val1, val2)                  \
do {                                            \
        paravirt_write_msr(msr, val1, val2);    \
} while (0)

#define rdmsrl(msr, val)                        \
do {                                            \
        val = paravirt_read_msr(msr);           \
} while (0)

static inline void wrmsrl(unsigned msr, u64 val)
{
        wrmsr(msr, (u32)val, (u32)(val>>32));
}

#define wrmsr_safe(msr, a, b)   paravirt_write_msr_safe(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)                           \
({                                                      \
        int _err;                                       \
        u64 _l = paravirt_read_msr_safe(msr, &_err);    \
        (*a) = (u32)_l;                                 \
        (*b) = _l >> 32;                                \
        _err;                                           \
})

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
        int err;

        *p = paravirt_read_msr_safe(msr, &err);
        return err;
}
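/*
 * Example (illustrative only; the pv_example_* helper is hypothetical,
 * and MSR_IA32_TSC is assumed to come from <asm/msr-index.h>): the
 * _safe variants report an error instead of faulting when the
 * hypervisor refuses the MSR access.
 */
static inline u64 pv_example_read_tsc_msr(void)
{
        unsigned long long val;

        if (rdmsrl_safe(MSR_IA32_TSC, &val))
                return 0;       /* the RDMSR would have faulted */
        return val;
}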

static inline unsigned long long paravirt_read_pmc(int counter)
{
        return PVOP_CALL1(u64, cpu.read_pmc, counter);
}

#define rdpmc(counter, low, high)               \
do {                                            \
        u64 _l = paravirt_read_pmc(counter);    \
        low = (u32)_l;                          \
        high = _l >> 32;                        \
} while (0)

#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))
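/*
 * Illustrative use (a sketch; counter index 0 is arbitrary):
 *
 *      u32 lo, hi;
 *      u64 count;
 *
 *      rdpmc(0, lo, hi);       split the 64-bit counter into two u32s
 *      rdpmcl(0, count);       or read it as a single u64
 */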

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
        PVOP_VCALL2(cpu.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
        PVOP_VCALL2(cpu.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
        PVOP_VCALL0(cpu.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(cpu.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(cpu.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
        PVOP_VCALL2(cpu.set_ldt, addr, entries);
}
static inline unsigned long paravirt_store_tr(void)
{
        return PVOP_CALL0(unsigned long, cpu.store_tr);
}

#define store_tr(tr)    ((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
        PVOP_VCALL2(cpu.load_tls, t, cpu);
}

#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
        PVOP_VCALL1(cpu.load_gs_index, gs);
}
#endif

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
                                   const void *desc)
{
        PVOP_VCALL3(cpu.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
                                   void *desc, int type)
{
        PVOP_VCALL4(cpu.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
        PVOP_VCALL3(cpu.write_idt_entry, dt, entry, g);
}

#ifdef CONFIG_X86_IOPL_IOPERM
static inline void tss_invalidate_io_bitmap(void)
{
        PVOP_VCALL0(cpu.invalidate_io_bitmap);
}

static inline void tss_update_io_bitmap(void)
{
        PVOP_VCALL0(cpu.update_io_bitmap);
}
#endif

static inline void paravirt_activate_mm(struct mm_struct *prev,
                                        struct mm_struct *next)
{
        PVOP_VCALL2(mmu.activate_mm, prev, next);
}

static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
                                          struct mm_struct *mm)
{
        PVOP_VCALL2(mmu.dup_mmap, oldmm, mm);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
        return PVOP_CALL1(int, mmu.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        PVOP_VCALL2(mmu.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(mmu.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
        PVOP_VCALL1(mmu.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(mmu.alloc_pmd, mm, pfn);
}

static inline void paravirt_release_pmd(unsigned long pfn)
{
        PVOP_VCALL1(mmu.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(mmu.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
        PVOP_VCALL1(mmu.release_pud, pfn);
}

static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(mmu.alloc_p4d, mm, pfn);
}

static inline void paravirt_release_p4d(unsigned long pfn)
{
        PVOP_VCALL1(mmu.release_p4d, pfn);
}

static inline pte_t __pte(pteval_t val)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pteval_t, mmu.make_pte, val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pteval_t, mmu.make_pte, val);

        return (pte_t) { .pte = ret };
}

static inline pteval_t pte_val(pte_t pte)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pteval_t, mmu.pte_val,
                                   pte.pte, (u64)pte.pte >> 32);
        else
                ret = PVOP_CALLEE1(pteval_t, mmu.pte_val, pte.pte);

        return ret;
}

static inline pgd_t __pgd(pgdval_t val)
{
        pgdval_t ret;

        if (sizeof(pgdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pgdval_t, mmu.make_pgd, val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pgdval_t, mmu.make_pgd, val);

        return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
        pgdval_t ret;

        if (sizeof(pgdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pgdval_t, mmu.pgd_val,
                                   pgd.pgd, (u64)pgd.pgd >> 32);
        else
                ret = PVOP_CALLEE1(pgdval_t, mmu.pgd_val, pgd.pgd);

        return ret;
}
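/*
 * Why the sizeof() tests above: on 32-bit PAE kernels, pteval_t and
 * friends are 64 bits wide while registers are only 32, so the value
 * must be passed to the pv hook as two register-sized words.  An
 * illustrative split (the value is arbitrary):
 *
 *      pteval_t val = 0x8000000000000001ULL;
 *      u32 low  = (u32)val;            low  == 0x00000001
 *      u32 high = (u64)val >> 32;      high == 0x80000000
 *
 * On 64-bit kernels sizeof(pteval_t) == sizeof(long), so the
 * single-argument form is used instead.
 */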

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
                                           pte_t *ptep)
{
        pteval_t ret;

        ret = PVOP_CALL3(pteval_t, mmu.ptep_modify_prot_start, vma, addr, ptep);

        return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
                                           pte_t *ptep, pte_t old_pte, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                /* 5 arg words */
                pv_ops.mmu.ptep_modify_prot_commit(vma, addr, ptep, pte);
        else
                PVOP_VCALL4(mmu.ptep_modify_prot_commit,
                            vma, addr, ptep, pte.pte);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                PVOP_VCALL3(mmu.set_pte, ptep, pte.pte, (u64)pte.pte >> 32);
        else
                PVOP_VCALL2(mmu.set_pte, ptep, pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                /* 5 arg words */
                pv_ops.mmu.set_pte_at(mm, addr, ptep, pte);
        else
                PVOP_VCALL4(mmu.set_pte_at, mm, addr, ptep, pte.pte);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
        pmdval_t val = native_pmd_val(pmd);

        if (sizeof(pmdval_t) > sizeof(long))
                PVOP_VCALL3(mmu.set_pmd, pmdp, val, (u64)val >> 32);
        else
                PVOP_VCALL2(mmu.set_pmd, pmdp, val);
}

#if CONFIG_PGTABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
        pmdval_t ret;

        if (sizeof(pmdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pmdval_t, mmu.make_pmd, val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pmdval_t, mmu.make_pmd, val);

        return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
        pmdval_t ret;

        if (sizeof(pmdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pmdval_t, mmu.pmd_val,
                                   pmd.pmd, (u64)pmd.pmd >> 32);
        else
                ret = PVOP_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd);

        return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
        pudval_t val = native_pud_val(pud);

        if (sizeof(pudval_t) > sizeof(long))
                PVOP_VCALL3(mmu.set_pud, pudp, val, (u64)val >> 32);
        else
                PVOP_VCALL2(mmu.set_pud, pudp, val);
}
#if CONFIG_PGTABLE_LEVELS >= 4
static inline pud_t __pud(pudval_t val)
{
        pudval_t ret;

        ret = PVOP_CALLEE1(pudval_t, mmu.make_pud, val);

        return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
        return PVOP_CALLEE1(pudval_t, mmu.pud_val, pud.pud);
}

static inline void pud_clear(pud_t *pudp)
{
        set_pud(pudp, __pud(0));
}

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
        p4dval_t val = native_p4d_val(p4d);

        PVOP_VCALL2(mmu.set_p4d, p4dp, val);
}

#if CONFIG_PGTABLE_LEVELS >= 5

static inline p4d_t __p4d(p4dval_t val)
{
        p4dval_t ret = PVOP_CALLEE1(p4dval_t, mmu.make_p4d, val);

        return (p4d_t) { ret };
}

static inline p4dval_t p4d_val(p4d_t p4d)
{
        return PVOP_CALLEE1(p4dval_t, mmu.p4d_val, p4d.p4d);
}

static inline void __set_pgd(pgd_t *pgdp, pgd_t pgd)
{
        PVOP_VCALL2(mmu.set_pgd, pgdp, native_pgd_val(pgd));
}

#define set_pgd(pgdp, pgdval) do {                                      \
        if (pgtable_l5_enabled())                                       \
                __set_pgd(pgdp, pgdval);                                \
        else                                                            \
                set_p4d((p4d_t *)(pgdp), (p4d_t) { (pgdval).pgd });     \
} while (0)
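/*
 * Note on the dispatch above (explanatory sketch): pgtable_l5_enabled()
 * is a boot-time decision, so a kernel built with
 * CONFIG_PGTABLE_LEVELS == 5 may still run with only 4 paging levels.
 * In that folded case the "pgd" entry really lives in the p4d slot,
 * hence the cast-and-forward to set_p4d() instead of an mmu.set_pgd
 * pv call.
 */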

#define pgd_clear(pgdp) do {                                            \
        if (pgtable_l5_enabled())                                       \
                set_pgd(pgdp, __pgd(0));                                \
} while (0)

#endif  /* CONFIG_PGTABLE_LEVELS >= 5 */

static inline void p4d_clear(p4d_t *p4dp)
{
        set_p4d(p4dp, __p4d(0));
}

#endif  /* CONFIG_PGTABLE_LEVELS >= 4 */

#endif  /* CONFIG_PGTABLE_LEVELS >= 3 */

#ifdef CONFIG_X86_PAE
/*
 * Special-case pte-setting operations for PAE, which can't update a
 * 64-bit pte atomically.
 */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
        PVOP_VCALL3(mmu.set_pte_atomic, ptep, pte.pte, pte.pte >> 32);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        PVOP_VCALL3(mmu.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
        PVOP_VCALL1(mmu.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
        set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
        set_pmd(pmdp, __pmd(0));
}
#endif  /* CONFIG_X86_PAE */

#define __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
        PVOP_VCALL1(cpu.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
        PVOP_VCALL1(cpu.end_context_switch, next);
}

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
        PVOP_VCALL0(mmu.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
        PVOP_VCALL0(mmu.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
        PVOP_VCALL0(mmu.lazy_mode.flush);
}

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
                                phys_addr_t phys, pgprot_t flags)
{
        pv_ops.mmu.set_fixmap(idx, phys, flags);
}
#endif

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
                                                        u32 val)
{
        PVOP_VCALL2(lock.queued_spin_lock_slowpath, lock, val);
}

static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
{
        PVOP_VCALLEE1(lock.queued_spin_unlock, lock);
}

static __always_inline void pv_wait(u8 *ptr, u8 val)
{
        PVOP_VCALL2(lock.wait, ptr, val);
}

static __always_inline void pv_kick(int cpu)
{
        PVOP_VCALL1(lock.kick, cpu);
}

static __always_inline bool pv_vcpu_is_preempted(long cpu)
{
        return PVOP_CALLEE1(bool, lock.vcpu_is_preempted, cpu);
}
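/*
 * How these hooks fit together (explanatory sketch, not the exact
 * qspinlock slowpath): a vCPU spinning on a lock byte can hypercall
 * out instead of burning cycles, and the unlocker wakes it.  Roughly:
 *
 *      waiter:                                 unlocker:
 *      while (READ_ONCE(*ptr) == val)          *ptr = new;
 *              pv_wait(ptr, val);              pv_kick(waiter_cpu);
 *
 * pv_vcpu_is_preempted() additionally lets spinners avoid waiting on a
 * lock holder whose vCPU is not currently running.
 */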

void __raw_callee_save___native_queued_spin_unlock(struct qspinlock *lock);
bool __raw_callee_save___native_vcpu_is_preempted(long cpu);

#endif /* SMP && PARAVIRT_SPINLOCKS */

#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"

/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS         "pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS      "popl  %ecx;"

#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS                                         \
        "push %rcx;"                                                    \
        "push %rdx;"                                                    \
        "push %rsi;"                                                    \
        "push %rdi;"                                                    \
        "push %r8;"                                                     \
        "push %r9;"                                                     \
        "push %r10;"                                                    \
        "push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS                                      \
        "pop %r11;"                                                     \
        "pop %r10;"                                                     \
        "pop %r9;"                                                      \
        "pop %r8;"                                                      \
        "pop %rdi;"                                                     \
        "pop %rsi;"                                                     \
        "pop %rdx;"                                                     \
        "pop %rcx;"

/*
 * We save some registers, but not all of them; saving everything would
 * be too much.  We clobber all caller-saved registers except the
 * argument register.
 */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx", "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx", "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value.  This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available.  It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call. The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_THUNK_NAME(func) "__raw_callee_save_" #func
#define PV_CALLEE_SAVE_REGS_THUNK(func)                                 \
        extern typeof(func) __raw_callee_save_##func;                   \
                                                                        \
        asm(".pushsection .text;"                                       \
            ".globl " PV_THUNK_NAME(func) ";"                           \
            ".type " PV_THUNK_NAME(func) ", @function;"                 \
            PV_THUNK_NAME(func) ":"                                     \
            FRAME_BEGIN                                                 \
            PV_SAVE_ALL_CALLER_REGS                                     \
            "call " #func ";"                                           \
            PV_RESTORE_ALL_CALLER_REGS                                  \
            FRAME_END                                                   \
            "ret;"                                                      \
            ".size " PV_THUNK_NAME(func) ", .-" PV_THUNK_NAME(func) ";" \
            ".popsection")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)                                            \
        ((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)                       \
        ((struct paravirt_callee_save) { func })
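/*
 * Example (illustrative only; my_vcpu_is_preempted is a hypothetical
 * function, not one defined by this header): emit a callee-save thunk
 * for a C function, then install the thunked entry point as a pv op.
 */
#if 0   /* sketch, not compiled */
static bool my_vcpu_is_preempted(long cpu)
{
        return false;
}
PV_CALLEE_SAVE_REGS_THUNK(my_vcpu_is_preempted);

/* later, e.g. in platform setup code: */
pv_ops.lock.vcpu_is_preempted = PV_CALLEE_SAVE(my_vcpu_is_preempted);
#endif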

#ifdef CONFIG_PARAVIRT_XXL
static inline notrace unsigned long arch_local_save_flags(void)
{
        return PVOP_CALLEE0(unsigned long, irq.save_fl);
}

static inline notrace void arch_local_irq_restore(unsigned long f)
{
        PVOP_VCALLEE1(irq.restore_fl, f);
}

static inline notrace void arch_local_irq_disable(void)
{
        PVOP_VCALLEE0(irq.irq_disable);
}

static inline notrace void arch_local_irq_enable(void)
{
        PVOP_VCALLEE0(irq.irq_enable);
}

static inline notrace unsigned long arch_local_irq_save(void)
{
        unsigned long f;

        f = arch_local_save_flags();
        arch_local_irq_disable();
        return f;
}
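/*
 * Usage sketch (the generic local_irq_save()/local_irq_restore()
 * wrappers in <linux/irqflags.h> expand to these arch_ helpers):
 *
 *      unsigned long flags;
 *
 *      flags = arch_local_irq_save();  save IF, then disable irqs
 *      ...critical section...
 *      arch_local_irq_restore(flags);  put IF back exactly as it was
 */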
#endif

/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

extern void default_banner(void);

#else  /* __ASSEMBLY__ */

#define _PVSITE(ptype, ops, word, algn)         \
771:;                                           \
        ops;                                    \
772:;                                           \
        .pushsection .parainstructions,"a";     \
         .align algn;                           \
         word 771b;                             \
         .byte ptype;                           \
         .byte 772b-771b;                       \
        .popsection
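/*
 * What _PVSITE records (explanatory sketch): the instructions emitted
 * between the local labels 771 and 772 are the patchable site, and the
 * .parainstructions entry roughly corresponds to
 *
 *      struct paravirt_patch_site {
 *              instruction address     (word 771b)
 *              pv op type              (.byte ptype)
 *              site length in bytes    (.byte 772b-771b)
 *      };
 *
 * which apply_paravirt() walks at boot to patch each site in place.
 */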

#define COND_PUSH(set, mask, reg)                       \
        .if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg)                        \
        .if ((~(set)) & mask); pop %reg; .endif

#ifdef CONFIG_X86_64

#define PV_SAVE_REGS(set)                       \
        COND_PUSH(set, CLBR_RAX, rax);          \
        COND_PUSH(set, CLBR_RCX, rcx);          \
        COND_PUSH(set, CLBR_RDX, rdx);          \
        COND_PUSH(set, CLBR_RSI, rsi);          \
        COND_PUSH(set, CLBR_RDI, rdi);          \
        COND_PUSH(set, CLBR_R8, r8);            \
        COND_PUSH(set, CLBR_R9, r9);            \
        COND_PUSH(set, CLBR_R10, r10);          \
        COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set)                    \
        COND_POP(set, CLBR_R11, r11);           \
        COND_POP(set, CLBR_R10, r10);           \
        COND_POP(set, CLBR_R9, r9);             \
        COND_POP(set, CLBR_R8, r8);             \
        COND_POP(set, CLBR_RDI, rdi);           \
        COND_POP(set, CLBR_RSI, rsi);           \
        COND_POP(set, CLBR_RDX, rdx);           \
        COND_POP(set, CLBR_RCX, rcx);           \
        COND_POP(set, CLBR_RAX, rax)

#define PARA_PATCH(off)         ((off) / 8)
#define PARA_SITE(ptype, ops)   _PVSITE(ptype, ops, .quad, 8)
#define PARA_INDIRECT(addr)     *addr(%rip)
#else
#define PV_SAVE_REGS(set)                       \
        COND_PUSH(set, CLBR_EAX, eax);          \
        COND_PUSH(set, CLBR_EDI, edi);          \
        COND_PUSH(set, CLBR_ECX, ecx);          \
        COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set)                    \
        COND_POP(set, CLBR_EDX, edx);           \
        COND_POP(set, CLBR_ECX, ecx);           \
        COND_POP(set, CLBR_EDI, edi);           \
        COND_POP(set, CLBR_EAX, eax)

#define PARA_PATCH(off)         ((off) / 4)
#define PARA_SITE(ptype, ops)   _PVSITE(ptype, ops, .long, 4)
#define PARA_INDIRECT(addr)     *%cs:addr
#endif

#ifdef CONFIG_PARAVIRT_XXL
#define INTERRUPT_RETURN                                                \
        PARA_SITE(PARA_PATCH(PV_CPU_iret),                              \
                  ANNOTATE_RETPOLINE_SAFE;                              \
                  jmp PARA_INDIRECT(pv_ops+PV_CPU_iret);)

#define DISABLE_INTERRUPTS(clobbers)                                    \
        PARA_SITE(PARA_PATCH(PV_IRQ_irq_disable),                       \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  ANNOTATE_RETPOLINE_SAFE;                              \
                  call PARA_INDIRECT(pv_ops+PV_IRQ_irq_disable);        \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define ENABLE_INTERRUPTS(clobbers)                                     \
        PARA_SITE(PARA_PATCH(PV_IRQ_irq_enable),                        \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  ANNOTATE_RETPOLINE_SAFE;                              \
                  call PARA_INDIRECT(pv_ops+PV_IRQ_irq_enable);         \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
#endif

#ifdef CONFIG_X86_64
#ifdef CONFIG_PARAVIRT_XXL
/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop.  The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */
#define SWAPGS_UNSAFE_STACK                                             \
        PARA_SITE(PARA_PATCH(PV_CPU_swapgs), swapgs)

/*
 * Note: swapgs is very special, and in practice is either going to be
 * implemented with a single "swapgs" instruction or something very
 * special.  Either way, we don't need to save any registers for it.
 */
#define SWAPGS                                                          \
        PARA_SITE(PARA_PATCH(PV_CPU_swapgs),                            \
                  ANNOTATE_RETPOLINE_SAFE;                              \
                  call PARA_INDIRECT(pv_ops+PV_CPU_swapgs);             \
                 )

#define USERGS_SYSRET64                                                 \
        PARA_SITE(PARA_PATCH(PV_CPU_usergs_sysret64),                   \
                  ANNOTATE_RETPOLINE_SAFE;                              \
                  jmp PARA_INDIRECT(pv_ops+PV_CPU_usergs_sysret64);)

#ifdef CONFIG_DEBUG_ENTRY
#define SAVE_FLAGS(clobbers)                                        \
        PARA_SITE(PARA_PATCH(PV_IRQ_save_fl),                       \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);        \
                  ANNOTATE_RETPOLINE_SAFE;                          \
                  call PARA_INDIRECT(pv_ops+PV_IRQ_save_fl);        \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
#endif
#endif /* CONFIG_PARAVIRT_XXL */
#endif  /* CONFIG_X86_64 */

#ifdef CONFIG_PARAVIRT_XXL

#define GET_CR2_INTO_AX                                                 \
        PARA_SITE(PARA_PATCH(PV_MMU_read_cr2),                          \
                  ANNOTATE_RETPOLINE_SAFE;                              \
                  call PARA_INDIRECT(pv_ops+PV_MMU_read_cr2);           \
                 )

#endif /* CONFIG_PARAVIRT_XXL */

#endif /* __ASSEMBLY__ */
#else  /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
#endif /* !CONFIG_PARAVIRT */

#ifndef __ASSEMBLY__
#ifndef CONFIG_PARAVIRT_XXL
static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
                                          struct mm_struct *mm)
{
}
#endif

#ifndef CONFIG_PARAVIRT
static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
}
#endif
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_PARAVIRT_H */
962 #endif /* _ASM_X86_PARAVIRT_H */