/*
 * Asm versions of Xen pv-ops, suitable for either direct use or
 * inlining. The inline versions are the same as the direct-use
 * versions, with the pre- and post-amble chopped off.
 *
 * This code is encoded for size rather than absolute efficiency, with
 * a view to being able to inline as much as possible.
 *
 * We only bother with direct forms (i.e., vcpu in percpu data) of the
 * operations here; the indirect forms are better handled in C, since
 * they're generally too large to inline anyway.
 */
13 | ||
14 | #include <asm/asm-offsets.h> | |
15 | #include <asm/percpu.h> | |
16 | #include <asm/processor-flags.h> | |
8be0eb7e | 17 | #include <asm/frame.h> |
5393744b JF |
18 | |
19 | #include "xen-asm.h" | |
20 | ||
/*
 * Enable events. This clears the event mask and tests the pending
 * event status with one "and" operation. If there are pending events,
 * then enter the hypervisor to get them handled.
 */
ENTRY(xen_irq_enable_direct)
	FRAME_BEGIN
	/* Unmask events */
	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask

	/*
	 * Preemption here doesn't matter, because it will deal with
	 * any pending interrupts. The pending check may end up being
	 * run on the wrong CPU, but that doesn't hurt.
	 */

	/* Test for pending */
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jz 1f

2:	call check_events
1:
ENDPATCH(xen_irq_enable_direct)
	FRAME_END
	ret
	ENDPROC(xen_irq_enable_direct)
	RELOC(xen_irq_enable_direct, 2b+1)

/*
 * Disabling events is simply a matter of making the event mask
 * non-zero.
 */
ENTRY(xen_irq_disable_direct)
	movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
ENDPATCH(xen_irq_disable_direct)
	ret
	ENDPROC(xen_irq_disable_direct)
	RELOC(xen_irq_disable_direct, 0)

/*
 * (xen_)save_fl is used to get the current interrupt enable status.
 * Callers expect the status to be in X86_EFLAGS_IF, and other bits
 * may be set in the return value. We take advantage of this by
 * making sure that X86_EFLAGS_IF has the right value (and other bits
 * in that byte are 0), but other bits in the return value are
 * undefined. We need to toggle the state of the bit, because Xen and
 * x86 use opposite senses (mask vs enable).
 */
ENTRY(xen_save_fl_direct)
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
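	/*
	 * The mask byte is non-zero when events are masked, so setz
	 * leaves 1 in %ah exactly when events are enabled; doubling it
	 * moves that 1 into bit 9 of the return register, which is
	 * where X86_EFLAGS_IF (0x200) lives.
	 */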
	setz %ah
	addb %ah, %ah
ENDPATCH(xen_save_fl_direct)
	ret
	ENDPROC(xen_save_fl_direct)
	RELOC(xen_save_fl_direct, 0)

/*
 * In principle the caller should be passing us a value returned from
 * xen_save_fl_direct, but for robustness' sake we test only the
 * X86_EFLAGS_IF flag rather than the whole byte. After setting the
 * interrupt mask state, it checks for unmasked pending events and
 * enters the hypervisor to get them delivered if so.
 */
ENTRY(xen_restore_fl_direct)
	FRAME_BEGIN
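	/*
	 * The flags argument arrives in %rdi on 64-bit (first integer
	 * argument register) and in %eax on 32-bit (the kernel's
	 * regparm convention), so the IF bit is tested via %di or %ah
	 * respectively.
	 */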
#ifdef CONFIG_X86_64
	testw $X86_EFLAGS_IF, %di
#else
	testb $X86_EFLAGS_IF>>8, %ah
#endif
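	/* ZF set means IF was clear, i.e. events should be masked */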
	setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	/*
	 * Preemption here doesn't matter, because it will deal with
	 * any pending interrupts. The pending check may end up being
	 * run on the wrong CPU, but that doesn't hurt.
	 */

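	/*
	 * The 16-bit compare below relies on evtchn_upcall_mask
	 * immediately following evtchn_upcall_pending in struct
	 * vcpu_info: the word reads as 0x0001 exactly when the pending
	 * byte is 1 and the mask byte is 0.
	 */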
	/* check for unmasked and pending */
	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jnz 1f
2:	call check_events
1:
ENDPATCH(xen_restore_fl_direct)
	FRAME_END
	ret
	ENDPROC(xen_restore_fl_direct)
	RELOC(xen_restore_fl_direct, 2b+1)

/*
 * Force an event check by making a hypercall, but preserve regs
 * before making the call.
 */
ENTRY(check_events)
	FRAME_BEGIN
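	/*
	 * check_events is reached from patched call sites that do not
	 * follow the normal C calling convention, so every register
	 * the callee may clobber is saved by hand: the three
	 * caller-saved registers on 32-bit, and all nine caller-saved
	 * registers of the SysV ABI on 64-bit.
	 */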
#ifdef CONFIG_X86_32
	push %eax
	push %ecx
	push %edx
	call xen_force_evtchn_callback
	pop %edx
	pop %ecx
	pop %eax
#else
	push %rax
	push %rcx
	push %rdx
	push %rsi
	push %rdi
	push %r8
	push %r9
	push %r10
	push %r11
	call xen_force_evtchn_callback
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rdi
	pop %rsi
	pop %rdx
	pop %rcx
	pop %rax
#endif
	FRAME_END
	ret
	ENDPROC(check_events)