/*
 * Asm versions of Xen pv-ops, suitable for direct use.
 *
 * We only bother with direct forms (i.e., vcpu in pda) of the
 * operations here; the indirect forms are better handled in C.
 */

#include <asm/thread_info.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>
#include <asm/asm.h>

#include <xen/interface/xen.h>

#include <linux/linkage.h>

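/*
 * EFLAGS bit 31 is architecturally reserved and always reads back as
 * zero on real hardware, which is what makes it safe to borrow as an
 * out-of-band pseudo-flag here.
 */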
/* Pseudo-flag used for virtual NMI, which we don't implement yet */
#define XEN_EFLAGS_NMI	0x80000000

/*
 * This is run where a normal iret would be run, with the same stack setup:
 *	8: eflags
 *	4: cs
 *	esp-> 0: eip
 *
 * This attempts to make sure that any pending events are dealt with
 * on return to usermode, but there is a small window in which an
 * event can happen just before entering usermode.  If the nested
 * interrupt ends up setting one of the TIF_WORK_MASK pending work
 * flags, they will not be tested again before returning to
 * usermode.  This means that a process can end up with pending work,
 * which will be unprocessed until the process enters and leaves the
 * kernel again, which could be an unbounded amount of time.  This
 * means that a pending signal or reschedule event could be
 * indefinitely delayed.
 *
 * The fix is to notice a nested interrupt in the critical window, and
 * if one occurs, then fold the nested interrupt into the current
 * interrupt stack frame, and re-process it iteratively rather than
 * recursively.  This means that it will exit via the normal path, and
 * all pending work will be dealt with appropriately.
 *
 * Because the nested interrupt handler needs to deal with the current
 * stack state in whatever form it's in, we keep things simple by only
 * using a single register which is pushed/popped on the stack.
 */
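
/*
 * Pop the saved %fs selector.  If reloading %fs faults (the saved
 * selector may no longer be valid), the fixup overwrites the word on
 * the stack with a null selector and retries the pop.
 */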
.macro POP_FS
1:
	popw %fs
.pushsection .fixup, "ax"
2:	movw $0, (%esp)
	jmp 1b
.popsection
	_ASM_EXTABLE(1b,2b)
.endm
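
/*
 * The fast path in xen_iret is, roughly, this C-like sketch
 * (illustrative only; 'vcpu' stands for this CPU's shared vcpu_info):
 *
 *	vcpu->evtchn_upcall_mask = !(frame->eflags & X86_EFLAGS_IF);
 *	if (!vcpu->evtchn_upcall_mask && vcpu->evtchn_upcall_pending) {
 *		vcpu->evtchn_upcall_mask = 1;
 *		goto xen_hypervisor_callback;	// deliver the event first
 *	}
 *	iret;
 */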

ENTRY(xen_iret)
	/* test eflags for special cases */
	testl $(X86_EFLAGS_VM | XEN_EFLAGS_NMI), 8(%esp)
	jnz hyper_iret

	push %eax
	ESP_OFFSET=4	# bytes pushed onto stack

	/* Store vcpu_info pointer for easy access */
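	/*
	 * xen_vcpu is a per-cpu pointer, so on SMP we reach it through
	 * the kernel per-cpu segment: __KERNEL_PERCPU is loaded into
	 * %fs temporarily and the caller's %fs is restored via POP_FS.
	 */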
#ifdef CONFIG_SMP
	pushw %fs
	movl $(__KERNEL_PERCPU), %eax
	movl %eax, %fs
	movl %fs:xen_vcpu, %eax
	POP_FS
#else
	movl %ss:xen_vcpu, %eax
#endif

	/* check IF state we're restoring */
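	/*
	 * On entry eflags was at 8(%esp); ESP_OFFSET accounts for the
	 * %eax pushed above, and the extra +1 selects the second byte
	 * of eflags, which holds IF (bit 9, i.e. X86_EFLAGS_IF>>8).
	 */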
	testb $X86_EFLAGS_IF>>8, 8+1+ESP_OFFSET(%esp)

	/*
	 * Maybe enable events.  Once this happens we could get a
	 * recursive event, so the critical region starts immediately
	 * afterwards.  However, if that happens we don't end up
	 * resuming the code, so we don't have to be worried about
	 * being preempted to another CPU.
	 */
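	/*
	 * ZF is set iff IF was clear above, so this stores 1 (mask
	 * events) when returning with interrupts off and 0 (unmask)
	 * when returning with interrupts on.
	 */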
	setz %ss:XEN_vcpu_info_mask(%eax)
xen_iret_start_crit:

	/* check for unmasked and pending */
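	/*
	 * The Xen ABI lays out evtchn_upcall_pending and
	 * evtchn_upcall_mask as adjacent bytes, so a single 16-bit
	 * compare tests both: equality with 0x0001 means pending == 1
	 * and mask == 0.
	 */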
	cmpw $0x0001, %ss:XEN_vcpu_info_pending(%eax)

	/*
	 * If there's something pending, mask events again so we can
	 * jump back into xen_hypervisor_callback. Otherwise do not
	 * touch XEN_vcpu_info_mask.
	 */
	jne 1f
	movb $1, %ss:XEN_vcpu_info_mask(%eax)

1:	popl %eax

	/*
	 * From this point on the registers are restored and the stack
	 * updated, so we don't need to worry about it if we're
	 * preempted
	 */
iret_restore_end:

	/*
	 * Jump to xen_hypervisor_callback after fixing up the stack.
	 * Events are masked, so jumping out of the critical region is
	 * OK.
	 */
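	/*
	 * ZF still holds the result of the cmpw above: neither movb
	 * nor popl modifies eflags.
	 */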
	je xen_hypervisor_callback

1:	iret
xen_iret_end_crit:
	_ASM_EXTABLE(1b, iret_exc)

hyper_iret:
	/* put this out of line since it's very rarely used */
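	/*
	 * Each hypercall has a 32-byte slot in the hypercall page, so
	 * this jumps directly into the HYPERVISOR_iret stub.
	 */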
	jmp hypercall_page + __HYPERVISOR_iret * 32

	.globl xen_iret_start_crit, xen_iret_end_crit

/*
 * This is called by xen_hypervisor_callback in entry.S when it sees
 * that the EIP at the time of interrupt was between
 * xen_iret_start_crit and xen_iret_end_crit.  We're passed the EIP in
 * %eax so we can do a more refined determination of what to do.
 *
 * The stack format at this point is:
 *	----------------
 *	 ss		: (ss/esp may be present if we came from usermode)
 *	 esp		:
 *	 eflags		}  outer exception info
 *	 cs		}
 *	 eip		}
 *	---------------- <- edi (copy dest)
 *	 eax		:  outer eax if it hasn't been restored
 *	----------------
 *	 eflags		}  nested exception info
 *	 cs		}   (no ss/esp because we're nested
 *	 eip		}    from the same ring)
 *	 orig_eax	}<- esi (copy src)
 *	 - - - - - - - -
 *	 fs		}
 *	 es		}
 *	 ds		}  SAVE_ALL state
 *	 eax		}
 *	  :		:
 *	 ebx		}<- esp
 *	----------------
 *
 * In order to deliver the nested exception properly, we need to shift
 * everything from the return addr up to the error code so it sits
 * just under the outer exception info.  This means that when we
 * handle the exception, we do it in the context of the outer
 * exception rather than starting a new one.
 *
 * The only caveat is that if the outer eax hasn't been restored yet
 * (i.e., it's still on stack), we need to insert its value into the
 * SAVE_ALL state before going on, since it's usermode state which we
 * eventually need to restore.
 */
ENTRY(xen_iret_crit_fixup)
	/*
	 * Paranoia: Make sure we're really coming from kernel space.
	 * One could imagine a case where userspace jumps into the
	 * critical range address, but just before the CPU delivers a
	 * GP, it decides to deliver an interrupt instead.  Unlikely?
	 * Definitely.  Easy to avoid?  Yes.  The Intel documents
	 * explicitly say that the reported EIP for a bad jump is the
	 * jump instruction itself, not the destination, but some
	 * virtual environments get this wrong.
	 */
	movl PT_CS(%esp), %ecx
	andl $SEGMENT_RPL_MASK, %ecx
	cmpl $USER_RPL, %ecx
	je 2f

	lea PT_ORIG_EAX(%esp), %esi
	lea PT_EFLAGS(%esp), %edi

	/*
	 * If eip is before iret_restore_end then the stack
	 * hasn't been restored yet.
	 */
	cmp $iret_restore_end, %eax
	jae 1f

	movl 0+4(%edi), %eax		/* copy EAX (just above top of frame) */
	movl %eax, PT_EAX(%esp)

	lea ESP_OFFSET(%edi), %edi	/* move dest up over saved regs */

	/* set up the copy */
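	/*
	 * The source and destination ranges overlap, with the
	 * destination higher on the stack, so copy backwards: std
	 * makes rep movsl walk downwards from orig_eax, reading each
	 * source dword before it can be overwritten; cld then restores
	 * the normal direction.
	 */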
1:	std
	mov $PT_EIP / 4, %ecx		/* saved regs up to orig_eax */
	rep movsl
	cld

	lea 4(%edi), %esp		/* point esp to new frame */
2:	jmp xen_do_upcall