Commit | Line | Data |
---|---|---|
625efab1 JS |
1 | /* |
2 | * Copyright (C) 2006, Rusty Russell <rusty@rustcorp.com.au> IBM Corporation. | |
3 | * Copyright (C) 2007, Jes Sorensen <jes@sgi.com> SGI. | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify | |
6 | * it under the terms of the GNU General Public License as published by | |
7 | * the Free Software Foundation; either version 2 of the License, or | |
8 | * (at your option) any later version. | |
9 | * | |
10 | * This program is distributed in the hope that it will be useful, but | |
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
12 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | |
13 | * NON INFRINGEMENT. See the GNU General Public License for more | |
14 | * details. | |
15 | * | |
16 | * You should have received a copy of the GNU General Public License | |
17 | * along with this program; if not, write to the Free Software | |
18 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | |
19 | */ | |
2e04ef76 RR |
20 | /*P:450 |
21 | * This file contains the x86-specific lguest code. It used to be all | |
a6bd8e13 RR |
22 | * mixed in with drivers/lguest/core.c but several foolhardy code slashers |
23 | * wrestled most of the dependencies out to here in preparation for porting | |
24 | * lguest to other architectures (see what I mean by foolhardy?). | |
25 | * | |
26 | * This also contains a couple of non-obvious setup and teardown pieces which | |
2e04ef76 RR |
27 | * were implemented after days of debugging pain. |
28 | :*/ | |
625efab1 JS |
29 | #include <linux/kernel.h> |
30 | #include <linux/start_kernel.h> | |
31 | #include <linux/string.h> | |
32 | #include <linux/console.h> | |
33 | #include <linux/screen_info.h> | |
34 | #include <linux/irq.h> | |
35 | #include <linux/interrupt.h> | |
36 | #include <linux/clocksource.h> | |
37 | #include <linux/clockchips.h> | |
38 | #include <linux/cpu.h> | |
39 | #include <linux/lguest.h> | |
40 | #include <linux/lguest_launcher.h> | |
625efab1 JS |
41 | #include <asm/paravirt.h> |
42 | #include <asm/param.h> | |
43 | #include <asm/page.h> | |
44 | #include <asm/pgtable.h> | |
45 | #include <asm/desc.h> | |
46 | #include <asm/setup.h> | |
47 | #include <asm/lguest.h> | |
48 | #include <asm/uaccess.h> | |
49 | #include <asm/i387.h> | |
50 | #include "../lg.h" | |
51 | ||
/*
 * Set if the host CPU had PGE (global pages) enabled when we loaded: used by
 * lguest_arch_host_fini() to know whether to turn it back on at unload.
 */
static int cpu_had_pge;

/*
 * The far-call target used to enter the Switcher.  Filled in by
 * lguest_arch_host_init() with the high-mapped Switcher entry offset and the
 * dedicated LGUEST_CS code segment; referenced by name from the
 * "lcall *lguest_entry" in run_guest_once().
 */
static struct {
	unsigned long offset;
	unsigned short segment;
} lguest_entry;
58 | ||
/*
 * Offset from where switcher.S was compiled to where we've copied it: adding
 * this to a compiled-in Switcher address gives its high-mapped equivalent.
 */
static unsigned long switcher_offset(void)
{
	return SWITCHER_ADDR - (unsigned long)start_switcher_text;
}
64 | ||
65 | /* This cpu's struct lguest_pages. */ | |
66 | static struct lguest_pages *lguest_pages(unsigned int cpu) | |
67 | { | |
68 | return &(((struct lguest_pages *) | |
69 | (SWITCHER_ADDR + SHARED_SWITCHER_PAGES*PAGE_SIZE))[cpu]); | |
70 | } | |
71 | ||
/*
 * The last Guest vCPU that ran on each physical CPU: lets
 * copy_in_guest_info() skip the "everything changed" slow path when the same
 * vCPU runs again on the same pages.
 */
static DEFINE_PER_CPU(struct lg_cpu *, last_cpu);
625efab1 JS |
73 | |
/*S:010
 * We approach the Switcher.
 *
 * Remember that each CPU has two pages which are visible to the Guest when it
 * runs on that CPU.  This has to contain the state for that Guest: we copy the
 * state in just before we run the Guest.
 *
 * Each Guest has "changed" flags which indicate what has changed in the Guest
 * since it last ran.  We saw this set in interrupts_and_traps.c and
 * segments.c.
 */
static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
{
	/*
	 * Copying all this data can be quite expensive.  We usually run the
	 * same Guest we ran last time (and that Guest hasn't run anywhere else
	 * meanwhile).  If that's not the case, we pretend everything in the
	 * Guest has changed.
	 */
	if (__get_cpu_var(last_cpu) != cpu || cpu->last_pages != pages) {
		__get_cpu_var(last_cpu) = cpu;
		cpu->last_pages = pages;
		cpu->changed = CHANGED_ALL;
	}

	/* These copies are pretty cheap, so we do them unconditionally. */

	/* Save the current Host top-level page directory. */
	pages->state.host_cr3 = __pa(current->mm->pgd);
	/*
	 * Set up the Guest's page tables to see this CPU's pages (and no
	 * other CPU's pages).
	 */
	map_switcher_in_guest(cpu, pages);
	/*
	 * Set up the two "TSS" members which tell the CPU what stack to use
	 * for traps which go directly into the Guest (ie. traps at privilege
	 * level 1).
	 */
	pages->state.guest_tss.sp1 = cpu->esp1;
	pages->state.guest_tss.ss1 = cpu->ss1;

	/* Copy direct-to-Guest trap entries. */
	if (cpu->changed & CHANGED_IDT)
		copy_traps(cpu, pages->state.guest_idt, default_idt_entries);

	/* Copy all GDT entries which the Guest can change. */
	if (cpu->changed & CHANGED_GDT)
		copy_gdt(cpu, pages->state.guest_gdt);
	/* If only the TLS entries have changed, copy them. */
	else if (cpu->changed & CHANGED_GDT_TLS)
		copy_gdt_tls(cpu, pages->state.guest_gdt);

	/* Mark the Guest as unchanged for next time. */
	cpu->changed = 0;
}
131 | ||
/* Finally: the code to actually call into the Switcher to run the Guest. */
static void run_guest_once(struct lg_cpu *cpu, struct lguest_pages *pages)
{
	/* This is a dummy value we need for GCC's sake. */
	unsigned int clobber;

	/*
	 * Copy the guest-specific information into this CPU's "struct
	 * lguest_pages".
	 */
	copy_in_guest_info(cpu, pages);

	/*
	 * Set the trap number to 256 (impossible value).  If we fault while
	 * switching to the Guest (bad segment registers or bug), this will
	 * cause us to abort the Guest.
	 */
	cpu->regs->trapnum = 256;

	/*
	 * Now: we push the "eflags" register on the stack, then do an "lcall".
	 * This is how we change from using the kernel code segment to using
	 * the dedicated lguest code segment, as well as jumping into the
	 * Switcher.
	 *
	 * The lcall also pushes the old code segment (KERNEL_CS) onto the
	 * stack, then the address of this call.  This stack layout happens to
	 * exactly match the stack layout created by an interrupt...
	 */
	asm volatile("pushf; lcall *lguest_entry"
		     /*
		      * This is how we tell GCC that %eax ("a") and %ebx ("b")
		      * are changed by this routine.  The "=" means output.
		      */
		     : "=a"(clobber), "=b"(clobber)
		     /*
		      * %eax contains the pages pointer.  ("0" refers to the
		      * 0-th argument above, ie "a").  %ebx contains the
		      * physical address of the Guest's top-level page
		      * directory.
		      */
		     : "0"(pages), "1"(__pa(cpu->lg->pgdirs[cpu->cpu_pgd].pgdir))
		     /*
		      * We tell gcc that all these registers could change,
		      * which means we don't have to save and restore them in
		      * the Switcher.
		      */
		     : "memory", "%edx", "%ecx", "%edi", "%esi");
}
/*:*/
182 | ||
2e04ef76 RR |
183 | /*M:002 |
184 | * There are hooks in the scheduler which we can register to tell when we | |
e1e72965 RR |
185 | * get kicked off the CPU (preempt_notifier_register()). This would allow us |
186 | * to lazily disable SYSENTER which would regain some performance, and should | |
187 | * also simplify copy_in_guest_info(). Note that we'd still need to restore | |
188 | * things when we exit to Launcher userspace, but that's fairly easy. | |
189 | * | |
a91d74a3 | 190 | * We could also try using these hooks for PGE, but that might be too expensive. |
a6bd8e13 | 191 | * |
2e04ef76 RR |
192 | * The hooks were designed for KVM, but we can also put them to good use. |
193 | :*/ | |
e1e72965 | 194 | |
2e04ef76 RR |
/*H:040
 * This is the i386-specific code to setup and run the Guest.  Interrupts
 * are disabled: we own the CPU.
 */
void lguest_arch_run_guest(struct lg_cpu *cpu)
{
	/*
	 * Remember the awfully-named TS bit?  If the Guest has asked to set it
	 * we set it now, so we can trap and pass that trap to the Guest if it
	 * uses the FPU.
	 */
	if (cpu->ts)
		unlazy_fpu(current);

	/*
	 * SYSENTER is an optimized way of doing system calls.  We can't allow
	 * it because it always jumps to privilege level 0.  A normal Guest
	 * won't try it because we don't advertise it in CPUID, but a malicious
	 * Guest (or malicious Guest userspace program) could, so we tell the
	 * CPU to disable it before running the Guest.
	 */
	if (boot_cpu_has(X86_FEATURE_SEP))
		wrmsr(MSR_IA32_SYSENTER_CS, 0, 0);

	/*
	 * Now we actually run the Guest.  It will return when something
	 * interesting happens, and we can examine its registers to see what it
	 * was doing.
	 */
	run_guest_once(cpu, lguest_pages(raw_smp_processor_id()));

	/*
	 * Note that the "regs" structure contains two extra entries which are
	 * not really registers: a trap number which says what interrupt or
	 * trap made the switcher code come back, and an error code which some
	 * traps set.
	 */

	/* Restore SYSENTER if it's supposed to be on. */
	if (boot_cpu_has(X86_FEATURE_SEP))
		wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);

	/*
	 * If the Guest page faulted, then the cr2 register will tell us the
	 * bad virtual address.  We have to grab this now, because once we
	 * re-enable interrupts an interrupt could fault and thus overwrite
	 * cr2, or we could even move off to a different CPU.
	 */
	if (cpu->regs->trapnum == 14)
		cpu->arch.last_pagefault = read_cr2();
	/*
	 * Similarly, if we took a trap because the Guest used the FPU,
	 * we have to restore the FPU it expects to see.
	 * math_state_restore() may sleep and we may even move off to
	 * a different CPU.  So all the critical stuff should be done
	 * before this.
	 */
	else if (cpu->regs->trapnum == 7)
		math_state_restore();
}
255 | ||
2e04ef76 RR |
256 | /*H:130 |
257 | * Now we've examined the hypercall code; our Guest can make requests. | |
e1e72965 RR |
258 | * Our Guest is usually so well behaved; it never tries to do things it isn't |
259 | * allowed to, and uses hypercalls instead. Unfortunately, Linux's paravirtual | |
260 | * infrastructure isn't quite complete, because it doesn't contain replacements | |
261 | * for the Intel I/O instructions. As a result, the Guest sometimes fumbles | |
262 | * across one during the boot process as it probes for various things which are | |
263 | * usually attached to a PC. | |
625efab1 | 264 | * |
e1e72965 | 265 | * When the Guest uses one of these instructions, we get a trap (General |
625efab1 | 266 | * Protection Fault) and come here. We see if it's one of those troublesome |
2e04ef76 RR |
267 | * instructions and skip over it. We return true if we did. |
268 | */ | |
a3863f68 | 269 | static int emulate_insn(struct lg_cpu *cpu) |
625efab1 JS |
270 | { |
271 | u8 insn; | |
272 | unsigned int insnlen = 0, in = 0, shift = 0; | |
2e04ef76 RR |
273 | /* |
274 | * The eip contains the *virtual* address of the Guest's instruction: | |
275 | * guest_pa just subtracts the Guest's page_offset. | |
276 | */ | |
1713608f | 277 | unsigned long physaddr = guest_pa(cpu, cpu->regs->eip); |
625efab1 | 278 | |
2e04ef76 RR |
279 | /* |
280 | * This must be the Guest kernel trying to do something, not userspace! | |
47436aa4 | 281 | * The bottom two bits of the CS segment register are the privilege |
2e04ef76 RR |
282 | * level. |
283 | */ | |
a53a35a8 | 284 | if ((cpu->regs->cs & 3) != GUEST_PL) |
625efab1 JS |
285 | return 0; |
286 | ||
287 | /* Decoding x86 instructions is icky. */ | |
382ac6b3 | 288 | insn = lgread(cpu, physaddr, u8); |
625efab1 | 289 | |
2e04ef76 RR |
290 | /* |
291 | * 0x66 is an "operand prefix". It means it's using the upper 16 bits | |
292 | * of the eax register. | |
293 | */ | |
625efab1 JS |
294 | if (insn == 0x66) { |
295 | shift = 16; | |
296 | /* The instruction is 1 byte so far, read the next byte. */ | |
297 | insnlen = 1; | |
382ac6b3 | 298 | insn = lgread(cpu, physaddr + insnlen, u8); |
625efab1 JS |
299 | } |
300 | ||
2e04ef76 RR |
301 | /* |
302 | * We can ignore the lower bit for the moment and decode the 4 opcodes | |
303 | * we need to emulate. | |
304 | */ | |
625efab1 JS |
305 | switch (insn & 0xFE) { |
306 | case 0xE4: /* in <next byte>,%al */ | |
307 | insnlen += 2; | |
308 | in = 1; | |
309 | break; | |
310 | case 0xEC: /* in (%dx),%al */ | |
311 | insnlen += 1; | |
312 | in = 1; | |
313 | break; | |
314 | case 0xE6: /* out %al,<next byte> */ | |
315 | insnlen += 2; | |
316 | break; | |
317 | case 0xEE: /* out %al,(%dx) */ | |
318 | insnlen += 1; | |
319 | break; | |
320 | default: | |
321 | /* OK, we don't know what this is, can't emulate. */ | |
322 | return 0; | |
323 | } | |
324 | ||
2e04ef76 RR |
325 | /* |
326 | * If it was an "IN" instruction, they expect the result to be read | |
625efab1 | 327 | * into %eax, so we change %eax. We always return all-ones, which |
2e04ef76 RR |
328 | * traditionally means "there's nothing there". |
329 | */ | |
625efab1 JS |
330 | if (in) { |
331 | /* Lower bit tells is whether it's a 16 or 32 bit access */ | |
332 | if (insn & 0x1) | |
a53a35a8 | 333 | cpu->regs->eax = 0xFFFFFFFF; |
625efab1 | 334 | else |
a53a35a8 | 335 | cpu->regs->eax |= (0xFFFF << shift); |
625efab1 JS |
336 | } |
337 | /* Finally, we've "done" the instruction, so move past it. */ | |
a53a35a8 | 338 | cpu->regs->eip += insnlen; |
625efab1 JS |
339 | /* Success! */ |
340 | return 1; | |
341 | } | |
342 | ||
/*
 * Our hypercalls mechanism used to be based on direct software interrupts.
 * After Anthony's "Refactor hypercall infrastructure" kvm patch, we decided to
 * change over to using kvm hypercalls.
 *
 * KVM_HYPERCALL is actually a "vmcall" instruction, which generates an invalid
 * opcode fault (fault 6) on non-VT cpus, so the easiest solution seemed to be
 * an *emulation approach*: if the fault was really produced by an hypercall
 * (is_hypercall() does exactly this check), we can just call the corresponding
 * hypercall host implementation function.
 *
 * But these invalid opcode faults are notably slower than software interrupts.
 * So we implemented the *patching (or rewriting) approach*: every time we hit
 * the KVM_HYPERCALL opcode in Guest code, we patch it to the old "int 0x1f"
 * opcode, so next time the Guest calls this hypercall it will use the
 * faster trap mechanism.
 *
 * Matias even benchmarked it to convince you: this shows the average cycle
 * cost of a hypercall.  For each alternative solution mentioned above we've
 * made 5 runs of the benchmark:
 *
 * 1) direct software interrupt: 2915, 2789, 2764, 2721, 2898
 * 2) emulation technique: 3410, 3681, 3466, 3392, 3780
 * 3) patching (rewrite) technique: 2977, 2975, 2891, 2637, 2884
 *
 * One two-line function is worth a 20% hypercall speed boost!
 */
static void rewrite_hypercall(struct lg_cpu *cpu)
{
	/*
	 * This are the opcodes we use to patch the Guest.  The opcode for "int
	 * $0x1f" is "0xcd 0x1f" but vmcall instruction is 3 bytes long, so we
	 * complete the sequence with a NOP (0x90).
	 */
	u8 insn[3] = {0xcd, 0x1f, 0x90};

	/* Patch the vmcall at the Guest's current eip in place. */
	__lgwrite(cpu, guest_pa(cpu, cpu->regs->eip), insn, sizeof(insn));
	/*
	 * The above write might have caused a copy of that page to be made
	 * (if it was read-only).  We need to make sure the Guest has
	 * up-to-date pagetables.  As this doesn't happen often, we can just
	 * drop them all.
	 */
	guest_pagetable_clear_all(cpu);
}
388 | ||
389 | static bool is_hypercall(struct lg_cpu *cpu) | |
390 | { | |
391 | u8 insn[3]; | |
392 | ||
2e04ef76 RR |
393 | /* |
394 | * This must be the Guest kernel trying to do something. | |
4cd8b5e2 | 395 | * The bottom two bits of the CS segment register are the privilege |
2e04ef76 RR |
396 | * level. |
397 | */ | |
4cd8b5e2 MZ |
398 | if ((cpu->regs->cs & 3) != GUEST_PL) |
399 | return false; | |
400 | ||
401 | /* Is it a vmcall? */ | |
402 | __lgread(cpu, insn, guest_pa(cpu, cpu->regs->eip), sizeof(insn)); | |
403 | return insn[0] == 0x0f && insn[1] == 0x01 && insn[2] == 0xc1; | |
404 | } | |
405 | ||
/*H:050 Once we've re-enabled interrupts, we look at why the Guest exited. */
void lguest_arch_handle_trap(struct lg_cpu *cpu)
{
	switch (cpu->regs->trapnum) {
	case 13: /* We've intercepted a General Protection Fault. */
		/*
		 * Check if this was one of those annoying IN or OUT
		 * instructions which we need to emulate.  If so, we just go
		 * back into the Guest after we've done it.
		 */
		if (cpu->regs->errcode == 0) {
			if (emulate_insn(cpu))
				return;
		}
		/*
		 * If KVM is active, the vmcall instruction triggers a General
		 * Protection Fault.  Normally it triggers an invalid opcode
		 * fault (6), so we deliberately fall through to the hypercall
		 * check below.
		 */
		/* fallthrough */
	case 6:
		/*
		 * We need to check if ring == GUEST_PL and faulting
		 * instruction == vmcall.
		 */
		if (is_hypercall(cpu)) {
			rewrite_hypercall(cpu);
			return;
		}
		break;
	case 14: /* We've intercepted a Page Fault. */
		/*
		 * The Guest accessed a virtual address that wasn't mapped.
		 * This happens a lot: we don't actually set up most of the page
		 * tables for the Guest at all when we start: as it runs it asks
		 * for more and more, and we set them up as required.  In this
		 * case, we don't even tell the Guest that the fault happened.
		 *
		 * The errcode tells whether this was a read or a write, and
		 * whether kernel or userspace code.
		 */
		if (demand_page(cpu, cpu->arch.last_pagefault,
				cpu->regs->errcode))
			return;

		/*
		 * OK, it's really not there (or not OK): the Guest needs to
		 * know.  We write out the cr2 value so it knows where the
		 * fault occurred.
		 *
		 * Note that if the Guest were really messed up, this could
		 * happen before it's done the LHCALL_LGUEST_INIT hypercall, so
		 * lg->lguest_data could be NULL
		 */
		if (cpu->lg->lguest_data &&
		    put_user(cpu->arch.last_pagefault,
			     &cpu->lg->lguest_data->cr2))
			kill_guest(cpu, "Writing cr2");
		break;
	case 7: /* We've intercepted a Device Not Available fault. */
		/*
		 * If the Guest doesn't want to know, we already restored the
		 * Floating Point Unit, so we just continue without telling it.
		 */
		if (!cpu->ts)
			return;
		break;
	case 32 ... 255:
		/*
		 * These values mean a real interrupt occurred, in which case
		 * the Host handler has already been run.  We just do a
		 * friendly check if another process should now be run, then
		 * return to run the Guest again
		 */
		cond_resched();
		return;
	case LGUEST_TRAP_ENTRY:
		/*
		 * Our 'struct hcall_args' maps directly over our regs: we set
		 * up the pointer now to indicate a hypercall is pending.
		 */
		cpu->hcall = (struct hcall_args *)cpu->regs;
		return;
	}

	/* We didn't handle the trap, so it needs to go to the Guest. */
	if (!deliver_trap(cpu, cpu->regs->trapnum))
		/*
		 * If the Guest doesn't have a handler (either it hasn't
		 * registered any yet, or it's one of the faults we don't let
		 * it handle), it dies with this cryptic error message.
		 */
		kill_guest(cpu, "unhandled trap %li at %#lx (%#lx)",
			   cpu->regs->trapnum, cpu->regs->eip,
			   cpu->regs->trapnum == 14 ? cpu->arch.last_pagefault
			   : cpu->regs->errcode);
}
502 | ||
2e04ef76 RR |
503 | /* |
504 | * Now we can look at each of the routines this calls, in increasing order of | |
625efab1 JS |
505 | * complexity: do_hypercalls(), emulate_insn(), maybe_do_interrupt(), |
506 | * deliver_trap() and demand_page(). After all those, we'll be ready to | |
507 | * examine the Switcher, and our philosophical understanding of the Host/Guest | |
2e04ef76 RR |
508 | * duality will be complete. |
509 | :*/ | |
625efab1 JS |
510 | static void adjust_pge(void *on) |
511 | { | |
512 | if (on) | |
513 | write_cr4(read_cr4() | X86_CR4_PGE); | |
514 | else | |
515 | write_cr4(read_cr4() & ~X86_CR4_PGE); | |
516 | } | |
517 | ||
2e04ef76 RR |
518 | /*H:020 |
519 | * Now the Switcher is mapped and every thing else is ready, we need to do | |
520 | * some more i386-specific initialization. | |
521 | */ | |
625efab1 JS |
522 | void __init lguest_arch_host_init(void) |
523 | { | |
524 | int i; | |
525 | ||
2e04ef76 RR |
526 | /* |
527 | * Most of the i386/switcher.S doesn't care that it's been moved; on | |
625efab1 JS |
528 | * Intel, jumps are relative, and it doesn't access any references to |
529 | * external code or data. | |
530 | * | |
531 | * The only exception is the interrupt handlers in switcher.S: their | |
532 | * addresses are placed in a table (default_idt_entries), so we need to | |
533 | * update the table with the new addresses. switcher_offset() is a | |
a6bd8e13 | 534 | * convenience function which returns the distance between the |
2e04ef76 RR |
535 | * compiled-in switcher code and the high-mapped copy we just made. |
536 | */ | |
625efab1 JS |
537 | for (i = 0; i < IDT_ENTRIES; i++) |
538 | default_idt_entries[i] += switcher_offset(); | |
539 | ||
540 | /* | |
541 | * Set up the Switcher's per-cpu areas. | |
542 | * | |
543 | * Each CPU gets two pages of its own within the high-mapped region | |
544 | * (aka. "struct lguest_pages"). Much of this can be initialized now, | |
545 | * but some depends on what Guest we are running (which is set up in | |
546 | * copy_in_guest_info()). | |
547 | */ | |
548 | for_each_possible_cpu(i) { | |
549 | /* lguest_pages() returns this CPU's two pages. */ | |
550 | struct lguest_pages *pages = lguest_pages(i); | |
2e04ef76 | 551 | /* This is a convenience pointer to make the code neater. */ |
625efab1 JS |
552 | struct lguest_ro_state *state = &pages->state; |
553 | ||
2e04ef76 RR |
554 | /* |
555 | * The Global Descriptor Table: the Host has a different one | |
625efab1 JS |
556 | * for each CPU. We keep a descriptor for the GDT which says |
557 | * where it is and how big it is (the size is actually the last | |
2e04ef76 RR |
558 | * byte, not the size, hence the "-1"). |
559 | */ | |
625efab1 JS |
560 | state->host_gdt_desc.size = GDT_SIZE-1; |
561 | state->host_gdt_desc.address = (long)get_cpu_gdt_table(i); | |
562 | ||
2e04ef76 RR |
563 | /* |
564 | * All CPUs on the Host use the same Interrupt Descriptor | |
625efab1 | 565 | * Table, so we just use store_idt(), which gets this CPU's IDT |
2e04ef76 RR |
566 | * descriptor. |
567 | */ | |
625efab1 JS |
568 | store_idt(&state->host_idt_desc); |
569 | ||
2e04ef76 RR |
570 | /* |
571 | * The descriptors for the Guest's GDT and IDT can be filled | |
625efab1 | 572 | * out now, too. We copy the GDT & IDT into ->guest_gdt and |
2e04ef76 RR |
573 | * ->guest_idt before actually running the Guest. |
574 | */ | |
625efab1 JS |
575 | state->guest_idt_desc.size = sizeof(state->guest_idt)-1; |
576 | state->guest_idt_desc.address = (long)&state->guest_idt; | |
577 | state->guest_gdt_desc.size = sizeof(state->guest_gdt)-1; | |
578 | state->guest_gdt_desc.address = (long)&state->guest_gdt; | |
579 | ||
2e04ef76 RR |
580 | /* |
581 | * We know where we want the stack to be when the Guest enters | |
a6bd8e13 | 582 | * the Switcher: in pages->regs. The stack grows upwards, so |
2e04ef76 RR |
583 | * we start it at the end of that structure. |
584 | */ | |
faca6227 | 585 | state->guest_tss.sp0 = (long)(&pages->regs + 1); |
2e04ef76 RR |
586 | /* |
587 | * And this is the GDT entry to use for the stack: we keep a | |
588 | * couple of special LGUEST entries. | |
589 | */ | |
625efab1 JS |
590 | state->guest_tss.ss0 = LGUEST_DS; |
591 | ||
2e04ef76 RR |
592 | /* |
593 | * x86 can have a finegrained bitmap which indicates what I/O | |
625efab1 | 594 | * ports the process can use. We set it to the end of our |
2e04ef76 RR |
595 | * structure, meaning "none". |
596 | */ | |
625efab1 JS |
597 | state->guest_tss.io_bitmap_base = sizeof(state->guest_tss); |
598 | ||
2e04ef76 RR |
599 | /* |
600 | * Some GDT entries are the same across all Guests, so we can | |
601 | * set them up now. | |
602 | */ | |
625efab1 JS |
603 | setup_default_gdt_entries(state); |
604 | /* Most IDT entries are the same for all Guests, too.*/ | |
605 | setup_default_idt_entries(state, default_idt_entries); | |
606 | ||
2e04ef76 RR |
607 | /* |
608 | * The Host needs to be able to use the LGUEST segments on this | |
609 | * CPU, too, so put them in the Host GDT. | |
610 | */ | |
625efab1 JS |
611 | get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT; |
612 | get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT; | |
613 | } | |
614 | ||
2e04ef76 RR |
615 | /* |
616 | * In the Switcher, we want the %cs segment register to use the | |
625efab1 JS |
617 | * LGUEST_CS GDT entry: we've put that in the Host and Guest GDTs, so |
618 | * it will be undisturbed when we switch. To change %cs and jump we | |
2e04ef76 RR |
619 | * need this structure to feed to Intel's "lcall" instruction. |
620 | */ | |
625efab1 JS |
621 | lguest_entry.offset = (long)switch_to_guest + switcher_offset(); |
622 | lguest_entry.segment = LGUEST_CS; | |
623 | ||
2e04ef76 RR |
624 | /* |
625 | * Finally, we need to turn off "Page Global Enable". PGE is an | |
625efab1 JS |
626 | * optimization where page table entries are specially marked to show |
627 | * they never change. The Host kernel marks all the kernel pages this | |
628 | * way because it's always present, even when userspace is running. | |
629 | * | |
630 | * Lguest breaks this: unbeknownst to the rest of the Host kernel, we | |
631 | * switch to the Guest kernel. If you don't disable this on all CPUs, | |
632 | * you'll get really weird bugs that you'll chase for two days. | |
633 | * | |
634 | * I used to turn PGE off every time we switched to the Guest and back | |
2e04ef76 RR |
635 | * on when we return, but that slowed the Switcher down noticibly. |
636 | */ | |
625efab1 | 637 | |
2e04ef76 RR |
638 | /* |
639 | * We don't need the complexity of CPUs coming and going while we're | |
640 | * doing this. | |
641 | */ | |
86ef5c9a | 642 | get_online_cpus(); |
625efab1 JS |
643 | if (cpu_has_pge) { /* We have a broader idea of "global". */ |
644 | /* Remember that this was originally set (for cleanup). */ | |
645 | cpu_had_pge = 1; | |
2e04ef76 RR |
646 | /* |
647 | * adjust_pge is a helper function which sets or unsets the PGE | |
648 | * bit on its CPU, depending on the argument (0 == unset). | |
649 | */ | |
15c8b6c1 | 650 | on_each_cpu(adjust_pge, (void *)0, 1); |
625efab1 | 651 | /* Turn off the feature in the global feature set. */ |
cf485e56 | 652 | clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PGE); |
625efab1 | 653 | } |
86ef5c9a | 654 | put_online_cpus(); |
625efab1 JS |
655 | }; |
656 | /*:*/ | |
657 | ||
/* Undo lguest_arch_host_init()'s PGE change when the module is unloaded. */
void __exit lguest_arch_host_fini(void)
{
	/* If we had PGE before we started, turn it back on now. */
	get_online_cpus();
	if (cpu_had_pge) {
		set_cpu_cap(&boot_cpu_data, X86_FEATURE_PGE);
		/* adjust_pge's argument "1" means set PGE. */
		on_each_cpu(adjust_pge, (void *)1, 1);
	}
	put_online_cpus();
}
b410e7b1 JS |
669 | |
670 | ||
/*H:122 The i386-specific hypercalls simply farm out to the right functions.
 * Returns 0 on success, -EIO for an unknown hypercall number in arg0. */
int lguest_arch_do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
{
	switch (args->arg0) {
	case LHCALL_LOAD_GDT_ENTRY:
		/* The three extra args are forwarded verbatim. */
		load_guest_gdt_entry(cpu, args->arg1, args->arg2, args->arg3);
		break;
	case LHCALL_LOAD_IDT_ENTRY:
		load_guest_idt_entry(cpu, args->arg1, args->arg2, args->arg3);
		break;
	case LHCALL_LOAD_TLS:
		guest_load_tls(cpu, args->arg1);
		break;
	default:
		/* Bad Guest.  Bad! */
		return -EIO;
	}
	return 0;
}
690 | ||
/*H:126 i386-specific hypercall initialization.  Returns 0 on success,
 * -EFAULT if the Guest passed a bad lguest_data address or the tsc_khz
 * write fails. */
int lguest_arch_init_hypercalls(struct lg_cpu *cpu)
{
	u32 tsc_speed;

	/*
	 * The pointer to the Guest's "struct lguest_data" is the only argument.
	 * We check that address now.
	 */
	if (!lguest_address_ok(cpu->lg, cpu->hcall->arg1,
			       sizeof(*cpu->lg->lguest_data)))
		return -EFAULT;

	/*
	 * Having checked it, we simply set lg->lguest_data to point straight
	 * into the Launcher's memory at the right place and then use
	 * copy_to_user/from_user from now on, instead of lgread/write.  I put
	 * this in to show that I'm not immune to writing stupid
	 * optimizations.
	 */
	cpu->lg->lguest_data = cpu->lg->mem_base + cpu->hcall->arg1;

	/*
	 * We insist that the Time Stamp Counter exist and doesn't change with
	 * cpu frequency.  Some devious chip manufacturers decided that TSC
	 * changes could be handled in software.  I decided that time going
	 * backwards might be good for benchmarks, but it's bad for users.
	 *
	 * We also insist that the TSC be stable: the kernel detects unreliable
	 * TSCs for its own purposes, and we use that here.
	 */
	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) && !check_tsc_unstable())
		tsc_speed = tsc_khz;
	else
		tsc_speed = 0;	/* 0 tells the Guest the TSC is unusable. */
	if (put_user(tsc_speed, &cpu->lg->lguest_data->tsc_khz))
		return -EFAULT;

	/* The interrupt code might not like the system call vector. */
	if (!check_syscall_vector(cpu->lg))
		kill_guest(cpu, "bad syscall vector");

	return 0;
}
a6bd8e13 | 735 | /*:*/ |
d612cde0 | 736 | |
2e04ef76 RR |
/*L:030
 * lguest_arch_setup_regs()
 *
 * Prepare the initial register state so the Guest starts executing at
 * "start".  Most of the Guest's registers are left alone: we used
 * get_zeroed_page() to allocate the structure, so they will be 0.
 */
void lguest_arch_setup_regs(struct lg_cpu *cpu, unsigned long start)
{
	struct lguest_regs *regs = cpu->regs;

	/*
	 * There are four "segment" registers which the Guest needs to boot:
	 * The "code segment" register (cs) refers to the kernel code segment
	 * __KERNEL_CS, and the "data", "extra" and "stack" segment registers
	 * refer to the kernel data segment __KERNEL_DS.
	 *
	 * The privilege level is packed into the lower bits.  The Guest runs
	 * at privilege level 1 (GUEST_PL).
	 */
	regs->ds = regs->es = regs->ss = __KERNEL_DS|GUEST_PL;
	regs->cs = __KERNEL_CS|GUEST_PL;

	/*
	 * The "eflags" register contains miscellaneous flags.  Bit 1 (0x002)
	 * is supposed to always be "1".  Bit 9 (0x200) controls whether
	 * interrupts are enabled.  We always leave interrupts enabled while
	 * running the Guest.
	 */
	regs->eflags = X86_EFLAGS_IF | 0x2;

	/*
	 * The "Extended Instruction Pointer" register says where the Guest is
	 * running.
	 */
	regs->eip = start;

	/*
	 * %esi points to our boot information, at physical address 0, so don't
	 * touch it.
	 */

	/* There are a couple of GDT entries the Guest expects at boot. */
	setup_guest_gdt(cpu);
}