Commit | Line | Data |
---|---|---|
625efab1 JS |
1 | /* |
2 | * Copyright (C) 2006, Rusty Russell <rusty@rustcorp.com.au> IBM Corporation. | |
3 | * Copyright (C) 2007, Jes Sorensen <jes@sgi.com> SGI. | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify | |
6 | * it under the terms of the GNU General Public License as published by | |
7 | * the Free Software Foundation; either version 2 of the License, or | |
8 | * (at your option) any later version. | |
9 | * | |
10 | * This program is distributed in the hope that it will be useful, but | |
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
12 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | |
13 | * NON INFRINGEMENT. See the GNU General Public License for more | |
14 | * details. | |
15 | * | |
16 | * You should have received a copy of the GNU General Public License | |
17 | * along with this program; if not, write to the Free Software | |
18 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | |
19 | */ | |
2e04ef76 RR |
20 | /*P:450 |
21 | * This file contains the x86-specific lguest code. It used to be all | |
a6bd8e13 RR |
22 | * mixed in with drivers/lguest/core.c but several foolhardy code slashers |
23 | * wrestled most of the dependencies out to here in preparation for porting | |
24 | * lguest to other architectures (see what I mean by foolhardy?). | |
25 | * | |
26 | * This also contains a couple of non-obvious setup and teardown pieces which | |
2e04ef76 RR |
27 | * were implemented after days of debugging pain. |
28 | :*/ | |
625efab1 JS |
29 | #include <linux/kernel.h> |
30 | #include <linux/start_kernel.h> | |
31 | #include <linux/string.h> | |
32 | #include <linux/console.h> | |
33 | #include <linux/screen_info.h> | |
34 | #include <linux/irq.h> | |
35 | #include <linux/interrupt.h> | |
36 | #include <linux/clocksource.h> | |
37 | #include <linux/clockchips.h> | |
38 | #include <linux/cpu.h> | |
39 | #include <linux/lguest.h> | |
40 | #include <linux/lguest_launcher.h> | |
625efab1 JS |
41 | #include <asm/paravirt.h> |
42 | #include <asm/param.h> | |
43 | #include <asm/page.h> | |
44 | #include <asm/pgtable.h> | |
45 | #include <asm/desc.h> | |
46 | #include <asm/setup.h> | |
47 | #include <asm/lguest.h> | |
48 | #include <asm/uaccess.h> | |
49 | #include <asm/i387.h> | |
50 | #include "../lg.h" | |
51 | ||
/* Set if the host CPU had PGE (global pages) enabled when this module loaded,
 * so lguest_arch_host_fini() knows to restore it. */
static int cpu_had_pge;

/* Far-call target (offset + code segment) run_guest_once() "lcall"s through
 * to enter the Switcher; filled in by lguest_arch_host_init(). */
static struct {
	unsigned long offset;
	unsigned short segment;
} lguest_entry;
58 | ||
59 | /* Offset from where switcher.S was compiled to where we've copied it */ | |
60 | static unsigned long switcher_offset(void) | |
61 | { | |
406a590b | 62 | return switcher_addr - (unsigned long)start_switcher_text; |
625efab1 JS |
63 | } |
64 | ||
93a2cdff | 65 | /* This cpu's struct lguest_pages (after the Switcher text page) */ |
625efab1 JS |
66 | static struct lguest_pages *lguest_pages(unsigned int cpu) |
67 | { | |
93a2cdff | 68 | return &(((struct lguest_pages *)(switcher_addr + PAGE_SIZE))[cpu]); |
625efab1 JS |
69 | } |
70 | ||
/* The last Guest vcpu that ran on each physical CPU (see copy_in_guest_info()). */
static DEFINE_PER_CPU(struct lg_cpu *, lg_last_cpu);
625efab1 JS |
72 | |
73 | /*S:010 | |
e1e72965 | 74 | * We approach the Switcher. |
625efab1 JS |
75 | * |
76 | * Remember that each CPU has two pages which are visible to the Guest when it | |
77 | * runs on that CPU. This has to contain the state for that Guest: we copy the | |
78 | * state in just before we run the Guest. | |
79 | * | |
80 | * Each Guest has "changed" flags which indicate what has changed in the Guest | |
81 | * since it last ran. We saw this set in interrupts_and_traps.c and | |
82 | * segments.c. | |
83 | */ | |
/*
 * Copy this Guest's state into the per-cpu "struct lguest_pages" the Switcher
 * reads, just before we run the Guest on this CPU.
 */
static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
{
	/*
	 * Copying all this data can be quite expensive.  We usually run the
	 * same Guest we ran last time (and that Guest hasn't run anywhere else
	 * meanwhile).  If that's not the case, we pretend everything in the
	 * Guest has changed.
	 */
	if (__this_cpu_read(lg_last_cpu) != cpu || cpu->last_pages != pages) {
		__this_cpu_write(lg_last_cpu, cpu);
		cpu->last_pages = pages;
		cpu->changed = CHANGED_ALL;
	}

	/* These copies are pretty cheap, so we do them unconditionally. */

	/* Save the current Host top-level page directory. */
	pages->state.host_cr3 = __pa(current->mm->pgd);
	/*
	 * Set up the Guest's page tables to see this CPU's pages (and no
	 * other CPU's pages).
	 */
	map_switcher_in_guest(cpu, pages);
	/*
	 * Set up the two "TSS" members which tell the CPU what stack to use
	 * for traps which go directly into the Guest (ie. traps at privilege
	 * level 1).
	 */
	pages->state.guest_tss.sp1 = cpu->esp1;
	pages->state.guest_tss.ss1 = cpu->ss1;

	/* Copy direct-to-Guest trap entries. */
	if (cpu->changed & CHANGED_IDT)
		copy_traps(cpu, pages->state.guest_idt, default_idt_entries);

	/* Copy all GDT entries which the Guest can change. */
	if (cpu->changed & CHANGED_GDT)
		copy_gdt(cpu, pages->state.guest_gdt);
	/* If only the TLS entries have changed, copy them. */
	else if (cpu->changed & CHANGED_GDT_TLS)
		copy_gdt_tls(cpu, pages->state.guest_gdt);

	/* Mark the Guest as unchanged for next time. */
	cpu->changed = 0;
}
130 | ||
/* Finally: the code to actually call into the Switcher to run the Guest. */
static void run_guest_once(struct lg_cpu *cpu, struct lguest_pages *pages)
{
	/* This is a dummy value we need for GCC's sake. */
	unsigned int clobber;

	/*
	 * Copy the guest-specific information into this CPU's "struct
	 * lguest_pages".
	 */
	copy_in_guest_info(cpu, pages);

	/*
	 * Set the trap number to 256 (impossible value).  If we fault while
	 * switching to the Guest (bad segment registers or bug), this will
	 * cause us to abort the Guest.
	 */
	cpu->regs->trapnum = 256;

	/*
	 * Now: we push the "eflags" register on the stack, then do an "lcall".
	 * This is how we change from using the kernel code segment to using
	 * the dedicated lguest code segment, as well as jumping into the
	 * Switcher.
	 *
	 * The lcall also pushes the old code segment (KERNEL_CS) onto the
	 * stack, then the address of this call.  This stack layout happens to
	 * exactly match the stack layout created by an interrupt...
	 */
	asm volatile("pushf; lcall *%4"
		     /*
		      * This is how we tell GCC that %eax ("a") and %ebx ("b")
		      * are changed by this routine.  The "=" means output.
		      */
		     : "=a"(clobber), "=b"(clobber)
		     /*
		      * %eax contains the pages pointer.  ("0" refers to the
		      * 0-th argument above, ie "a").  %ebx contains the
		      * physical address of the Guest's top-level page
		      * directory.
		      */
		     : "0"(pages),
		       "1"(__pa(cpu->lg->pgdirs[cpu->cpu_pgd].pgdir)),
		       "m"(lguest_entry)
		     /*
		      * We tell gcc that all these registers could change,
		      * which means we don't have to save and restore them in
		      * the Switcher.
		      */
		     : "memory", "%edx", "%ecx", "%edi", "%esi");
}
182 | /*:*/ | |
183 | ||
18c13737 RR |
184 | unsigned long *lguest_arch_regptr(struct lg_cpu *cpu, size_t reg_off, bool any) |
185 | { | |
186 | switch (reg_off) { | |
187 | case offsetof(struct pt_regs, bx): | |
188 | return &cpu->regs->ebx; | |
189 | case offsetof(struct pt_regs, cx): | |
190 | return &cpu->regs->ecx; | |
191 | case offsetof(struct pt_regs, dx): | |
192 | return &cpu->regs->edx; | |
193 | case offsetof(struct pt_regs, si): | |
194 | return &cpu->regs->esi; | |
195 | case offsetof(struct pt_regs, di): | |
196 | return &cpu->regs->edi; | |
197 | case offsetof(struct pt_regs, bp): | |
198 | return &cpu->regs->ebp; | |
199 | case offsetof(struct pt_regs, ax): | |
200 | return &cpu->regs->eax; | |
201 | case offsetof(struct pt_regs, ip): | |
202 | return &cpu->regs->eip; | |
203 | case offsetof(struct pt_regs, sp): | |
204 | return &cpu->regs->esp; | |
205 | } | |
206 | ||
207 | /* Launcher can read these, but we don't allow any setting. */ | |
208 | if (any) { | |
209 | switch (reg_off) { | |
210 | case offsetof(struct pt_regs, ds): | |
211 | return &cpu->regs->ds; | |
212 | case offsetof(struct pt_regs, es): | |
213 | return &cpu->regs->es; | |
214 | case offsetof(struct pt_regs, fs): | |
215 | return &cpu->regs->fs; | |
216 | case offsetof(struct pt_regs, gs): | |
217 | return &cpu->regs->gs; | |
218 | case offsetof(struct pt_regs, cs): | |
219 | return &cpu->regs->cs; | |
220 | case offsetof(struct pt_regs, flags): | |
221 | return &cpu->regs->eflags; | |
222 | case offsetof(struct pt_regs, ss): | |
223 | return &cpu->regs->ss; | |
224 | } | |
225 | } | |
226 | ||
227 | return NULL; | |
228 | } | |
229 | ||
2e04ef76 RR |
230 | /*M:002 |
231 | * There are hooks in the scheduler which we can register to tell when we | |
e1e72965 RR |
232 | * get kicked off the CPU (preempt_notifier_register()). This would allow us |
233 | * to lazily disable SYSENTER which would regain some performance, and should | |
234 | * also simplify copy_in_guest_info(). Note that we'd still need to restore | |
235 | * things when we exit to Launcher userspace, but that's fairly easy. | |
236 | * | |
a91d74a3 | 237 | * We could also try using these hooks for PGE, but that might be too expensive. |
a6bd8e13 | 238 | * |
2e04ef76 RR |
239 | * The hooks were designed for KVM, but we can also put them to good use. |
240 | :*/ | |
e1e72965 | 241 | |
2e04ef76 RR |
242 | /*H:040 |
243 | * This is the i386-specific code to setup and run the Guest. Interrupts | |
244 | * are disabled: we own the CPU. | |
245 | */ | |
d0953d42 | 246 | void lguest_arch_run_guest(struct lg_cpu *cpu) |
625efab1 | 247 | { |
2e04ef76 RR |
248 | /* |
249 | * Remember the awfully-named TS bit? If the Guest has asked to set it | |
e1e72965 | 250 | * we set it now, so we can trap and pass that trap to the Guest if it |
2e04ef76 RR |
251 | * uses the FPU. |
252 | */ | |
9c6ff8bb SS |
253 | if (cpu->ts && user_has_fpu()) |
254 | stts(); | |
625efab1 | 255 | |
2e04ef76 RR |
256 | /* |
257 | * SYSENTER is an optimized way of doing system calls. We can't allow | |
e1e72965 RR |
258 | * it because it always jumps to privilege level 0. A normal Guest |
259 | * won't try it because we don't advertise it in CPUID, but a malicious | |
260 | * Guest (or malicious Guest userspace program) could, so we tell the | |
2e04ef76 RR |
261 | * CPU to disable it before running the Guest. |
262 | */ | |
625efab1 JS |
263 | if (boot_cpu_has(X86_FEATURE_SEP)) |
264 | wrmsr(MSR_IA32_SYSENTER_CS, 0, 0); | |
265 | ||
2e04ef76 RR |
266 | /* |
267 | * Now we actually run the Guest. It will return when something | |
e1e72965 | 268 | * interesting happens, and we can examine its registers to see what it |
2e04ef76 RR |
269 | * was doing. |
270 | */ | |
d0953d42 | 271 | run_guest_once(cpu, lguest_pages(raw_smp_processor_id())); |
625efab1 | 272 | |
2e04ef76 RR |
273 | /* |
274 | * Note that the "regs" structure contains two extra entries which are | |
e1e72965 RR |
275 | * not really registers: a trap number which says what interrupt or |
276 | * trap made the switcher code come back, and an error code which some | |
2e04ef76 RR |
277 | * traps set. |
278 | */ | |
625efab1 | 279 | |
54481cf8 SS |
280 | /* Restore SYSENTER if it's supposed to be on. */ |
281 | if (boot_cpu_has(X86_FEATURE_SEP)) | |
282 | wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0); | |
283 | ||
9c6ff8bb SS |
284 | /* Clear the host TS bit if it was set above. */ |
285 | if (cpu->ts && user_has_fpu()) | |
286 | clts(); | |
287 | ||
2e04ef76 RR |
288 | /* |
289 | * If the Guest page faulted, then the cr2 register will tell us the | |
e1e72965 RR |
290 | * bad virtual address. We have to grab this now, because once we |
291 | * re-enable interrupts an interrupt could fault and thus overwrite | |
2e04ef76 RR |
292 | * cr2, or we could even move off to a different CPU. |
293 | */ | |
a53a35a8 | 294 | if (cpu->regs->trapnum == 14) |
fc708b3e | 295 | cpu->arch.last_pagefault = read_cr2(); |
2e04ef76 RR |
296 | /* |
297 | * Similarly, if we took a trap because the Guest used the FPU, | |
54481cf8 SS |
298 | * we have to restore the FPU it expects to see. |
299 | * math_state_restore() may sleep and we may even move off to | |
300 | * a different CPU. So all the critical stuff should be done | |
2e04ef76 RR |
301 | * before this. |
302 | */ | |
9c6ff8bb | 303 | else if (cpu->regs->trapnum == 7 && !user_has_fpu()) |
625efab1 | 304 | math_state_restore(); |
625efab1 JS |
305 | } |
306 | ||
2e04ef76 RR |
307 | /*H:130 |
308 | * Now we've examined the hypercall code; our Guest can make requests. | |
e1e72965 RR |
309 | * Our Guest is usually so well behaved; it never tries to do things it isn't |
310 | * allowed to, and uses hypercalls instead. Unfortunately, Linux's paravirtual | |
311 | * infrastructure isn't quite complete, because it doesn't contain replacements | |
312 | * for the Intel I/O instructions. As a result, the Guest sometimes fumbles | |
313 | * across one during the boot process as it probes for various things which are | |
314 | * usually attached to a PC. | |
625efab1 | 315 | * |
e1e72965 | 316 | * When the Guest uses one of these instructions, we get a trap (General |
c565650b RR |
317 | * Protection Fault) and come here. We queue this to be sent out to the |
318 | * Launcher to handle. | |
2e04ef76 | 319 | */ |
625efab1 | 320 | |
c565650b RR |
/*
 * The eip contains the *virtual* address of the Guest's instruction:
 * we copy the instruction here so the Launcher doesn't have to walk
 * the page tables to decode it.  We handle the case (eg. in a kernel
 * module) where the instruction is over two pages, and the pages are
 * virtually but not physically contiguous.
 *
 * The longest possible x86 instruction is 15 bytes, but we don't handle
 * anything that strange.
 */
static void copy_from_guest(struct lg_cpu *cpu,
			    void *dst, unsigned long vaddr, size_t len)
{
	/* Bytes remaining on the page vaddr lives on. */
	size_t to_page_end = PAGE_SIZE - (vaddr % PAGE_SIZE);
	unsigned long paddr;

	BUG_ON(len > PAGE_SIZE);

	/* If it goes over a page, copy in two parts. */
	if (len > to_page_end) {
		/* But make sure the next page is mapped! */
		if (__guest_pa(cpu, vaddr + to_page_end, &paddr))
			/* Recurse once to fetch the tail from the next page. */
			copy_from_guest(cpu, dst + to_page_end,
					vaddr + to_page_end,
					len - to_page_end);
		else
			/* Otherwise fill with zeroes. */
			memset(dst + to_page_end, 0, len - to_page_end);
		/* The tail is done; only this page's portion remains. */
		len = to_page_end;
	}

	/* This will kill the guest if it isn't mapped, but that
	 * shouldn't happen. */
	__lgread(cpu, dst, guest_pa(cpu, vaddr), len);
}
625efab1 | 356 | |
625efab1 | 357 | |
c565650b RR |
358 | static void setup_emulate_insn(struct lg_cpu *cpu) |
359 | { | |
360 | cpu->pending.trap = 13; | |
361 | copy_from_guest(cpu, cpu->pending.insn, cpu->regs->eip, | |
362 | sizeof(cpu->pending.insn)); | |
625efab1 JS |
363 | } |
364 | ||
7313d521 RR |
365 | static void setup_iomem_insn(struct lg_cpu *cpu, unsigned long iomem_addr) |
366 | { | |
367 | cpu->pending.trap = 14; | |
368 | cpu->pending.addr = iomem_addr; | |
369 | copy_from_guest(cpu, cpu->pending.insn, cpu->regs->eip, | |
370 | sizeof(cpu->pending.insn)); | |
371 | } | |
372 | ||
/*H:050 Once we've re-enabled interrupts, we look at why the Guest exited. */
void lguest_arch_handle_trap(struct lg_cpu *cpu)
{
	unsigned long iomem_addr;

	switch (cpu->regs->trapnum) {
	case 13: /* We've intercepted a General Protection Fault. */
		/* Hand to Launcher to emulate those pesky IN and OUT insns */
		if (cpu->regs->errcode == 0) {
			setup_emulate_insn(cpu);
			return;
		}
		/* Non-zero errcode: fall out and deliver the GPF below. */
		break;
	case 14: /* We've intercepted a Page Fault. */
		/*
		 * The Guest accessed a virtual address that wasn't mapped.
		 * This happens a lot: we don't actually set up most of the page
		 * tables for the Guest at all when we start: as it runs it asks
		 * for more and more, and we set them up as required.  In this
		 * case, we don't even tell the Guest that the fault happened.
		 *
		 * The errcode tells whether this was a read or a write, and
		 * whether kernel or userspace code.
		 */
		if (demand_page(cpu, cpu->arch.last_pagefault,
				cpu->regs->errcode, &iomem_addr))
			return;

		/* Was this an access to memory mapped IO? */
		if (iomem_addr) {
			/* Tell Launcher, let it handle it. */
			setup_iomem_insn(cpu, iomem_addr);
			return;
		}

		/*
		 * OK, it's really not there (or not OK): the Guest needs to
		 * know.  We write out the cr2 value so it knows where the
		 * fault occurred.
		 *
		 * Note that if the Guest were really messed up, this could
		 * happen before it's done the LHCALL_LGUEST_INIT hypercall, so
		 * lg->lguest_data could be NULL.
		 */
		if (cpu->lg->lguest_data &&
		    put_user(cpu->arch.last_pagefault,
			     &cpu->lg->lguest_data->cr2))
			kill_guest(cpu, "Writing cr2");
		break;
	case 7: /* We've intercepted a Device Not Available fault. */
		/*
		 * If the Guest doesn't want to know, we already restored the
		 * Floating Point Unit, so we just continue without telling it.
		 */
		if (!cpu->ts)
			return;
		break;
	case 32 ... 255:
		/*
		 * These values mean a real interrupt occurred, in which case
		 * the Host handler has already been run.  We just do a
		 * friendly check if another process should now be run, then
		 * return to run the Guest again.
		 */
		cond_resched();
		return;
	case LGUEST_TRAP_ENTRY:
		/*
		 * Our 'struct hcall_args' maps directly over our regs: we set
		 * up the pointer now to indicate a hypercall is pending.
		 */
		cpu->hcall = (struct hcall_args *)cpu->regs;
		return;
	}

	/* We didn't handle the trap, so it needs to go to the Guest. */
	if (!deliver_trap(cpu, cpu->regs->trapnum))
		/*
		 * If the Guest doesn't have a handler (either it hasn't
		 * registered any yet, or it's one of the faults we don't let
		 * it handle), it dies with this cryptic error message.
		 */
		kill_guest(cpu, "unhandled trap %li at %#lx (%#lx)",
			   cpu->regs->trapnum, cpu->regs->eip,
			   cpu->regs->trapnum == 14 ? cpu->arch.last_pagefault
			   : cpu->regs->errcode);
}
460 | ||
2e04ef76 RR |
461 | /* |
462 | * Now we can look at each of the routines this calls, in increasing order of | |
625efab1 JS |
463 | * complexity: do_hypercalls(), emulate_insn(), maybe_do_interrupt(), |
464 | * deliver_trap() and demand_page(). After all those, we'll be ready to | |
465 | * examine the Switcher, and our philosophical understanding of the Host/Guest | |
2e04ef76 RR |
466 | * duality will be complete. |
467 | :*/ | |
625efab1 JS |
468 | static void adjust_pge(void *on) |
469 | { | |
470 | if (on) | |
471 | write_cr4(read_cr4() | X86_CR4_PGE); | |
472 | else | |
473 | write_cr4(read_cr4() & ~X86_CR4_PGE); | |
474 | } | |
475 | ||
2e04ef76 RR |
/*H:020
 * Now the Switcher is mapped and every thing else is ready, we need to do
 * some more i386-specific initialization.
 */
void __init lguest_arch_host_init(void)
{
	int i;

	/*
	 * Most of the x86/switcher_32.S doesn't care that it's been moved; on
	 * Intel, jumps are relative, and it doesn't access any references to
	 * external code or data.
	 *
	 * The only exception is the interrupt handlers in switcher.S: their
	 * addresses are placed in a table (default_idt_entries), so we need to
	 * update the table with the new addresses.  switcher_offset() is a
	 * convenience function which returns the distance between the
	 * compiled-in switcher code and the high-mapped copy we just made.
	 */
	for (i = 0; i < IDT_ENTRIES; i++)
		default_idt_entries[i] += switcher_offset();

	/*
	 * Set up the Switcher's per-cpu areas.
	 *
	 * Each CPU gets two pages of its own within the high-mapped region
	 * (aka. "struct lguest_pages").  Much of this can be initialized now,
	 * but some depends on what Guest we are running (which is set up in
	 * copy_in_guest_info()).
	 */
	for_each_possible_cpu(i) {
		/* lguest_pages() returns this CPU's two pages. */
		struct lguest_pages *pages = lguest_pages(i);
		/* This is a convenience pointer to make the code neater. */
		struct lguest_ro_state *state = &pages->state;

		/*
		 * The Global Descriptor Table: the Host has a different one
		 * for each CPU.  We keep a descriptor for the GDT which says
		 * where it is and how big it is (the size is actually the last
		 * byte, not the size, hence the "-1").
		 */
		state->host_gdt_desc.size = GDT_SIZE-1;
		state->host_gdt_desc.address = (long)get_cpu_gdt_table(i);

		/*
		 * All CPUs on the Host use the same Interrupt Descriptor
		 * Table, so we just use store_idt(), which gets this CPU's IDT
		 * descriptor.
		 */
		store_idt(&state->host_idt_desc);

		/*
		 * The descriptors for the Guest's GDT and IDT can be filled
		 * out now, too.  We copy the GDT & IDT into ->guest_gdt and
		 * ->guest_idt before actually running the Guest.
		 */
		state->guest_idt_desc.size = sizeof(state->guest_idt)-1;
		state->guest_idt_desc.address = (long)&state->guest_idt;
		state->guest_gdt_desc.size = sizeof(state->guest_gdt)-1;
		state->guest_gdt_desc.address = (long)&state->guest_gdt;

		/*
		 * We know where we want the stack to be when the Guest enters
		 * the Switcher: in pages->regs.  The stack grows upwards, so
		 * we start it at the end of that structure.
		 */
		state->guest_tss.sp0 = (long)(&pages->regs + 1);
		/*
		 * And this is the GDT entry to use for the stack: we keep a
		 * couple of special LGUEST entries.
		 */
		state->guest_tss.ss0 = LGUEST_DS;

		/*
		 * x86 can have a finegrained bitmap which indicates what I/O
		 * ports the process can use.  We set it to the end of our
		 * structure, meaning "none".
		 */
		state->guest_tss.io_bitmap_base = sizeof(state->guest_tss);

		/*
		 * Some GDT entries are the same across all Guests, so we can
		 * set them up now.
		 */
		setup_default_gdt_entries(state);
		/* Most IDT entries are the same for all Guests, too. */
		setup_default_idt_entries(state, default_idt_entries);

		/*
		 * The Host needs to be able to use the LGUEST segments on this
		 * CPU, too, so put them in the Host GDT.
		 */
		get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT;
		get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT;
	}

	/*
	 * In the Switcher, we want the %cs segment register to use the
	 * LGUEST_CS GDT entry: we've put that in the Host and Guest GDTs, so
	 * it will be undisturbed when we switch.  To change %cs and jump we
	 * need this structure to feed to Intel's "lcall" instruction.
	 */
	lguest_entry.offset = (long)switch_to_guest + switcher_offset();
	lguest_entry.segment = LGUEST_CS;

	/*
	 * Finally, we need to turn off "Page Global Enable".  PGE is an
	 * optimization where page table entries are specially marked to show
	 * they never change.  The Host kernel marks all the kernel pages this
	 * way because it's always present, even when userspace is running.
	 *
	 * Lguest breaks this: unbeknownst to the rest of the Host kernel, we
	 * switch to the Guest kernel.  If you don't disable this on all CPUs,
	 * you'll get really weird bugs that you'll chase for two days.
	 *
	 * I used to turn PGE off every time we switched to the Guest and back
	 * on when we return, but that slowed the Switcher down noticeably.
	 */

	/*
	 * We don't need the complexity of CPUs coming and going while we're
	 * doing this.
	 */
	get_online_cpus();
	if (cpu_has_pge) { /* We have a broader idea of "global". */
		/* Remember that this was originally set (for cleanup). */
		cpu_had_pge = 1;
		/*
		 * adjust_pge is a helper function which sets or unsets the PGE
		 * bit on its CPU, depending on the argument (0 == unset).
		 */
		on_each_cpu(adjust_pge, (void *)0, 1);
		/* Turn off the feature in the global feature set. */
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PGE);
	}
	put_online_cpus();
}
625efab1 JS |
614 | /*:*/ |
615 | ||
/* Undo lguest_arch_host_init()'s PGE change on module unload. */
void __exit lguest_arch_host_fini(void)
{
	/* If we had PGE before we started, turn it back on now. */
	get_online_cpus();
	if (cpu_had_pge) {
		/* Re-advertise the feature globally... */
		set_cpu_cap(&boot_cpu_data, X86_FEATURE_PGE);
		/* ...and re-enable it on every CPU.
		 * adjust_pge's argument "1" means set PGE. */
		on_each_cpu(adjust_pge, (void *)1, 1);
	}
	put_online_cpus();
}
b410e7b1 JS |
627 | |
628 | ||
629 | /*H:122 The i386-specific hypercalls simply farm out to the right functions. */ | |
73044f05 | 630 | int lguest_arch_do_hcall(struct lg_cpu *cpu, struct hcall_args *args) |
b410e7b1 JS |
631 | { |
632 | switch (args->arg0) { | |
a489f0b5 RR |
633 | case LHCALL_LOAD_GDT_ENTRY: |
634 | load_guest_gdt_entry(cpu, args->arg1, args->arg2, args->arg3); | |
b410e7b1 JS |
635 | break; |
636 | case LHCALL_LOAD_IDT_ENTRY: | |
fc708b3e | 637 | load_guest_idt_entry(cpu, args->arg1, args->arg2, args->arg3); |
b410e7b1 JS |
638 | break; |
639 | case LHCALL_LOAD_TLS: | |
fc708b3e | 640 | guest_load_tls(cpu, args->arg1); |
b410e7b1 JS |
641 | break; |
642 | default: | |
643 | /* Bad Guest. Bad! */ | |
644 | return -EIO; | |
645 | } | |
646 | return 0; | |
647 | } | |
648 | ||
/*H:126 i386-specific hypercall initialization: */
int lguest_arch_init_hypercalls(struct lg_cpu *cpu)
{
	u32 tsc_speed;

	/*
	 * The pointer to the Guest's "struct lguest_data" is the only argument.
	 * We check that address now.
	 */
	if (!lguest_address_ok(cpu->lg, cpu->hcall->arg1,
			       sizeof(*cpu->lg->lguest_data)))
		return -EFAULT;

	/*
	 * Having checked it, we simply set lg->lguest_data to point straight
	 * into the Launcher's memory at the right place and then use
	 * copy_to_user/from_user from now on, instead of lgread/write.  I put
	 * this in to show that I'm not immune to writing stupid
	 * optimizations.
	 */
	cpu->lg->lguest_data = cpu->lg->mem_base + cpu->hcall->arg1;

	/*
	 * We insist that the Time Stamp Counter exist and doesn't change with
	 * cpu frequency.  Some devious chip manufacturers decided that TSC
	 * changes could be handled in software.  I decided that time going
	 * backwards might be good for benchmarks, but it's bad for users.
	 *
	 * We also insist that the TSC be stable: the kernel detects unreliable
	 * TSCs for its own purposes, and we use that here.
	 */
	if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) && !check_tsc_unstable())
		tsc_speed = tsc_khz;
	else
		tsc_speed = 0;	/* 0 tells the Guest the TSC is unusable. */
	if (put_user(tsc_speed, &cpu->lg->lguest_data->tsc_khz))
		return -EFAULT;

	/* The interrupt code might not like the system call vector. */
	if (!check_syscall_vector(cpu->lg))
		kill_guest(cpu, "bad syscall vector");

	return 0;
}
a6bd8e13 | 693 | /*:*/ |
d612cde0 | 694 | |
2e04ef76 | 695 | /*L:030 |
d612cde0 | 696 | * Most of the Guest's registers are left alone: we used get_zeroed_page() to |
2e04ef76 RR |
697 | * allocate the structure, so they will be 0. |
698 | */ | |
a53a35a8 | 699 | void lguest_arch_setup_regs(struct lg_cpu *cpu, unsigned long start) |
d612cde0 | 700 | { |
a53a35a8 | 701 | struct lguest_regs *regs = cpu->regs; |
d612cde0 | 702 | |
2e04ef76 RR |
703 | /* |
704 | * There are four "segment" registers which the Guest needs to boot: | |
d612cde0 JS |
705 | * The "code segment" register (cs) refers to the kernel code segment |
706 | * __KERNEL_CS, and the "data", "extra" and "stack" segment registers | |
707 | * refer to the kernel data segment __KERNEL_DS. | |
708 | * | |
709 | * The privilege level is packed into the lower bits. The Guest runs | |
2e04ef76 RR |
710 | * at privilege level 1 (GUEST_PL). |
711 | */ | |
d612cde0 JS |
712 | regs->ds = regs->es = regs->ss = __KERNEL_DS|GUEST_PL; |
713 | regs->cs = __KERNEL_CS|GUEST_PL; | |
714 | ||
2e04ef76 RR |
715 | /* |
716 | * The "eflags" register contains miscellaneous flags. Bit 1 (0x002) | |
d612cde0 JS |
717 | * is supposed to always be "1". Bit 9 (0x200) controls whether |
718 | * interrupts are enabled. We always leave interrupts enabled while | |
2e04ef76 RR |
719 | * running the Guest. |
720 | */ | |
1adfa76a | 721 | regs->eflags = X86_EFLAGS_IF | X86_EFLAGS_FIXED; |
d612cde0 | 722 | |
2e04ef76 RR |
723 | /* |
724 | * The "Extended Instruction Pointer" register says where the Guest is | |
725 | * running. | |
726 | */ | |
d612cde0 JS |
727 | regs->eip = start; |
728 | ||
2e04ef76 RR |
729 | /* |
730 | * %esi points to our boot information, at physical address 0, so don't | |
731 | * touch it. | |
732 | */ | |
e1e72965 | 733 | |
2e04ef76 | 734 | /* There are a couple of GDT entries the Guest expects at boot. */ |
fc708b3e | 735 | setup_guest_gdt(cpu); |
d612cde0 | 736 | } |