/*
 * Core of Xen paravirt_ops implementation.
 *
 * This file contains the xen_paravirt_ops structure itself, and the
 * implementations for:
 * - privileged instructions
 * - interrupt flags
 * - segment operations
 * - booting and setup
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
13 | ||
14 | #include <linux/kernel.h> | |
15 | #include <linux/init.h> | |
16 | #include <linux/smp.h> | |
17 | #include <linux/preempt.h> | |
18 | #include <linux/percpu.h> | |
19 | #include <linux/delay.h> | |
20 | #include <linux/start_kernel.h> | |
21 | #include <linux/sched.h> | |
22 | #include <linux/bootmem.h> | |
23 | #include <linux/module.h> | |
f4f97b3e JF |
24 | #include <linux/mm.h> |
25 | #include <linux/page-flags.h> | |
26 | #include <linux/highmem.h> | |
5ead97c8 JF |
27 | |
28 | #include <xen/interface/xen.h> | |
29 | #include <xen/interface/physdev.h> | |
30 | #include <xen/interface/vcpu.h> | |
31 | #include <xen/features.h> | |
32 | #include <xen/page.h> | |
33 | ||
34 | #include <asm/paravirt.h> | |
35 | #include <asm/page.h> | |
36 | #include <asm/xen/hypercall.h> | |
37 | #include <asm/xen/hypervisor.h> | |
38 | #include <asm/fixmap.h> | |
39 | #include <asm/processor.h> | |
40 | #include <asm/setup.h> | |
41 | #include <asm/desc.h> | |
42 | #include <asm/pgtable.h> | |
43 | ||
44 | #include "xen-ops.h" | |
3b827c1b | 45 | #include "mmu.h" |
5ead97c8 JF |
46 | #include "multicalls.h" |
47 | ||
EXPORT_SYMBOL_GPL(hypercall_page);

DEFINE_PER_CPU(enum paravirt_lazy_mode, xen_lazy_mode);

DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
DEFINE_PER_CPU(unsigned long, xen_cr3);

struct start_info *xen_start_info;
EXPORT_SYMBOL_GPL(xen_start_info);

static void xen_vcpu_setup(int cpu)
{
	per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
}

static void __init xen_banner(void)
{
	printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
	       paravirt_ops.name);
	printk(KERN_INFO "Hypervisor signature: %s\n", xen_start_info->magic);
}

static void xen_cpuid(unsigned int *eax, unsigned int *ebx,
		      unsigned int *ecx, unsigned int *edx)
{
	unsigned maskedx = ~0;

	/*
	 * Mask out inconvenient features, to try and disable as many
	 * unsupported kernel subsystems as possible.
	 */
	if (*eax == 1)
		maskedx = ~((1 << X86_FEATURE_APIC) |  /* disable APIC */
			    (1 << X86_FEATURE_ACPI) |  /* disable ACPI */
			    (1 << X86_FEATURE_ACC));   /* thermal monitoring */

	asm(XEN_EMULATE_PREFIX "cpuid"
		: "=a" (*eax),
		  "=b" (*ebx),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "0" (*eax), "2" (*ecx));
	*edx &= maskedx;
}

static void xen_set_debugreg(int reg, unsigned long val)
{
	HYPERVISOR_set_debugreg(reg, val);
}

static unsigned long xen_get_debugreg(int reg)
{
	return HYPERVISOR_get_debugreg(reg);
}

static unsigned long xen_save_fl(void)
{
	struct vcpu_info *vcpu;
	unsigned long flags;

	preempt_disable();
	vcpu = x86_read_percpu(xen_vcpu);
	/* flag has opposite sense of mask */
	flags = !vcpu->evtchn_upcall_mask;
	preempt_enable();

	/* convert to IF type flag
	   -0 -> 0x00000000
	   -1 -> 0xffffffff
	*/
	return (-flags) & X86_EFLAGS_IF;
}

static void xen_restore_fl(unsigned long flags)
{
	struct vcpu_info *vcpu;

	preempt_disable();

	/* convert from IF type flag */
	flags = !(flags & X86_EFLAGS_IF);
	vcpu = x86_read_percpu(xen_vcpu);
	vcpu->evtchn_upcall_mask = flags;

	if (flags == 0) {
		/* Unmask then check (avoid races).  We're only protecting
		   against updates by this CPU, so there's no need for
		   anything stronger. */
		barrier();

		if (unlikely(vcpu->evtchn_upcall_pending))
			force_evtchn_callback();
		preempt_enable();
	} else
		preempt_enable_no_resched();
}

static void xen_irq_disable(void)
{
	struct vcpu_info *vcpu;

	preempt_disable();
	vcpu = x86_read_percpu(xen_vcpu);
	vcpu->evtchn_upcall_mask = 1;
	preempt_enable_no_resched();
}

static void xen_irq_enable(void)
{
	struct vcpu_info *vcpu;

	preempt_disable();
	vcpu = x86_read_percpu(xen_vcpu);
	vcpu->evtchn_upcall_mask = 0;

	/* Unmask then check (avoid races).  We're only protecting
	   against updates by this CPU, so there's no need for
	   anything stronger. */
	barrier();

	if (unlikely(vcpu->evtchn_upcall_pending))
		force_evtchn_callback();
	preempt_enable();
}

static void xen_safe_halt(void)
{
	/* Blocking includes an implicit local_irq_enable(). */
	if (HYPERVISOR_sched_op(SCHEDOP_block, 0) != 0)
		BUG();
}

static void xen_halt(void)
{
	if (irqs_disabled())
		HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
	else
		xen_safe_halt();
}

static void xen_set_lazy_mode(enum paravirt_lazy_mode mode)
{
	switch (mode) {
	case PARAVIRT_LAZY_NONE:
		BUG_ON(x86_read_percpu(xen_lazy_mode) == PARAVIRT_LAZY_NONE);
		break;

	case PARAVIRT_LAZY_MMU:
	case PARAVIRT_LAZY_CPU:
		BUG_ON(x86_read_percpu(xen_lazy_mode) != PARAVIRT_LAZY_NONE);
		break;

	case PARAVIRT_LAZY_FLUSH:
		/* flush if necessary, but don't change state */
		if (x86_read_percpu(xen_lazy_mode) != PARAVIRT_LAZY_NONE)
			xen_mc_flush();
		return;
	}

	xen_mc_flush();
	x86_write_percpu(xen_lazy_mode, mode);
}

static unsigned long xen_store_tr(void)
{
	return 0;
}

static void xen_set_ldt(const void *addr, unsigned entries)
{
	unsigned long linear_addr = (unsigned long)addr;
	struct mmuext_op *op;
	struct multicall_space mcs = xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = MMUEXT_SET_LDT;
	if (linear_addr) {
		/* ldt may be vmalloced, use arbitrary_virt_to_machine */
		xmaddr_t maddr;
		maddr = arbitrary_virt_to_machine((unsigned long)addr);
		linear_addr = (unsigned long)maddr.maddr;
	}
	op->arg1.linear_addr = linear_addr;
	op->arg2.nr_ents = entries;

	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static void xen_load_gdt(const struct Xgt_desc_struct *dtr)
{
	unsigned long *frames;
	unsigned long va = dtr->address;
	unsigned int size = dtr->size + 1;
	unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
	int f;
	struct multicall_space mcs;

	/* A GDT can be up to 64k in size, which corresponds to 8192
	   8-byte entries, or 16 4k pages. */

	BUG_ON(size > 65536);
	BUG_ON(va & ~PAGE_MASK);

	mcs = xen_mc_entry(sizeof(*frames) * pages);
	frames = mcs.args;

	for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
		frames[f] = virt_to_mfn(va);
		make_lowmem_page_readonly((void *)va);
	}

	MULTI_set_gdt(mcs.mc, frames, size / sizeof(struct desc_struct));

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static void load_TLS_descriptor(struct thread_struct *t,
				unsigned int cpu, unsigned int i)
{
	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
	xmaddr_t maddr = virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN+i]);
	struct multicall_space mc = __xen_mc_entry(0);

	MULTI_update_descriptor(mc.mc, maddr.maddr, t->tls_array[i]);
}

static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
{
	xen_mc_batch();

	load_TLS_descriptor(t, cpu, 0);
	load_TLS_descriptor(t, cpu, 1);
	load_TLS_descriptor(t, cpu, 2);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
				u32 low, u32 high)
{
	unsigned long lp = (unsigned long)&dt[entrynum];
	xmaddr_t mach_lp = virt_to_machine(lp);
	u64 entry = (u64)high << 32 | low;

	xen_mc_flush();
	if (HYPERVISOR_update_descriptor(mach_lp.maddr, entry))
		BUG();
}

static int cvt_gate_to_trap(int vector, u32 low, u32 high,
			    struct trap_info *info)
{
	u8 type, dpl;

	type = (high >> 8) & 0x1f;
	dpl = (high >> 13) & 3;

	if (type != 0xf && type != 0xe)
		return 0;

	info->vector = vector;
	info->address = (high & 0xffff0000) | (low & 0x0000ffff);
	info->cs = low >> 16;
	info->flags = dpl;
	/* interrupt gates clear IF */
	if (type == 0xe)
		info->flags |= 4;

	return 1;
}

/* Locations of each CPU's IDT */
static DEFINE_PER_CPU(struct Xgt_desc_struct, idt_desc);

/* Set an IDT entry.  If the entry is part of the current IDT, then
   also update Xen. */
static void xen_write_idt_entry(struct desc_struct *dt, int entrynum,
				u32 low, u32 high)
{
	int cpu = smp_processor_id();
	unsigned long p = (unsigned long)&dt[entrynum];
	unsigned long start = per_cpu(idt_desc, cpu).address;
	unsigned long end = start + per_cpu(idt_desc, cpu).size + 1;

	xen_mc_flush();

	write_dt_entry(dt, entrynum, low, high);

	if (p >= start && (p + 8) <= end) {
		struct trap_info info[2];

		info[1].address = 0;

		if (cvt_gate_to_trap(entrynum, low, high, &info[0]))
			if (HYPERVISOR_set_trap_table(info))
				BUG();
	}
}

/* Load a new IDT into Xen.  In principle this can be per-CPU, so we
   hold a spinlock to protect the static traps[] array (static because
   it avoids allocation, and saves stack space). */
static void xen_load_idt(const struct Xgt_desc_struct *desc)
{
	static DEFINE_SPINLOCK(lock);
	static struct trap_info traps[257];

	int cpu = smp_processor_id();
	unsigned in, out, count;

	per_cpu(idt_desc, cpu) = *desc;

	count = (desc->size+1) / 8;
	BUG_ON(count > 256);

	spin_lock(&lock);
	for (in = out = 0; in < count; in++) {
		const u32 *entry = (u32 *)(desc->address + in * 8);

		if (cvt_gate_to_trap(in, entry[0], entry[1], &traps[out]))
			out++;
	}
	traps[out].address = 0;

	xen_mc_flush();
	if (HYPERVISOR_set_trap_table(traps))
		BUG();

	spin_unlock(&lock);
}

/* Write a GDT descriptor entry.  Ignore LDT descriptors, since
   they're handled differently. */
static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
				u32 low, u32 high)
{
	switch ((high >> 8) & 0xff) {
	case DESCTYPE_LDT:
	case DESCTYPE_TSS:
		/* ignore */
		break;

	default: {
		xmaddr_t maddr = virt_to_machine(&dt[entry]);
		u64 desc = (u64)high << 32 | low;

		xen_mc_flush();
		if (HYPERVISOR_update_descriptor(maddr.maddr, desc))
			BUG();
	}

	}
}

static void xen_load_esp0(struct tss_struct *tss,
			  struct thread_struct *thread)
{
	struct multicall_space mcs = xen_mc_entry(0);
	MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->esp0);
	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static void xen_set_iopl_mask(unsigned mask)
{
	struct physdev_set_iopl set_iopl;

	/* Force the change at ring 0. */
	set_iopl.iopl = (mask == 0) ? 1 : (mask >> 12) & 3;
	HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
}

static void xen_io_delay(void)
{
}

#ifdef CONFIG_X86_LOCAL_APIC
static unsigned long xen_apic_read(unsigned long reg)
{
	return 0;
}
#endif

static void xen_flush_tlb(void)
{
	struct mmuext_op op;

	op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
		BUG();
}

static void xen_flush_tlb_single(unsigned long addr)
{
	struct mmuext_op op;

	op.cmd = MMUEXT_INVLPG_LOCAL;
	op.arg1.linear_addr = addr & PAGE_MASK;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
		BUG();
}

static unsigned long xen_read_cr2(void)
{
	return x86_read_percpu(xen_vcpu)->arch.cr2;
}

static void xen_write_cr4(unsigned long cr4)
{
	/* never allow TSC to be disabled */
	native_write_cr4(cr4 & ~X86_CR4_TSD);
}

/*
 * Page-directory addresses above 4GB do not fit into architectural %cr3.
 * When accessing %cr3, or equivalent field in vcpu_guest_context, guests
 * must use the following accessor macros to pack/unpack valid MFNs.
 *
 * Note that Xen is using the fact that the pagetable base is always
 * page-aligned, and putting the 12 MSB of the address into the 12 LSB
 * of cr3.
 */
#define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20))
#define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20))

static unsigned long xen_read_cr3(void)
{
	return x86_read_percpu(xen_cr3);
}

static void xen_write_cr3(unsigned long cr3)
{
	if (cr3 == x86_read_percpu(xen_cr3)) {
		/* just a simple tlb flush */
		xen_flush_tlb();
		return;
	}

	x86_write_percpu(xen_cr3, cr3);

	{
		struct mmuext_op *op;
		struct multicall_space mcs = xen_mc_entry(sizeof(*op));
		unsigned long mfn = pfn_to_mfn(PFN_DOWN(cr3));

		op = mcs.args;
		op->cmd = MMUEXT_NEW_BASEPTR;
		op->arg1.mfn = mfn;

		MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

		xen_mc_issue(PARAVIRT_LAZY_CPU);
	}
}

/* Early in boot, while setting up the initial pagetable, assume
   everything is pinned. */
static __init void xen_alloc_pt_init(struct mm_struct *mm, u32 pfn)
{
	BUG_ON(mem_map);	/* should only be used early */
	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
}

/* This needs to make sure the new pte page is pinned iff it's being
   attached to a pinned pagetable. */
static void xen_alloc_pt(struct mm_struct *mm, u32 pfn)
{
	struct page *page = pfn_to_page(pfn);

	if (PagePinned(virt_to_page(mm->pgd))) {
		SetPagePinned(page);

		if (!PageHighMem(page))
			make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
		else
			/* make sure there are no stray mappings of
			   this page */
			kmap_flush_unused();
	}
}

/* This should never happen until we're OK to use struct page */
static void xen_release_pt(u32 pfn)
{
	struct page *page = pfn_to_page(pfn);

	if (PagePinned(page)) {
		if (!PageHighMem(page))
			make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
	}
}

#ifdef CONFIG_HIGHPTE
static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
{
	pgprot_t prot = PAGE_KERNEL;

	/* pinned pagetable pages must stay read-only, even via kmap */
	if (PagePinned(page))
		prot = PAGE_KERNEL_RO;

	if (0 && PageHighMem(page))	/* disabled debug output */
		printk("mapping highpte %lx type %d prot %s\n",
		       page_to_pfn(page), type,
		       (unsigned long)pgprot_val(prot) & _PAGE_RW ? "WRITE" : "READ");

	return kmap_atomic_prot(page, type, prot);
}
#endif

static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
{
	/* If there's an existing pte, then don't allow _PAGE_RW to be set */
	if (pte_val_ma(*ptep) & _PAGE_PRESENT)
		pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
			       pte_val_ma(pte));

	return pte;
}

/* Init-time set_pte while constructing initial pagetables, which
   doesn't allow RO pagetable pages to be remapped RW */
static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
{
	pte = mask_rw_pte(ptep, pte);

	xen_set_pte(ptep, pte);
}

static __init void xen_pagetable_setup_start(pgd_t *base)
{
	pgd_t *xen_pgd = (pgd_t *)xen_start_info->pt_base;

	/* special set_pte for pagetable initialization */
	paravirt_ops.set_pte = xen_set_pte_init;

	init_mm.pgd = base;
	/*
	 * copy top-level of Xen-supplied pagetable into place.  For
	 * !PAE we can use this as-is, but for PAE it is a stand-in
	 * while we copy the pmd pages.
	 */
	memcpy(base, xen_pgd, PTRS_PER_PGD * sizeof(pgd_t));

	if (PTRS_PER_PMD > 1) {
		int i;
		/*
		 * For PAE, need to allocate new pmds, rather than
		 * share Xen's, since Xen doesn't like pmd's being
		 * shared between address spaces.
		 */
		for (i = 0; i < PTRS_PER_PGD; i++) {
			if (pgd_val_ma(xen_pgd[i]) & _PAGE_PRESENT) {
				pmd_t *pmd = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);

				memcpy(pmd, (void *)pgd_page_vaddr(xen_pgd[i]),
				       PAGE_SIZE);

				make_lowmem_page_readonly(pmd);

				/* the low bit sets _PAGE_PRESENT */
				set_pgd(&base[i], __pgd(1 + __pa(pmd)));
			} else
				pgd_clear(&base[i]);
		}
	}

	/* make sure zero_page is mapped RO so we can use it in pagetables */
	make_lowmem_page_readonly(empty_zero_page);
	make_lowmem_page_readonly(base);
	/*
	 * Switch to new pagetable.  This is done before
	 * pagetable_init has done anything so that the new pages
	 * added to the table can be prepared properly for Xen.
	 */
	xen_write_cr3(__pa(base));
}

static __init void xen_pagetable_setup_done(pgd_t *base)
{
	/* This will work as long as patching hasn't happened yet
	   (which it hasn't) */
	paravirt_ops.alloc_pt = xen_alloc_pt;
	paravirt_ops.set_pte = xen_set_pte;

	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		/*
		 * Create a mapping for the shared info page.
		 * Should be set_fixmap(), but shared_info is a machine
		 * address with no corresponding pseudo-phys address.
		 */
		set_pte_mfn(fix_to_virt(FIX_PARAVIRT_BOOTMAP),
			    PFN_DOWN(xen_start_info->shared_info),
			    PAGE_KERNEL);

		HYPERVISOR_shared_info =
			(struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);

	} else
		HYPERVISOR_shared_info =
			(struct shared_info *)__va(xen_start_info->shared_info);

	/* Actually pin the pagetable down, but we can't set PG_pinned
	   yet because the page structures don't exist yet. */
	{
		struct mmuext_op op;
#ifdef CONFIG_X86_PAE
		op.cmd = MMUEXT_PIN_L3_TABLE;
#else
		op.cmd = MMUEXT_PIN_L2_TABLE;
#endif
		op.arg1.mfn = pfn_to_mfn(PFN_DOWN(__pa(base)));
		if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
			BUG();
	}

	xen_vcpu_setup(smp_processor_id());
}

static const struct paravirt_ops xen_paravirt_ops __initdata = {
	.paravirt_enabled = 1,
	.shared_kernel_pmd = 0,

	.name = "Xen",
	.banner = xen_banner,

	.patch = paravirt_patch_default,

	.memory_setup = xen_memory_setup,
	.arch_setup = xen_arch_setup,
	.init_IRQ = xen_init_IRQ,
	.post_allocator_init = xen_mark_init_mm_pinned,

	.time_init = xen_time_init,
	.set_wallclock = xen_set_wallclock,
	.get_wallclock = xen_get_wallclock,
	.get_cpu_khz = xen_cpu_khz,
	.sched_clock = xen_sched_clock,

	.cpuid = xen_cpuid,

	.set_debugreg = xen_set_debugreg,
	.get_debugreg = xen_get_debugreg,

	.clts = native_clts,

	.read_cr0 = native_read_cr0,
	.write_cr0 = native_write_cr0,

	.read_cr2 = xen_read_cr2,
	.write_cr2 = native_write_cr2,

	.read_cr3 = xen_read_cr3,
	.write_cr3 = xen_write_cr3,

	.read_cr4 = native_read_cr4,
	.read_cr4_safe = native_read_cr4_safe,
	.write_cr4 = xen_write_cr4,

	.save_fl = xen_save_fl,
	.restore_fl = xen_restore_fl,
	.irq_disable = xen_irq_disable,
	.irq_enable = xen_irq_enable,
	.safe_halt = xen_safe_halt,
	.halt = xen_halt,
	.wbinvd = native_wbinvd,

	.read_msr = native_read_msr_safe,
	.write_msr = native_write_msr_safe,
	.read_tsc = native_read_tsc,
	.read_pmc = native_read_pmc,

	.iret = (void *)&hypercall_page[__HYPERVISOR_iret],
	.irq_enable_sysexit = NULL,	/* never called */

	.load_tr_desc = paravirt_nop,
	.set_ldt = xen_set_ldt,
	.load_gdt = xen_load_gdt,
	.load_idt = xen_load_idt,
	.load_tls = xen_load_tls,

	.store_gdt = native_store_gdt,
	.store_idt = native_store_idt,
	.store_tr = xen_store_tr,

	.write_ldt_entry = xen_write_ldt_entry,
	.write_gdt_entry = xen_write_gdt_entry,
	.write_idt_entry = xen_write_idt_entry,
	.load_esp0 = xen_load_esp0,

	.set_iopl_mask = xen_set_iopl_mask,
	.io_delay = xen_io_delay,

#ifdef CONFIG_X86_LOCAL_APIC
	.apic_write = paravirt_nop,
	.apic_write_atomic = paravirt_nop,
	.apic_read = xen_apic_read,
	.setup_boot_clock = paravirt_nop,
	.setup_secondary_clock = paravirt_nop,
	.startup_ipi_hook = paravirt_nop,
#endif

	.flush_tlb_user = xen_flush_tlb,
	.flush_tlb_kernel = xen_flush_tlb,
	.flush_tlb_single = xen_flush_tlb_single,

	.pte_update = paravirt_nop,
	.pte_update_defer = paravirt_nop,

	.pagetable_setup_start = xen_pagetable_setup_start,
	.pagetable_setup_done = xen_pagetable_setup_done,

	.alloc_pt = xen_alloc_pt_init,
	.release_pt = xen_release_pt,
	.alloc_pd = paravirt_nop,
	.alloc_pd_clone = paravirt_nop,
	.release_pd = paravirt_nop,

#ifdef CONFIG_HIGHPTE
	.kmap_atomic_pte = xen_kmap_atomic_pte,
#endif

	.set_pte = NULL,	/* see xen_pagetable_setup_* */
	.set_pte_at = xen_set_pte_at,
	.set_pmd = xen_set_pmd,

	.pte_val = xen_pte_val,
	.pgd_val = xen_pgd_val,

	.make_pte = xen_make_pte,
	.make_pgd = xen_make_pgd,

#ifdef CONFIG_X86_PAE
	.set_pte_atomic = xen_set_pte_atomic,
	.set_pte_present = xen_set_pte_at,
	.set_pud = xen_set_pud,
	.pte_clear = xen_pte_clear,
	.pmd_clear = xen_pmd_clear,

	.make_pmd = xen_make_pmd,
	.pmd_val = xen_pmd_val,
#endif	/* PAE */

	.activate_mm = xen_activate_mm,
	.dup_mmap = xen_dup_mmap,
	.exit_mmap = xen_exit_mmap,

	.set_lazy_mode = xen_set_lazy_mode,
};

/* First C function to be called on Xen boot */
asmlinkage void __init xen_start_kernel(void)
{
	pgd_t *pgd;

	if (!xen_start_info)
		return;

	BUG_ON(memcmp(xen_start_info->magic, "xen-3.0", 7) != 0);

	/* Install Xen paravirt ops */
	paravirt_ops = xen_paravirt_ops;

	xen_setup_features();

	/* Get mfn list */
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		phys_to_machine_mapping = (unsigned long *)xen_start_info->mfn_list;

	pgd = (pgd_t *)xen_start_info->pt_base;

	init_pg_tables_end = __pa(pgd) + xen_start_info->nr_pt_frames*PAGE_SIZE;

	init_mm.pgd = pgd; /* use the Xen pagetables to start */

	/* keep using Xen gdt for now; no urgent need to change it */

	x86_write_percpu(xen_cr3, __pa(pgd));
	xen_vcpu_setup(0);

	paravirt_ops.kernel_rpl = 1;
	if (xen_feature(XENFEAT_supervisor_mode_kernel))
		paravirt_ops.kernel_rpl = 0;

	/* set the limit of our address space */
	reserve_top_address(-HYPERVISOR_VIRT_START + 2 * PAGE_SIZE);

	/* set up basic CPUID stuff */
	cpu_detect(&new_cpu_data);
	new_cpu_data.hard_math = 1;
	new_cpu_data.x86_capability[0] = cpuid_edx(1);

	/* Poke various useful things into boot_params */
	LOADER_TYPE = (9 << 4) | 0;
	INITRD_START = xen_start_info->mod_start ? __pa(xen_start_info->mod_start) : 0;
	INITRD_SIZE = xen_start_info->mod_len;

	/* Start the world */
	start_kernel();
}