/*
 * Core of Xen paravirt_ops implementation.
 *
 * This file contains the xen_paravirt_ops structure itself, and the
 * implementations for:
 * - privileged instructions
 * - interrupt flags
 * - segment operations
 * - booting and setup
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/preempt.h>
#include <linux/hardirq.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/start_kernel.h>
#include <linux/sched.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/highmem.h>
#include <linux/console.h>

#include <xen/interface/xen.h>
#include <xen/interface/physdev.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/sched.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/hvc-console.h>

#include <asm/paravirt.h>
#include <asm/page.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <asm/fixmap.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/reboot.h>
#include <asm/pgalloc.h>

#include "xen-ops.h"
#include "mmu.h"
#include "multicalls.h"

EXPORT_SYMBOL_GPL(hypercall_page);

DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);

/*
 * Note about cr3 (pagetable base) values:
 *
 * xen_cr3 contains the current logical cr3 value; it contains the
 * last set cr3.  This may not be the current effective cr3, because
 * its update may be being lazily deferred.  However, a vcpu looking
 * at its own cr3 can use this value knowing that everything will
 * be self-consistent.
 *
 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
 * hypercall to set the vcpu cr3 is complete (so it may be a little
 * out of date, but it will never be set early).  If one vcpu is
 * looking at another vcpu's cr3 value, it should use this variable.
 */
DEFINE_PER_CPU(unsigned long, xen_cr3);		/* cr3 stored as physaddr */
DEFINE_PER_CPU(unsigned long, xen_current_cr3);	/* actual vcpu cr3 */

struct start_info *xen_start_info;
EXPORT_SYMBOL_GPL(xen_start_info);

struct shared_info xen_dummy_shared_info;

/*
 * Point at some empty memory to start with.  We map the real shared_info
 * page as soon as fixmap is up and running.
 */
struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;

/*
 * Flag to determine whether vcpu info placement is available on all
 * VCPUs.  We assume it is to start with, and then set it to zero on
 * the first failure.  This is because it can succeed on some VCPUs
 * and not others, since it can involve hypervisor memory allocation,
 * or because the guest failed to guarantee all the appropriate
 * constraints on all VCPUs (i.e. buffer can't cross a page boundary).
 *
 * Note that any particular CPU may be using a placed vcpu structure,
 * but we can only optimise if they all are.
 *
 * 0: not available, 1: available
 */
static int have_vcpu_info_placement = 1;

static void xen_vcpu_setup(int cpu)
{
	struct vcpu_register_vcpu_info info;
	int err;
	struct vcpu_info *vcpup;

	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
	per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];

	if (!have_vcpu_info_placement)
		return;		/* already tested, not available */

	vcpup = &per_cpu(xen_vcpu_info, cpu);

	info.mfn = virt_to_mfn(vcpup);
	info.offset = offset_in_page(vcpup);

	printk(KERN_DEBUG "trying to map vcpu_info %d at %p, mfn %llx, offset %d\n",
	       cpu, vcpup, info.mfn, info.offset);

	/* Check to see if the hypervisor will put the vcpu_info
	   structure where we want it, which allows direct access via
	   a percpu-variable. */
	err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);

	if (err) {
		printk(KERN_DEBUG "register_vcpu_info failed: err=%d\n", err);
		have_vcpu_info_placement = 0;
	} else {
		/* This cpu is using the registered vcpu info, even if
		   later ones fail to. */
		per_cpu(xen_vcpu, cpu) = vcpup;

		printk(KERN_DEBUG "cpu %d using vcpu_info at %p\n",
		       cpu, vcpup);
	}
}

/*
 * On restore, set the vcpu placement up again.
 * If it fails, then we're in a bad state, since
 * we can't back out from using it...
 */
void xen_vcpu_restore(void)
{
	if (have_vcpu_info_placement) {
		int cpu;

		for_each_online_cpu(cpu) {
			bool other_cpu = (cpu != smp_processor_id());

			if (other_cpu &&
			    HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL))
				BUG();

			xen_vcpu_setup(cpu);

			if (other_cpu &&
			    HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL))
				BUG();
		}

		BUG_ON(!have_vcpu_info_placement);
	}
}

static void __init xen_banner(void)
{
	printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
	       pv_info.name);
	printk(KERN_INFO "Hypervisor signature: %s%s\n",
	       xen_start_info->magic,
	       xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
}

static void xen_cpuid(unsigned int *ax, unsigned int *bx,
		      unsigned int *cx, unsigned int *dx)
{
	unsigned maskedx = ~0;

	/*
	 * Mask out inconvenient features, to try and disable as many
	 * unsupported kernel subsystems as possible.
	 */
	if (*ax == 1)
		maskedx = ~((1 << X86_FEATURE_APIC) |  /* disable APIC */
			    (1 << X86_FEATURE_ACPI) |  /* disable ACPI */
			    (1 << X86_FEATURE_MCE)  |  /* disable MCE */
			    (1 << X86_FEATURE_MCA)  |  /* disable MCA */
			    (1 << X86_FEATURE_ACC));   /* thermal monitoring */

	asm(XEN_EMULATE_PREFIX "cpuid"
		: "=a" (*ax),
		  "=b" (*bx),
		  "=c" (*cx),
		  "=d" (*dx)
		: "0" (*ax), "2" (*cx));
	*dx &= maskedx;
}

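/* The guest kernel runs deprivileged under Xen, so debug register
   accesses have to be routed through the hypervisor. */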
static void xen_set_debugreg(int reg, unsigned long val)
{
	HYPERVISOR_set_debugreg(reg, val);
}

static unsigned long xen_get_debugreg(int reg)
{
	return HYPERVISOR_get_debugreg(reg);
}

static unsigned long xen_save_fl(void)
{
	struct vcpu_info *vcpu;
	unsigned long flags;

	vcpu = x86_read_percpu(xen_vcpu);

	/* flag has opposite sense of mask */
	flags = !vcpu->evtchn_upcall_mask;

	/* convert to IF type flag
	   -0 -> 0x00000000
	   -1 -> 0xffffffff
	*/
	return (-flags) & X86_EFLAGS_IF;
}

static void xen_restore_fl(unsigned long flags)
{
	struct vcpu_info *vcpu;

	/* convert from IF type flag */
	flags = !(flags & X86_EFLAGS_IF);

	/* There's a one instruction preempt window here.  We need to
	   make sure we don't switch CPUs between getting the vcpu
	   pointer and updating the mask. */
	preempt_disable();
	vcpu = x86_read_percpu(xen_vcpu);
	vcpu->evtchn_upcall_mask = flags;
	preempt_enable_no_resched();

	/* Doesn't matter if we get preempted here, because any
	   pending event will get dealt with anyway. */

	if (flags == 0) {
		preempt_check_resched();
		barrier(); /* unmask then check (avoid races) */
		if (unlikely(vcpu->evtchn_upcall_pending))
			force_evtchn_callback();
	}
}

static void xen_irq_disable(void)
{
	/* There's a one instruction preempt window here.  We need to
	   make sure we don't switch CPUs between getting the vcpu
	   pointer and updating the mask. */
	preempt_disable();
	x86_read_percpu(xen_vcpu)->evtchn_upcall_mask = 1;
	preempt_enable_no_resched();
}

static void xen_irq_enable(void)
{
	struct vcpu_info *vcpu;

	/* We don't need to worry about being preempted here, since
	   either a) interrupts are disabled, so no preemption, or b)
	   the caller is confused and is trying to re-enable interrupts
	   on an indeterminate processor. */

	vcpu = x86_read_percpu(xen_vcpu);
	vcpu->evtchn_upcall_mask = 0;

	/* Doesn't matter if we get preempted here, because any
	   pending event will get dealt with anyway. */

	barrier(); /* unmask then check (avoid races) */
	if (unlikely(vcpu->evtchn_upcall_pending))
		force_evtchn_callback();
}

static void xen_safe_halt(void)
{
	/* Blocking includes an implicit local_irq_enable(). */
	if (HYPERVISOR_sched_op(SCHEDOP_block, NULL) != 0)
		BUG();
}

static void xen_halt(void)
{
	if (irqs_disabled())
		HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
	else
		xen_safe_halt();
}

static void xen_leave_lazy(void)
{
	paravirt_leave_lazy(paravirt_get_lazy_mode());
	xen_mc_flush();
}

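/* There is no meaningful task register value for the guest to read
   back under Xen, so just report 0. */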
static unsigned long xen_store_tr(void)
{
	return 0;
}

static void xen_set_ldt(const void *addr, unsigned entries)
{
	struct mmuext_op *op;
	struct multicall_space mcs = xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = MMUEXT_SET_LDT;
	op->arg1.linear_addr = (unsigned long)addr;
	op->arg2.nr_ents = entries;

	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static void xen_load_gdt(const struct desc_ptr *dtr)
{
	unsigned long *frames;
	unsigned long va = dtr->address;
	unsigned int size = dtr->size + 1;
	unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
	int f;
	struct multicall_space mcs;

	/* A GDT can be up to 64k in size, which corresponds to 8192
	   8-byte entries, or 16 4k pages. */

	BUG_ON(size > 65536);
	BUG_ON(va & ~PAGE_MASK);

	mcs = xen_mc_entry(sizeof(*frames) * pages);
	frames = mcs.args;

	for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
		frames[f] = virt_to_mfn(va);
		make_lowmem_page_readonly((void *)va);
	}

	MULTI_set_gdt(mcs.mc, frames, size / sizeof(struct desc_struct));

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static void load_TLS_descriptor(struct thread_struct *t,
				unsigned int cpu, unsigned int i)
{
	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
	xmaddr_t maddr = virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN+i]);
	struct multicall_space mc = __xen_mc_entry(0);

	MULTI_update_descriptor(mc.mc, maddr.maddr, t->tls_array[i]);
}

static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
{
	xen_mc_batch();

	load_TLS_descriptor(t, cpu, 0);
	load_TLS_descriptor(t, cpu, 1);
	load_TLS_descriptor(t, cpu, 2);

	xen_mc_issue(PARAVIRT_LAZY_CPU);

	/*
	 * XXX sleazy hack: If we're being called in a lazy-cpu zone,
	 * it means we're in a context switch, and %gs has just been
	 * saved.  This means we can zero it out to prevent faults on
	 * exit from the hypervisor if the next process has no %gs.
	 * Either way, it has been saved, and the new value will get
	 * loaded properly.  This will go away as soon as Xen has been
	 * modified to not save/restore %gs for normal hypercalls.
	 */
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)
		loadsegment(gs, 0);
}

#ifdef CONFIG_X86_64
static void xen_load_gs_index(unsigned int idx)
{
	if (HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, idx))
		BUG();
}
#endif

static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
				const void *ptr)
{
	unsigned long lp = (unsigned long)&dt[entrynum];
	xmaddr_t mach_lp = virt_to_machine(lp);
	u64 entry = *(u64 *)ptr;

	preempt_disable();

	xen_mc_flush();
	if (HYPERVISOR_update_descriptor(mach_lp.maddr, entry))
		BUG();

	preempt_enable();
}

static int cvt_gate_to_trap(int vector, const gate_desc *val,
			    struct trap_info *info)
{
	if (val->type != 0xf && val->type != 0xe)
		return 0;

	info->vector = vector;
	info->address = gate_offset(*val);
	info->cs = gate_segment(*val);
	info->flags = val->dpl;
	/* interrupt gates clear IF */
	if (val->type == 0xe)
		info->flags |= 4;

	return 1;
}

/* Locations of each CPU's IDT */
static DEFINE_PER_CPU(struct desc_ptr, idt_desc);

/* Set an IDT entry.  If the entry is part of the current IDT, then
   also update Xen. */
static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
{
	unsigned long p = (unsigned long)&dt[entrynum];
	unsigned long start, end;

	preempt_disable();

	start = __get_cpu_var(idt_desc).address;
	end = start + __get_cpu_var(idt_desc).size + 1;

	xen_mc_flush();

	native_write_idt_entry(dt, entrynum, g);

	if (p >= start && (p + 8) <= end) {
		struct trap_info info[2];

		info[1].address = 0;

		if (cvt_gate_to_trap(entrynum, g, &info[0]))
			if (HYPERVISOR_set_trap_table(info))
				BUG();
	}

	preempt_enable();
}

static void xen_convert_trap_info(const struct desc_ptr *desc,
				  struct trap_info *traps)
{
	unsigned in, out, count;

	count = (desc->size+1) / sizeof(gate_desc);
	BUG_ON(count > 256);

	for (in = out = 0; in < count; in++) {
		gate_desc *entry = (gate_desc*)(desc->address) + in;

		if (cvt_gate_to_trap(in, entry, &traps[out]))
			out++;
	}
	traps[out].address = 0;
}

void xen_copy_trap_info(struct trap_info *traps)
{
	const struct desc_ptr *desc = &__get_cpu_var(idt_desc);

	xen_convert_trap_info(desc, traps);
}

/* Load a new IDT into Xen.  In principle this can be per-CPU, so we
   hold a spinlock to protect the static traps[] array (static because
   it avoids allocation, and saves stack space). */
static void xen_load_idt(const struct desc_ptr *desc)
{
	static DEFINE_SPINLOCK(lock);
	static struct trap_info traps[257];

	spin_lock(&lock);

	__get_cpu_var(idt_desc) = *desc;

	xen_convert_trap_info(desc, traps);

	xen_mc_flush();
	if (HYPERVISOR_set_trap_table(traps))
		BUG();

	spin_unlock(&lock);
}

/* Write a GDT descriptor entry.  Ignore LDT descriptors, since
   they're handled differently. */
static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
				const void *desc, int type)
{
	preempt_disable();

	switch (type) {
	case DESC_LDT:
	case DESC_TSS:
		/* ignore */
		break;

	default: {
		xmaddr_t maddr = virt_to_machine(&dt[entry]);

		xen_mc_flush();
		if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc))
			BUG();
	}

	}

	preempt_enable();
}

static void xen_load_sp0(struct tss_struct *tss,
			 struct thread_struct *thread)
{
	struct multicall_space mcs = xen_mc_entry(0);
	MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->sp0);
	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static void xen_set_iopl_mask(unsigned mask)
{
	struct physdev_set_iopl set_iopl;

	/* Force the change at ring 0. */
	set_iopl.iopl = (mask == 0) ? 1 : (mask >> 12) & 3;
	HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
}

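/* Nothing to do: a port-I/O delay serves no purpose in a
   paravirtualized guest. */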
static void xen_io_delay(void)
{
}

#ifdef CONFIG_X86_LOCAL_APIC
static u32 xen_apic_read(unsigned long reg)
{
	return 0;
}

static void xen_apic_write(unsigned long reg, u32 val)
{
	/* Warn to see if there's any stray references */
	WARN_ON(1);
}
#endif

static void xen_flush_tlb(void)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_flush_tlb_single(unsigned long addr)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = MMUEXT_INVLPG_LOCAL;
	op->arg1.linear_addr = addr & PAGE_MASK;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_flush_tlb_others(const cpumask_t *cpus, struct mm_struct *mm,
				 unsigned long va)
{
	struct {
		struct mmuext_op op;
		cpumask_t mask;
	} *args;
	cpumask_t cpumask = *cpus;
	struct multicall_space mcs;

	/*
	 * A couple of (to be removed) sanity checks:
	 *
	 * - current CPU must not be in mask
	 * - mask must exist :)
	 */
	BUG_ON(cpus_empty(cpumask));
	BUG_ON(cpu_isset(smp_processor_id(), cpumask));
	BUG_ON(!mm);

	/* If a CPU which we ran on has gone down, OK. */
	cpus_and(cpumask, cpumask, cpu_online_map);
	if (cpus_empty(cpumask))
		return;

	mcs = xen_mc_entry(sizeof(*args));
	args = mcs.args;
	args->mask = cpumask;
	args->op.arg2.vcpumask = &args->mask;

	if (va == TLB_FLUSH_ALL) {
		args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
	} else {
		args->op.cmd = MMUEXT_INVLPG_MULTI;
		args->op.arg1.linear_addr = va;
	}

	MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

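/* Clear CR0.TS with a fpu_taskswitch hypercall, batched through the
   multicall machinery like the other lazy-CPU operations. */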
static void xen_clts(void)
{
	struct multicall_space mcs;

	mcs = xen_mc_entry(0);

	MULTI_fpu_taskswitch(mcs.mc, 0);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static void xen_write_cr0(unsigned long cr0)
{
	struct multicall_space mcs;

	/* Only pay attention to cr0.TS; everything else is
	   ignored. */
	mcs = xen_mc_entry(0);

	MULTI_fpu_taskswitch(mcs.mc, (cr0 & X86_CR0_TS) != 0);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static void xen_write_cr2(unsigned long cr2)
{
	x86_read_percpu(xen_vcpu)->arch.cr2 = cr2;
}

static unsigned long xen_read_cr2(void)
{
	return x86_read_percpu(xen_vcpu)->arch.cr2;
}

static unsigned long xen_read_cr2_direct(void)
{
	return x86_read_percpu(xen_vcpu_info.arch.cr2);
}

static void xen_write_cr4(unsigned long cr4)
{
	cr4 &= ~X86_CR4_PGE;
	cr4 &= ~X86_CR4_PSE;

	native_write_cr4(cr4);
}

static unsigned long xen_read_cr3(void)
{
	return x86_read_percpu(xen_cr3);
}

static void set_current_cr3(void *v)
{
	x86_write_percpu(xen_current_cr3, (unsigned long)v);
}

static void xen_write_cr3(unsigned long cr3)
{
	struct mmuext_op *op;
	struct multicall_space mcs;
	unsigned long mfn = pfn_to_mfn(PFN_DOWN(cr3));

	BUG_ON(preemptible());

	mcs = xen_mc_entry(sizeof(*op));  /* disables interrupts */

	/* Update while interrupts are disabled, so its atomic with
	   respect to ipis */
	x86_write_percpu(xen_cr3, cr3);

	op = mcs.args;
	op->cmd = MMUEXT_NEW_BASEPTR;
	op->arg1.mfn = mfn;

	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	/* Update xen_current_cr3 once the batch has actually
	   been submitted. */
	xen_mc_callback(set_current_cr3, (void *)cr3);

	xen_mc_issue(PARAVIRT_LAZY_CPU);  /* interrupts restored */
}

/* Early in boot, while setting up the initial pagetable, assume
   everything is pinned. */
static __init void xen_alloc_pte_init(struct mm_struct *mm, u32 pfn)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(mem_map);	/* should only be used early */
#endif
	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
}

/* Early release_pte assumes that all pts are pinned, since there's
   only init_mm and anything attached to that is pinned. */
static void xen_release_pte_init(u32 pfn)
{
	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}

static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
{
	struct mmuext_op op;
	op.cmd = cmd;
	op.arg1.mfn = pfn_to_mfn(pfn);
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
		BUG();
}

/* This needs to make sure the new pte page is pinned iff it's being
   attached to a pinned pagetable. */
static void xen_alloc_ptpage(struct mm_struct *mm, u32 pfn, unsigned level)
{
	struct page *page = pfn_to_page(pfn);

	if (PagePinned(virt_to_page(mm->pgd))) {
		SetPagePinned(page);

		if (!PageHighMem(page)) {
			make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
			if (level == PT_PTE)
				pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
		} else
			/* make sure there are no stray mappings of
			   this page */
			kmap_flush_unused();
	}
}

static void xen_alloc_pte(struct mm_struct *mm, u32 pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PTE);
}

static void xen_alloc_pmd(struct mm_struct *mm, u32 pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PMD);
}

/* This should never happen until we're OK to use struct page */
static void xen_release_ptpage(u32 pfn, unsigned level)
{
	struct page *page = pfn_to_page(pfn);

	if (PagePinned(page)) {
		if (!PageHighMem(page)) {
			if (level == PT_PTE)
				pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
			make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
		}
		ClearPagePinned(page);
	}
}

static void xen_release_pte(u32 pfn)
{
	xen_release_ptpage(pfn, PT_PTE);
}

static void xen_release_pmd(u32 pfn)
{
	xen_release_ptpage(pfn, PT_PMD);
}

#if PAGETABLE_LEVELS == 4
static void xen_alloc_pud(struct mm_struct *mm, u32 pfn)
{
	xen_alloc_ptpage(mm, pfn, PT_PUD);
}

static void xen_release_pud(u32 pfn)
{
	xen_release_ptpage(pfn, PT_PUD);
}
#endif

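/* Pinned pagetable pages must never be mapped writable, even via
   kmap_atomic, so hand out a read-only mapping for them. */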
#ifdef CONFIG_HIGHPTE
static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
{
	pgprot_t prot = PAGE_KERNEL;

	if (PagePinned(page))
		prot = PAGE_KERNEL_RO;

	if (0 && PageHighMem(page))
		printk("mapping highpte %lx type %d prot %s\n",
		       page_to_pfn(page), type,
		       (unsigned long)pgprot_val(prot) & _PAGE_RW ? "WRITE" : "READ");

	return kmap_atomic_prot(page, type, prot);
}
#endif

static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
{
	/* If there's an existing pte, then don't allow _PAGE_RW to be set */
	if (pte_val_ma(*ptep) & _PAGE_PRESENT)
		pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
			       pte_val_ma(pte));

	return pte;
}

/* Init-time set_pte while constructing initial pagetables, which
   doesn't allow RO pagetable pages to be remapped RW */
static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
{
	pte = mask_rw_pte(ptep, pte);

	xen_set_pte(ptep, pte);
}

static __init void xen_pagetable_setup_start(pgd_t *base)
{
}

void xen_setup_shared_info(void)
{
	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		set_fixmap(FIX_PARAVIRT_BOOTMAP,
			   xen_start_info->shared_info);

		HYPERVISOR_shared_info =
			(struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
	} else
		HYPERVISOR_shared_info =
			(struct shared_info *)__va(xen_start_info->shared_info);

#ifndef CONFIG_SMP
	/* In UP this is as good a place as any to set up shared info */
	xen_setup_vcpu_info_placement();
#endif

	xen_setup_mfn_list_list();
}

static __init void xen_pagetable_setup_done(pgd_t *base)
{
	xen_setup_shared_info();
}

static __init void xen_post_allocator_init(void)
{
	pv_mmu_ops.set_pte = xen_set_pte;
	pv_mmu_ops.set_pmd = xen_set_pmd;
	pv_mmu_ops.set_pud = xen_set_pud;
#if PAGETABLE_LEVELS == 4
	pv_mmu_ops.set_pgd = xen_set_pgd;
#endif

	/* This will work as long as patching hasn't happened yet
	   (which it hasn't) */
	pv_mmu_ops.alloc_pte = xen_alloc_pte;
	pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
	pv_mmu_ops.release_pte = xen_release_pte;
	pv_mmu_ops.release_pmd = xen_release_pmd;
#if PAGETABLE_LEVELS == 4
	pv_mmu_ops.alloc_pud = xen_alloc_pud;
	pv_mmu_ops.release_pud = xen_release_pud;
#endif

	xen_mark_init_mm_pinned();
}

/* This is called once we have the cpu_possible_map */
void xen_setup_vcpu_info_placement(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		xen_vcpu_setup(cpu);

	/* xen_vcpu_setup managed to place the vcpu_info within the
	   percpu area for all cpus, so make use of it */
#ifdef CONFIG_X86_32
	if (have_vcpu_info_placement) {
		printk(KERN_INFO "Xen: using vcpu_info placement\n");

		pv_irq_ops.save_fl = xen_save_fl_direct;
		pv_irq_ops.restore_fl = xen_restore_fl_direct;
		pv_irq_ops.irq_disable = xen_irq_disable_direct;
		pv_irq_ops.irq_enable = xen_irq_enable_direct;
		pv_mmu_ops.read_cr2 = xen_read_cr2_direct;
	}
#endif
}

static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
			  unsigned long addr, unsigned len)
{
	char *start, *end, *reloc;
	unsigned ret;

	start = end = reloc = NULL;

#define SITE(op, x)						\
	case PARAVIRT_PATCH(op.x):				\
	if (have_vcpu_info_placement) {				\
		start = (char *)xen_##x##_direct;		\
		end = xen_##x##_direct_end;			\
		reloc = xen_##x##_direct_reloc;			\
	}							\
	goto patch_site

	switch (type) {
#ifdef CONFIG_X86_32
		SITE(pv_irq_ops, irq_enable);
		SITE(pv_irq_ops, irq_disable);
		SITE(pv_irq_ops, save_fl);
		SITE(pv_irq_ops, restore_fl);
#endif /* CONFIG_X86_32 */
#undef SITE

	patch_site:
		if (start == NULL || (end-start) > len)
			goto default_patch;

		ret = paravirt_patch_insns(insnbuf, len, start, end);

		/* Note: because reloc is assigned from something that
		   appears to be an array, gcc assumes it's non-null,
		   but doesn't know its relationship with start and
		   end. */
		if (reloc > start && reloc < end) {
			int reloc_off = reloc - start;
			long *relocp = (long *)(insnbuf + reloc_off);
			long delta = start - (char *)addr;

			*relocp += delta;
		}
		break;

	default_patch:
	default:
		ret = paravirt_patch_default(type, clobbers, insnbuf,
					     addr, len);
		break;
	}

	return ret;
}

static void xen_set_fixmap(unsigned idx, unsigned long phys, pgprot_t prot)
{
	pte_t pte;

	phys >>= PAGE_SHIFT;

	switch (idx) {
	case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
#ifdef CONFIG_X86_F00F_BUG
	case FIX_F00F_IDT:
#endif
#ifdef CONFIG_X86_32
	case FIX_WP_TEST:
	case FIX_VDSO:
	case FIX_KMAP_BEGIN ... FIX_KMAP_END:
#else
	case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
#endif
#ifdef CONFIG_X86_LOCAL_APIC
	case FIX_APIC_BASE:	/* maps dummy local APIC */
#endif
		pte = pfn_pte(phys, prot);
		break;

	default:
		pte = mfn_pte(phys, prot);
		break;
	}

	__native_set_fixmap(idx, pte);
}

static const struct pv_info xen_info __initdata = {
	.paravirt_enabled = 1,
	.shared_kernel_pmd = 0,

	.name = "Xen",
};

static const struct pv_init_ops xen_init_ops __initdata = {
	.patch = xen_patch,

	.banner = xen_banner,
	.memory_setup = xen_memory_setup,
	.arch_setup = xen_arch_setup,
	.post_allocator_init = xen_post_allocator_init,
};

static const struct pv_time_ops xen_time_ops __initdata = {
	.time_init = xen_time_init,

	.set_wallclock = xen_set_wallclock,
	.get_wallclock = xen_get_wallclock,
	.get_tsc_khz = xen_tsc_khz,
	.sched_clock = xen_sched_clock,
};

static const struct pv_cpu_ops xen_cpu_ops __initdata = {
	.cpuid = xen_cpuid,

	.set_debugreg = xen_set_debugreg,
	.get_debugreg = xen_get_debugreg,

	.clts = xen_clts,

	.read_cr0 = native_read_cr0,
	.write_cr0 = xen_write_cr0,

	.read_cr4 = native_read_cr4,
	.read_cr4_safe = native_read_cr4_safe,
	.write_cr4 = xen_write_cr4,

	.wbinvd = native_wbinvd,

	.read_msr = native_read_msr_safe,
	.write_msr = native_write_msr_safe,
	.read_tsc = native_read_tsc,
	.read_pmc = native_read_pmc,

	.iret = xen_iret,
	.irq_enable_sysexit = xen_sysexit,

	.load_tr_desc = paravirt_nop,
	.set_ldt = xen_set_ldt,
	.load_gdt = xen_load_gdt,
	.load_idt = xen_load_idt,
	.load_tls = xen_load_tls,
#ifdef CONFIG_X86_64
	.load_gs_index = xen_load_gs_index,
#endif

	.store_gdt = native_store_gdt,
	.store_idt = native_store_idt,
	.store_tr = xen_store_tr,

	.write_ldt_entry = xen_write_ldt_entry,
	.write_gdt_entry = xen_write_gdt_entry,
	.write_idt_entry = xen_write_idt_entry,
	.load_sp0 = xen_load_sp0,

	.set_iopl_mask = xen_set_iopl_mask,
	.io_delay = xen_io_delay,

	/* Xen takes care of %gs when switching to usermode for us */
	.swapgs = paravirt_nop,

	.lazy_mode = {
		.enter = paravirt_enter_lazy_cpu,
		.leave = xen_leave_lazy,
	},
};

static void __init __xen_init_IRQ(void)
{
#ifdef CONFIG_X86_64
	int i;

	/* Create identity vector->irq map */
	for (i = 0; i < NR_VECTORS; i++) {
		int cpu;

		for_each_possible_cpu(cpu)
			per_cpu(vector_irq, cpu)[i] = i;
	}
#endif /* CONFIG_X86_64 */

	xen_init_IRQ();
}

static const struct pv_irq_ops xen_irq_ops __initdata = {
	.init_IRQ = __xen_init_IRQ,
	.save_fl = xen_save_fl,
	.restore_fl = xen_restore_fl,
	.irq_disable = xen_irq_disable,
	.irq_enable = xen_irq_enable,
	.safe_halt = xen_safe_halt,
	.halt = xen_halt,
#ifdef CONFIG_X86_64
	.adjust_exception_frame = xen_adjust_exception_frame,
#endif
};

static const struct pv_apic_ops xen_apic_ops __initdata = {
#ifdef CONFIG_X86_LOCAL_APIC
	.apic_write = xen_apic_write,
	.apic_write_atomic = xen_apic_write,
	.apic_read = xen_apic_read,
	.setup_boot_clock = paravirt_nop,
	.setup_secondary_clock = paravirt_nop,
	.startup_ipi_hook = paravirt_nop,
#endif
};

static const struct pv_mmu_ops xen_mmu_ops __initdata = {
	.pagetable_setup_start = xen_pagetable_setup_start,
	.pagetable_setup_done = xen_pagetable_setup_done,

	.read_cr2 = xen_read_cr2,
	.write_cr2 = xen_write_cr2,

	.read_cr3 = xen_read_cr3,
	.write_cr3 = xen_write_cr3,

	.flush_tlb_user = xen_flush_tlb,
	.flush_tlb_kernel = xen_flush_tlb,
	.flush_tlb_single = xen_flush_tlb_single,
	.flush_tlb_others = xen_flush_tlb_others,

	.pte_update = paravirt_nop,
	.pte_update_defer = paravirt_nop,

	.pgd_alloc = __paravirt_pgd_alloc,
	.pgd_free = paravirt_nop,

	.alloc_pte = xen_alloc_pte_init,
	.release_pte = xen_release_pte_init,
	.alloc_pmd = xen_alloc_pte_init,
	.alloc_pmd_clone = paravirt_nop,
	.release_pmd = xen_release_pte_init,

#ifdef CONFIG_HIGHPTE
	.kmap_atomic_pte = xen_kmap_atomic_pte,
#endif

#ifdef CONFIG_X86_64
	.set_pte = xen_set_pte,
#else
	.set_pte = xen_set_pte_init,
#endif
	.set_pte_at = xen_set_pte_at,
	.set_pmd = xen_set_pmd_hyper,

	.ptep_modify_prot_start = __ptep_modify_prot_start,
	.ptep_modify_prot_commit = __ptep_modify_prot_commit,

	.pte_val = xen_pte_val,
	.pte_flags = native_pte_val,
	.pgd_val = xen_pgd_val,

	.make_pte = xen_make_pte,
	.make_pgd = xen_make_pgd,

#ifdef CONFIG_X86_PAE
	.set_pte_atomic = xen_set_pte_atomic,
	.set_pte_present = xen_set_pte_at,
	.pte_clear = xen_pte_clear,
	.pmd_clear = xen_pmd_clear,
#endif	/* CONFIG_X86_PAE */
	.set_pud = xen_set_pud_hyper,

	.make_pmd = xen_make_pmd,
	.pmd_val = xen_pmd_val,

#if PAGETABLE_LEVELS == 4
	.pud_val = xen_pud_val,
	.make_pud = xen_make_pud,
	.set_pgd = xen_set_pgd_hyper,

	.alloc_pud = xen_alloc_pte_init,
	.release_pud = xen_release_pte_init,
#endif	/* PAGETABLE_LEVELS == 4 */

	.activate_mm = xen_activate_mm,
	.dup_mmap = xen_dup_mmap,
	.exit_mmap = xen_exit_mmap,

	.lazy_mode = {
		.enter = paravirt_enter_lazy_mmu,
		.leave = xen_leave_lazy,
	},

	.set_fixmap = xen_set_fixmap,
};

static void xen_reboot(int reason)
{
	struct sched_shutdown r = { .reason = reason };

#ifdef CONFIG_SMP
	smp_send_stop();
#endif

	if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
		BUG();
}

static void xen_restart(char *msg)
{
	xen_reboot(SHUTDOWN_reboot);
}

static void xen_emergency_restart(void)
{
	xen_reboot(SHUTDOWN_reboot);
}

static void xen_machine_halt(void)
{
	xen_reboot(SHUTDOWN_poweroff);
}

static void xen_crash_shutdown(struct pt_regs *regs)
{
	xen_reboot(SHUTDOWN_crash);
}

static const struct machine_ops __initdata xen_machine_ops = {
	.restart = xen_restart,
	.halt = xen_machine_halt,
	.power_off = xen_machine_halt,
	.shutdown = xen_machine_halt,
	.crash_shutdown = xen_crash_shutdown,
	.emergency_restart = xen_emergency_restart,
};


static void __init xen_reserve_top(void)
{
#ifdef CONFIG_X86_32
	unsigned long top = HYPERVISOR_VIRT_START;
	struct xen_platform_parameters pp;

	if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
		top = pp.virt_start;

	reserve_top_address(-top + 2 * PAGE_SIZE);
#endif	/* CONFIG_X86_32 */
}

/*
 * Like __va(), but returns address in the kernel mapping (which is
 * all we have until the physical memory mapping has been set up).
 */
static void *__ka(phys_addr_t paddr)
{
#ifdef CONFIG_X86_64
	return (void *)(paddr + __START_KERNEL_map);
#else
	return __va(paddr);
#endif
}

/* Convert a machine address to physical address */
static unsigned long m2p(phys_addr_t maddr)
{
	phys_addr_t paddr;

	maddr &= PTE_MASK;
	paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;

	return paddr;
}

/* Convert a machine address to kernel virtual */
static void *m2v(phys_addr_t maddr)
{
	return __ka(m2p(maddr));
}

#ifdef CONFIG_X86_64
static void walk(pgd_t *pgd, unsigned long addr)
{
	unsigned l4idx = pgd_index(addr);
	unsigned l3idx = pud_index(addr);
	unsigned l2idx = pmd_index(addr);
	unsigned l1idx = pte_index(addr);
	pgd_t l4;
	pud_t l3;
	pmd_t l2;
	pte_t l1;

	xen_raw_printk("walk %p, %lx -> %d %d %d %d\n",
		       pgd, addr, l4idx, l3idx, l2idx, l1idx);

	l4 = pgd[l4idx];
	xen_raw_printk("  l4: %016lx\n", l4.pgd);
	xen_raw_printk("      %016lx\n", pgd_val(l4));

	l3 = ((pud_t *)(m2v(l4.pgd)))[l3idx];
	xen_raw_printk("  l3: %016lx\n", l3.pud);
	xen_raw_printk("      %016lx\n", pud_val(l3));

	l2 = ((pmd_t *)(m2v(l3.pud)))[l2idx];
	xen_raw_printk("  l2: %016lx\n", l2.pmd);
	xen_raw_printk("      %016lx\n", pmd_val(l2));

	l1 = ((pte_t *)(m2v(l2.pmd)))[l1idx];
	xen_raw_printk("  l1: %016lx\n", l1.pte);
	xen_raw_printk("      %016lx\n", pte_val(l1));
}
#endif

static void set_page_prot(void *addr, pgprot_t prot)
{
	unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
	pte_t pte = pfn_pte(pfn, prot);

	xen_raw_printk("addr=%p pfn=%lx mfn=%lx prot=%016llx pte=%016llx\n",
		       addr, pfn, get_phys_to_machine(pfn),
		       pgprot_val(prot), pte.pte);

	if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
		BUG();
}

/*
 * Identity map, in addition to plain kernel map.  This needs to be
 * large enough to allocate page table pages to allocate the rest.
 * Each page can map 2MB.
 */
static pte_t level1_ident_pgt[PTRS_PER_PTE * 4] __page_aligned_bss;

static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
{
	unsigned pmdidx, pteidx;
	unsigned ident_pte;
	unsigned long pfn;

	ident_pte = 0;
	pfn = 0;
	for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
		pte_t *pte_page;

		/* Reuse or allocate a page of ptes */
		if (pmd_present(pmd[pmdidx]))
			pte_page = m2v(pmd[pmdidx].pmd);
		else {
			/* Check for free pte pages */
			if (ident_pte == ARRAY_SIZE(level1_ident_pgt))
				break;

			pte_page = &level1_ident_pgt[ident_pte];
			ident_pte += PTRS_PER_PTE;

			pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
		}

		/* Install mappings */
		for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
			pte_t pte;

			if (pfn > max_pfn_mapped)
				max_pfn_mapped = pfn;

			if (!pte_none(pte_page[pteidx]))
				continue;

			pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
			pte_page[pteidx] = pte;
		}
	}

	for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
		set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);

	set_page_prot(pmd, PAGE_KERNEL_RO);
}

#ifdef CONFIG_X86_64
static void convert_pfn_mfn(void *v)
{
	pte_t *pte = v;
	int i;

	/* All levels are converted the same way, so just treat them
	   as ptes. */
	for (i = 0; i < PTRS_PER_PTE; i++)
		pte[i] = xen_make_pte(pte[i].pte);
}

/*
 * Set up the initial kernel pagetable.
 *
 * We can construct this by grafting the Xen provided pagetable into
 * head_64.S's preconstructed pagetables.  We copy the Xen L2's into
 * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt.  This
 * means that only the kernel has a physical mapping to start with -
 * but that's enough to get __va working.  We need to fill in the rest
 * of the physical mapping once some sort of allocator has been set
 * up.
 */
static __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
{
	pud_t *l3;
	pmd_t *l2;

	/* Zap identity mapping */
	init_level4_pgt[0] = __pgd(0);

	/* Pre-constructed entries are in pfn, so convert to mfn */
	convert_pfn_mfn(init_level4_pgt);
	convert_pfn_mfn(level3_ident_pgt);
	convert_pfn_mfn(level3_kernel_pgt);

	l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
	l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);

	memcpy(level2_ident_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
	memcpy(level2_kernel_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);

	l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
	l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
	memcpy(level2_fixmap_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);

	/* Set up identity map */
	xen_map_identity_early(level2_ident_pgt, max_pfn);

	/* Make pagetable pieces RO */
	set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
	set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
	set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
	set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
	set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);

	/* Pin down new L4 */
	pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
			  PFN_DOWN(__pa_symbol(init_level4_pgt)));

	/* Unpin Xen-provided one */
	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

	/* Switch over */
	pgd = init_level4_pgt;
	xen_write_cr3(__pa(pgd));

	reserve_early(__pa(xen_start_info->pt_base),
		      __pa(xen_start_info->pt_base +
			   xen_start_info->nr_pt_frames * PAGE_SIZE),
		      "XEN PAGETABLES");

	return pgd;
}
#else	/* !CONFIG_X86_64 */
static pmd_t level2_kernel_pgt[PTRS_PER_PMD] __page_aligned_bss;

static __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
{
	pmd_t *kernel_pmd;

	init_pg_tables_start = __pa(pgd);
	init_pg_tables_end = __pa(pgd) + xen_start_info->nr_pt_frames*PAGE_SIZE;
	max_pfn_mapped = PFN_DOWN(init_pg_tables_end + 512*1024);

	kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
	memcpy(level2_kernel_pgt, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);

	xen_map_identity_early(level2_kernel_pgt, max_pfn);

	memcpy(swapper_pg_dir, pgd, sizeof(pgd_t) * PTRS_PER_PGD);
	set_pgd(&swapper_pg_dir[KERNEL_PGD_BOUNDARY],
			__pgd(__pa(level2_kernel_pgt) | _PAGE_PRESENT));

	set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
	set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
	set_page_prot(empty_zero_page, PAGE_KERNEL_RO);

	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

	xen_write_cr3(__pa(swapper_pg_dir));

	pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir)));

	return swapper_pg_dir;
}
#endif	/* CONFIG_X86_64 */

/* First C function to be called on Xen boot */
asmlinkage void __init xen_start_kernel(void)
{
	pgd_t *pgd;

	if (!xen_start_info)
		return;

	BUG_ON(memcmp(xen_start_info->magic, "xen-3", 5) != 0);

	xen_setup_features();

	/* Install Xen paravirt ops */
	pv_info = xen_info;
	pv_init_ops = xen_init_ops;
	pv_time_ops = xen_time_ops;
	pv_cpu_ops = xen_cpu_ops;
	pv_irq_ops = xen_irq_ops;
	pv_apic_ops = xen_apic_ops;
	pv_mmu_ops = xen_mmu_ops;

	if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) {
		pv_mmu_ops.ptep_modify_prot_start = xen_ptep_modify_prot_start;
		pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit;
	}

	machine_ops = xen_machine_ops;

#ifdef CONFIG_X86_64
	/* Disable until direct per-cpu data access. */
	have_vcpu_info_placement = 0;
	x86_64_init_pda();
#endif

	xen_smp_init();

	/* Get mfn list */
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		xen_build_dynamic_phys_to_machine();

	pgd = (pgd_t *)xen_start_info->pt_base;

	/* Prevent unwanted bits from being set in PTEs. */
	__supported_pte_mask &= ~_PAGE_GLOBAL;
	if (!is_initial_xendomain())
		__supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD);

	/* Don't do the full vcpu_info placement stuff until we have a
	   possible map and a non-dummy shared_info. */
	per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];

	xen_raw_console_write("mapping kernel into physical memory\n");
	pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);

	init_mm.pgd = pgd;

	/* keep using Xen gdt for now; no urgent need to change it */

	pv_info.kernel_rpl = 1;
	if (xen_feature(XENFEAT_supervisor_mode_kernel))
		pv_info.kernel_rpl = 0;

	/* set the limit of our address space */
	xen_reserve_top();

#ifdef CONFIG_X86_32
	/* set up basic CPUID stuff */
	cpu_detect(&new_cpu_data);
	new_cpu_data.hard_math = 1;
	new_cpu_data.x86_capability[0] = cpuid_edx(1);
#endif

	/* Poke various useful things into boot_params */
	boot_params.hdr.type_of_loader = (9 << 4) | 0;
	boot_params.hdr.ramdisk_image = xen_start_info->mod_start
		? __pa(xen_start_info->mod_start) : 0;
	boot_params.hdr.ramdisk_size = xen_start_info->mod_len;
	boot_params.hdr.cmd_line_ptr = __pa(xen_start_info->cmd_line);

	if (!is_initial_xendomain()) {
		add_preferred_console("xenboot", 0, NULL);
		add_preferred_console("tty", 0, NULL);
		add_preferred_console("hvc", 0, NULL);
	}

	xen_raw_console_write("about to get started...\n");

#if 0
	xen_raw_printk("&boot_params=%p __pa(&boot_params)=%lx __va(__pa(&boot_params))=%lx\n",
		       &boot_params, __pa_symbol(&boot_params),
		       __va(__pa_symbol(&boot_params)));

	walk(pgd, &boot_params);
	walk(pgd, __va(__pa(&boot_params)));
#endif

	/* Start the world */
#ifdef CONFIG_X86_32
	i386_start_kernel();
#else
	x86_64_start_reservations((char *)__pa_symbol(&boot_params));
#endif
}