| X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR \
| X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
-#define CR8_RESEVED_BITS (~0x0fULL)
+#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
#define EFER_RESERVED_BITS 0xfffffffffffff2fe
#ifdef CONFIG_X86_64
static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
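+ /* vcpu->valid marks a fully constructed vcpu (the old vmcs test was vmx-only) */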
- if (!vcpu->vmcs)
+ if (!vcpu->valid)
return;
vcpu_load(vcpu);
static void kvm_free_vcpu(struct kvm_vcpu *vcpu)
{
- if (!vcpu->vmcs)
+ if (!vcpu->valid)
return;
vcpu_load(vcpu);
gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
int i;
- u64 pdpte;
u64 *pdpt;
int ret;
struct page *page;
+ u64 pdpte[ARRAY_SIZE(vcpu->pdptrs)];
spin_lock(&vcpu->kvm->lock);
page = gfn_to_page(vcpu->kvm, pdpt_gfn);
- /* FIXME: !page - emulate? 0xff? */
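+ /* cr3 points at an unmapped guest frame: the pdptrs cannot be read */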
+ if (!page) {
+ ret = 0;
+ goto out;
+ }
+
pdpt = kmap_atomic(page, KM_USER0);
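+ /* snapshot the four PDPTEs so the atomic mapping can be dropped before validation */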
+ memcpy(pdpte, pdpt+offset, sizeof(pdpte));
+ kunmap_atomic(pdpt, KM_USER0);
- ret = 1;
- for (i = 0; i < 4; ++i) {
- pdpte = pdpt[offset + i];
- if ((pdpte & 1) && (pdpte & 0xfffffff0000001e6ull)) {
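+ /* a PDPTE that is present (bit 0) but has reserved bits set is invalid */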
+ for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
+ if ((pdpte[i] & 1) && (pdpte[i] & 0xfffffff0000001e6ull)) {
ret = 0;
goto out;
}
}
+ ret = 1;
- for (i = 0; i < 4; ++i)
- vcpu->pdptrs[i] = pdpt[offset + i];
-
+ memcpy(vcpu->pdptrs, pdpte, sizeof(vcpu->pdptrs));
out:
- kunmap_atomic(pdpt, KM_USER0);
spin_unlock(&vcpu->kvm->lock);
return ret;
&& !load_pdptrs(vcpu, vcpu->cr3)) {
printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
inject_gp(vcpu);
+ return;
}
if (cr4 & X86_CR4_VMXE) {
void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
- if ( cr8 & CR8_RESEVED_BITS) {
+ if (cr8 & CR8_RESERVED_BITS) {
printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
inject_gp(vcpu);
return;
{
struct page *page;
void *virt;
- unsigned offset = offset_in_page(gpa);
if (((gpa + bytes - 1) >> PAGE_SHIFT) != (gpa >> PAGE_SHIFT))
return 0;
page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
if (!page)
return 0;
mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT);
virt = kmap_atomic(page, KM_USER0);
- kvm_mmu_pte_write(vcpu, gpa, virt + offset, val, bytes);
+ kvm_mmu_pte_write(vcpu, gpa, val, bytes);
memcpy(virt + offset_in_page(gpa), val, bytes);
kunmap_atomic(virt, KM_USER0);
return 1;
r = x86_emulate_memop(&emulate_ctxt, &emulate_ops);
if ((r || vcpu->mmio_is_write) && run) {
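+ /* mmio goes to userspace; set the exit reason here so callers need not */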
+ run->exit_reason = KVM_EXIT_MMIO;
run->mmio.phys_addr = vcpu->mmio_phys_addr;
memcpy(run->mmio.data, vcpu->mmio_data, 8);
run->mmio.len = vcpu->mmio_size;
mark_page_dirty(vcpu->kvm, para_state_gpa >> PAGE_SHIFT);
para_state_page = pfn_to_page(para_state_hpa >> PAGE_SHIFT);
- para_state = kmap_atomic(para_state_page, KM_USER0);
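+ /* switch to a sleeping kmap; the section between map and unmap is presumably not atomic-safe */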
+ para_state = kmap(para_state_page);
printk(KERN_DEBUG ".... guest version: %d\n", para_state->guest_version);
printk(KERN_DEBUG ".... size: %d\n", para_state->size);
para_state->ret = 0;
err_kunmap_skip:
- kunmap_atomic(para_state, KM_USER0);
+ kunmap(para_state_page);
return 0;
err_gp:
return 1;
}
EXPORT_SYMBOL_GPL(kvm_resched);
-void load_msrs(struct vmx_msr_entry *e, int n)
-{
- int i;
-
- for (i = 0; i < n; ++i)
- wrmsrl(e[i].index, e[i].data);
-}
-EXPORT_SYMBOL_GPL(load_msrs);
-
-void save_msrs(struct vmx_msr_entry *e, int n)
-{
- int i;
-
- for (i = 0; i < n; ++i)
- rdmsrl(e[i].index, e[i].data);
-}
-EXPORT_SYMBOL_GPL(save_msrs);
-
void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
int i;
/*
* Read-modify-write. Back to userspace.
*/
- kvm_run->exit_reason = KVM_EXIT_MMIO;
r = 0;
goto out;
}
mutex_lock(&vcpu->mutex);
- if (vcpu->vmcs) {
+ if (vcpu->valid) {
mutex_unlock(&vcpu->mutex);
return -EEXIST;
}
kvm->nvcpus = n + 1;
spin_unlock(&kvm_lock);
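+ /* construction complete: kvm_free_vcpu() and the -EEXIST check key off this */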
+ vcpu->valid = 1;
+
return r;
out_free_vcpus: