/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */
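
/*
 * Rough sketch of how the two instantiations are produced (the exact include
 * site lives in mmu.c and may differ between kernel versions): the MMU code
 * defines PTTYPE before each inclusion of this template:
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 */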

#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS 4
	#define CMPXCHG cmpxchg
	#else
	#define CMPXCHG cmpxchg64
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT32_LEVEL_BITS
	#define PT_MAX_FULL_LEVELS 2
	#define CMPXCHG cmpxchg
#else
	#error Invalid PTTYPE value
#endif

#define gpte_to_gfn FNAME(gpte_to_gfn)
#define gpte_to_gfn_pde FNAME(gpte_to_gfn_pde)

/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
	int level;
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];
	pt_element_t ptes[PT_MAX_FULL_LEVELS];
	gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
	unsigned pt_access;
	unsigned pte_access;
	gfn_t gfn;
	u32 error_code;
};
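
/*
 * walk_addr() fills the three arrays above with one slot per guest paging
 * level, indexed by (level - 1): the gfn of the page-table page, the gpte
 * read from it, and the guest-physical address of that gpte.  fetch() later
 * consults these cached entries while building the shadow hierarchy and
 * re-checks them against guest memory.
 */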

static gfn_t gpte_to_gfn(pt_element_t gpte)
{
	return (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
}

static gfn_t gpte_to_gfn_pde(pt_element_t gpte)
{
	return (gpte & PT_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
}
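
/*
 * Illustrative example (values are made up, not from the original sources):
 * for a 4K-page gpte of 0x00000000abcd1067, PT_BASE_ADDR_MASK keeps
 * 0xabcd1000 and the PAGE_SHIFT (12) shift yields gfn 0xabcd1.  For a
 * large-page pde, PT_DIR_BASE_ADDR_MASK additionally clears the low bits
 * inside the large-page span, so gpte_to_gfn_pde() yields the gfn of the
 * first 4K frame of the large page.
 */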

static bool FNAME(cmpxchg_gpte)(struct kvm *kvm,
			 gfn_t table_gfn, unsigned index,
			 pt_element_t orig_pte, pt_element_t new_pte)
{
	pt_element_t ret;
	pt_element_t *table;
	struct page *page;

	page = gfn_to_page(kvm, table_gfn);

	table = kmap_atomic(page, KM_USER0);
	ret = CMPXCHG(&table[index], orig_pte, new_pte);
	kunmap_atomic(table, KM_USER0);

	kvm_release_page_dirty(page);

	return (ret != orig_pte);
}
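
/*
 * The compare-exchange above matters because the guest (another vcpu, or the
 * hardware walker running inside the guest) may rewrite the gpte while we are
 * setting its accessed/dirty bit; walk_addr() restarts the walk when the
 * exchange fails, so it never keeps operating on a stale gpte.
 */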

static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
{
	unsigned access;

	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
#if PTTYPE == 64
	if (is_nx(vcpu))
		access &= ~(gpte >> PT64_NX_SHIFT);
#endif
	return access;
}
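
/*
 * This works because the ACC_* bits (defined in mmu.c) are chosen to line up
 * with the hardware pte bits: ACC_WRITE_MASK is PT_WRITABLE_MASK,
 * ACC_USER_MASK is PT_USER_MASK, and ACC_EXEC_MASK is bit 0, which is exactly
 * where the NX bit (bit 63) lands after the PT64_NX_SHIFT shift, so a set NX
 * bit clears execute permission.
 */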

/*
 * Fetch a guest pte for a guest virtual address
 */
static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gva_t addr,
			    int write_fault, int user_fault, int fetch_fault)
{
	pt_element_t pte;
	gfn_t table_gfn;
	unsigned index, pt_access, pte_access;
	gpa_t pte_gpa;
	int rsvd_fault = 0;

	pgprintk("%s: addr %lx\n", __func__, addr);
walk:
	walker->level = vcpu->arch.mmu.root_level;
	pte = vcpu->arch.cr3;
#if PTTYPE == 64
	if (!is_long_mode(vcpu)) {
		pte = vcpu->arch.pdptrs[(addr >> 30) & 3];
		if (!is_present_pte(pte))
			goto not_present;
		--walker->level;
	}
#endif
	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
	       (vcpu->arch.cr3 & CR3_NONPAE_RESERVED_BITS) == 0);

	pt_access = ACC_ALL;

	for (;;) {
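		/*
		 * One iteration per guest paging level: locate the gpte for
		 * this level, check that it is present and that the access is
		 * permitted, set its accessed bit, then either descend or
		 * stop at a 4K pte / large-page mapping.
		 */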
		index = PT_INDEX(addr, walker->level);

		table_gfn = gpte_to_gfn(pte);
		pte_gpa = gfn_to_gpa(table_gfn);
		pte_gpa += index * sizeof(pt_element_t);
		walker->table_gfn[walker->level - 1] = table_gfn;
		walker->pte_gpa[walker->level - 1] = pte_gpa;
		pgprintk("%s: table_gfn[%d] %lx\n", __func__,
			 walker->level - 1, table_gfn);

		kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte));

		if (!is_present_pte(pte))
			goto not_present;

		rsvd_fault = is_rsvd_bits_set(vcpu, pte, walker->level);
		if (rsvd_fault)
			goto access_error;

		if (write_fault && !is_writeble_pte(pte))
			if (user_fault || is_write_protection(vcpu))
				goto access_error;

		if (user_fault && !(pte & PT_USER_MASK))
			goto access_error;

#if PTTYPE == 64
		if (fetch_fault && is_nx(vcpu) && (pte & PT64_NX_MASK))
			goto access_error;
#endif

		if (!(pte & PT_ACCESSED_MASK)) {
			mark_page_dirty(vcpu->kvm, table_gfn);
			if (FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn,
			    index, pte, pte|PT_ACCESSED_MASK))
				goto walk;
			pte |= PT_ACCESSED_MASK;
		}

		pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);

		walker->ptes[walker->level - 1] = pte;

		if (walker->level == PT_PAGE_TABLE_LEVEL) {
			walker->gfn = gpte_to_gfn(pte);
			break;
		}
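
		/*
		 * A 2M/4M guest page maps many 4K frames, so the pde only
		 * supplies the upper part of the gfn; the 4K-level index of
		 * the faulting address provides the rest.  On 32-bit guests
		 * with PSE-36, pse36_gfn_delta() folds in the high physical
		 * address bits that the pde carries in its middle bits.
		 */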
		if (walker->level == PT_DIRECTORY_LEVEL
		    && (pte & PT_PAGE_SIZE_MASK)
		    && (PTTYPE == 64 || is_pse(vcpu))) {
			walker->gfn = gpte_to_gfn_pde(pte);
			walker->gfn += PT_INDEX(addr, PT_PAGE_TABLE_LEVEL);
			if (PTTYPE == 32 && is_cpuid_PSE36())
				walker->gfn += pse36_gfn_delta(pte);
			break;
		}

		pt_access = pte_access;
		--walker->level;
	}

	if (write_fault && !is_dirty_pte(pte)) {
		bool ret;

		mark_page_dirty(vcpu->kvm, table_gfn);
		ret = FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn, index, pte,
			    pte|PT_DIRTY_MASK);
		if (ret)
			goto walk;
		pte |= PT_DIRTY_MASK;
		walker->ptes[walker->level - 1] = pte;
	}

	walker->pt_access = pt_access;
	walker->pte_access = pte_access;
	pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
		 __func__, (u64)pte, pt_access, pte_access);
	return 1;

not_present:
	walker->error_code = 0;
	goto err;

access_error:
	walker->error_code = PFERR_PRESENT_MASK;

err:
	if (write_fault)
		walker->error_code |= PFERR_WRITE_MASK;
	if (user_fault)
		walker->error_code |= PFERR_USER_MASK;
	if (fetch_fault)
		walker->error_code |= PFERR_FETCH_MASK;
	if (rsvd_fault)
		walker->error_code |= PFERR_RSVD_MASK;
	return 0;
}
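
/*
 * The PFERR_* masks used above mirror the bits of the hardware page-fault
 * error code (present, write, user, fetch, reserved), so walker->error_code
 * can be injected into the guest unmodified by the page fault handler below.
 */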

static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
			      u64 *spte, const void *pte)
{
	pt_element_t gpte;
	unsigned pte_access;
	pfn_t pfn;
	int largepage = vcpu->arch.update_pte.largepage;

	gpte = *(const pt_element_t *)pte;
	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
		if (!is_present_pte(gpte))
			set_shadow_pte(spte, shadow_notrap_nonpresent_pte);
		return;
	}
	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
	pte_access = page->role.access & FNAME(gpte_access)(vcpu, gpte);
	if (gpte_to_gfn(gpte) != vcpu->arch.update_pte.gfn)
		return;
	pfn = vcpu->arch.update_pte.pfn;
	if (is_error_pfn(pfn))
		return;
	if (mmu_notifier_retry(vcpu, vcpu->arch.update_pte.mmu_seq))
		return;
	kvm_get_pfn(pfn);
	mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
		     gpte & PT_DIRTY_MASK, NULL, largepage,
		     gpte & PT_GLOBAL_MASK, gpte_to_gfn(gpte),
		     pfn, true);
}
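
/*
 * update_pte() is the speculative path driven by kvm_mmu_pte_write(): when an
 * emulated write hits a guest page table, the new gpte is pushed into the
 * corresponding shadow pte here, saving a later page fault.  The
 * vcpu->arch.update_pte fields (gfn, pfn, largepage, mmu_seq) are prepared by
 * the caller before this is invoked.
 */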

/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 */
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			 struct guest_walker *gw,
			 int user_fault, int write_fault, int largepage,
			 int *ptwrite, pfn_t pfn)
{
	unsigned access = gw->pt_access;
	struct kvm_mmu_page *shadow_page;
	u64 spte, *sptep;
	int direct;
	gfn_t table_gfn;
	int r;
	int level;
	pt_element_t curr_pte;
	struct kvm_shadow_walk_iterator iterator;

	if (!is_present_pte(gw->ptes[gw->level - 1]))
		return NULL;
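
	/*
	 * Walk the shadow page table in parallel with the guest walk cached
	 * in *gw, allocating intermediate shadow pages as needed, until we
	 * reach the level at which the leaf spte for this fault belongs.
	 */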
	for_each_shadow_entry(vcpu, addr, iterator) {
		level = iterator.level;
		sptep = iterator.sptep;
		if (level == PT_PAGE_TABLE_LEVEL
		    || (largepage && level == PT_DIRECTORY_LEVEL)) {
			mmu_set_spte(vcpu, sptep, access,
				     gw->pte_access & access,
				     user_fault, write_fault,
				     gw->ptes[gw->level-1] & PT_DIRTY_MASK,
				     ptwrite, largepage,
				     gw->ptes[gw->level-1] & PT_GLOBAL_MASK,
				     gw->gfn, pfn, false);
			break;
		}

		if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep))
			continue;

		if (is_large_pte(*sptep)) {
			rmap_remove(vcpu->kvm, sptep);
			set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
			kvm_flush_remote_tlbs(vcpu->kvm);
		}
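
		/*
		 * If the guest uses a large page here but we shadow it with
		 * 4K pages, the next level is a "direct" shadow page that
		 * splits the guest large page; otherwise it shadows the guest
		 * page table cached by walk_addr().  Write access is dropped
		 * while the guest pde is clean so the dirty bit still gets
		 * set through a write fault.
		 */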
		if (level == PT_DIRECTORY_LEVEL
		    && gw->level == PT_DIRECTORY_LEVEL) {
			direct = 1;
			if (!is_dirty_pte(gw->ptes[level - 1]))
				access &= ~ACC_WRITE_MASK;
			table_gfn = gpte_to_gfn(gw->ptes[level - 1]);
		} else {
			direct = 0;
			table_gfn = gw->table_gfn[level - 2];
		}
		shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
					       direct, access, sptep);
		if (!direct) {
			r = kvm_read_guest_atomic(vcpu->kvm,
						  gw->pte_gpa[level - 2],
						  &curr_pte, sizeof(curr_pte));
			if (r || curr_pte != gw->ptes[level - 2]) {
				kvm_mmu_put_page(shadow_page, sptep);
				kvm_release_pfn_clean(pfn);
				sptep = NULL;
				break;
			}
		}

		spte = __pa(shadow_page->spt)
			| PT_PRESENT_MASK | PT_ACCESSED_MASK
			| PT_WRITABLE_MASK | PT_USER_MASK;
		*sptep = spte;
	}

	return sptep;
}

/*
 * Page fault handler.  There are several causes for a page fault:
 *           - there is no shadow pte for the guest pte
 *           - write access through a shadow pte marked read only so that we can set
 *             the dirty bit
 *           - write access to a shadow pte marked read only so we can update the page
 *             dirty bitmap, when userspace requests it
 *           - mmio access; in this case we will never install a present shadow pte
 *           - normal guest page fault due to the guest pte marked not present, not
 *             writable, or not executable
 *
 *  Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *           a negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
			     u32 error_code)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	int fetch_fault = error_code & PFERR_FETCH_MASK;
	struct guest_walker walker;
	u64 *shadow_pte;
	int write_pt = 0;
	int r;
	pfn_t pfn;
	int largepage = 0;
	unsigned long mmu_seq;

	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
	kvm_mmu_audit(vcpu, "pre page fault");

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	/*
	 * Look up the guest pte for the faulting address.
	 */
	r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
			     fetch_fault);

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!r) {
		pgprintk("%s: guest page fault\n", __func__);
		inject_page_fault(vcpu, addr, walker.error_code);
		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
		return 0;
	}

	if (walker.level == PT_DIRECTORY_LEVEL) {
		gfn_t large_gfn;
		large_gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE-1);
		if (is_largepage_backed(vcpu, large_gfn)) {
			walker.gfn = large_gfn;
			largepage = 1;
		}
	}
	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();
	pfn = gfn_to_pfn(vcpu->kvm, walker.gfn);
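
	/*
	 * mmu_notifier_seq is sampled before gfn_to_pfn() so that
	 * mmu_notifier_retry(), called below under mmu_lock, can detect an
	 * mmu-notifier invalidation that raced with the translation; in that
	 * case the fault is retried instead of installing a stale pfn.
	 */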
	/* mmio */
	if (is_error_pfn(pfn)) {
		pgprintk("gfn %lx is mmio\n", walker.gfn);
		kvm_release_pfn_clean(pfn);
		return 1;
	}

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq))
		goto out_unlock;
	kvm_mmu_free_some_pages(vcpu);
	shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
				  largepage, &write_pt, pfn);

	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
		 shadow_pte, *shadow_pte, write_pt);

	if (!write_pt)
		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */

	++vcpu->stat.pf_fixed;
	kvm_mmu_audit(vcpu, "post page fault (fixed)");
	spin_unlock(&vcpu->kvm->mmu_lock);

	return write_pt;

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}
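
/*
 * write_pt is set (through the ptwrite argument passed down to fetch() and
 * mmu_set_spte()) when the faulting write hit a gfn that must remain
 * write-protected because it is itself shadowed as a page table; returning 1
 * then tells the caller to emulate the instruction so the write goes through
 * kvm_mmu_pte_write() rather than retrying the access.
 */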

static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
{
	struct kvm_shadow_walk_iterator iterator;
	pt_element_t gpte;
	gpa_t pte_gpa = -1;
	int level;
	u64 *sptep;
	int need_flush = 0;

	spin_lock(&vcpu->kvm->mmu_lock);

	for_each_shadow_entry(vcpu, gva, iterator) {
		level = iterator.level;
		sptep = iterator.sptep;

		/* FIXME: properly handle invlpg on large guest pages */
		if (level == PT_PAGE_TABLE_LEVEL ||
		    ((level == PT_DIRECTORY_LEVEL) && is_large_pte(*sptep))) {
			struct kvm_mmu_page *sp = page_header(__pa(sptep));

			pte_gpa = (sp->gfn << PAGE_SHIFT);
			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);

			if (is_shadow_present_pte(*sptep)) {
				rmap_remove(vcpu->kvm, sptep);
				if (is_large_pte(*sptep))
					--vcpu->kvm->stat.lpages;
				need_flush = 1;
			}
			set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
			break;
		}

		if (!is_shadow_present_pte(*sptep))
			break;
	}

	if (need_flush)
		kvm_flush_remote_tlbs(vcpu->kvm);
	spin_unlock(&vcpu->kvm->mmu_lock);

	if (pte_gpa == -1)
		return;
	if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
				  sizeof(pt_element_t)))
		return;
	if (is_present_pte(gpte) && (gpte & PT_ACCESSED_MASK)) {
		if (mmu_topup_memory_caches(vcpu))
			return;
		kvm_mmu_pte_write(vcpu, pte_gpa, (const u8 *)&gpte,
				  sizeof(pt_element_t), 0);
	}
}
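
/*
 * The re-read of the gpte after dropping mmu_lock is a small optimization:
 * guests typically execute invlpg right after updating a pte, so if the new
 * gpte is present and accessed it is fed to kvm_mmu_pte_write() to
 * pre-install the new translation instead of waiting for the next fault.
 */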

static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	}

	return gpa;
}
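
/*
 * Worked example (illustrative values): if walk_addr() resolves vaddr
 * 0x7fff1234 to gfn 0xabcd1, the returned gpa is
 * (0xabcd1 << PAGE_SHIFT) | 0x234 = 0xabcd1234.
 */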

static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
				 struct kvm_mmu_page *sp)
{
	int i, j, offset, r;
	pt_element_t pt[256 / sizeof(pt_element_t)];
	gpa_t pte_gpa;

	if (sp->role.direct
	    || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) {
		nonpaging_prefetch_page(vcpu, sp);
		return;
	}

	pte_gpa = gfn_to_gpa(sp->gfn);
	if (PTTYPE == 32) {
		offset = sp->role.quadrant << PT64_LEVEL_BITS;
		pte_gpa += offset * sizeof(pt_element_t);
	}

	for (i = 0; i < PT64_ENT_PER_PAGE; i += ARRAY_SIZE(pt)) {
		r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, pt, sizeof pt);
		pte_gpa += ARRAY_SIZE(pt) * sizeof(pt_element_t);
		for (j = 0; j < ARRAY_SIZE(pt); ++j)
			if (r || is_present_pte(pt[j]))
				sp->spt[i+j] = shadow_trap_nonpresent_pte;
			else
				sp->spt[i+j] = shadow_notrap_nonpresent_pte;
	}
}
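
/*
 * shadow_trap_nonpresent_pte vs shadow_notrap_nonpresent_pte: both are
 * non-mapping sptes, but "notrap" marks entries the guest itself has not
 * mapped.  With the bypass_guest_pf optimization the resulting fault can be
 * reflected straight into the guest, while "trap" entries always exit to KVM
 * so it can build the missing shadow mapping.
 */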

/*
 * Using the cached information from sp->gfns is safe because:
 * - The spte has a reference to the struct page, so the pfn for a given gfn
 *   can't change unless all sptes pointing to it are nuked first.
 * - Alias changes zap the entire shadow cache.
 */
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	int i, offset, nr_present;

	offset = nr_present = 0;

	if (PTTYPE == 32)
		offset = sp->role.quadrant << PT64_LEVEL_BITS;

	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
		unsigned pte_access;
		pt_element_t gpte;
		gpa_t pte_gpa;
		gfn_t gfn = sp->gfns[i];

		if (!is_shadow_present_pte(sp->spt[i]))
			continue;

		pte_gpa = gfn_to_gpa(sp->gfn);
		pte_gpa += (i+offset) * sizeof(pt_element_t);

		if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
					  sizeof(pt_element_t)))
			return -EINVAL;

		if (gpte_to_gfn(gpte) != gfn || !is_present_pte(gpte) ||
		    !(gpte & PT_ACCESSED_MASK)) {
			u64 nonpresent;

			rmap_remove(vcpu->kvm, &sp->spt[i]);
			if (is_present_pte(gpte))
				nonpresent = shadow_trap_nonpresent_pte;
			else
				nonpresent = shadow_notrap_nonpresent_pte;
			set_shadow_pte(&sp->spt[i], nonpresent);
			continue;
		}

		nr_present++;
		pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
		set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
			 is_dirty_pte(gpte), 0, gpte & PT_GLOBAL_MASK, gfn,
			 spte_to_pfn(sp->spt[i]), true, false);
	}

	return !nr_present;
}
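
/*
 * sync_page() brings an unsynchronized shadow page back in line with the
 * guest page table it shadows: every present spte is re-validated against the
 * current gpte and stale entries are dropped.  A nonzero return (no present
 * entries left, or a failed gpte read) lets the caller discard the page.
 */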

#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef PT_LEVEL_MASK
#undef PT_DIR_BASE_ADDR_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_pde
#undef CMPXCHG