/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */
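
/*
 * For reference, the including file is expected to instantiate this
 * template along these lines (this mirrors how arch/x86/kvm/mmu.c does
 * it, though the exact surrounding code there may differ):
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 */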

#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS 4
	#define CMPXCHG cmpxchg
	#else
	#define CMPXCHG cmpxchg64
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT32_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT32_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
	#define PT_LEVEL_BITS PT32_LEVEL_BITS
	#define PT_MAX_FULL_LEVELS 2
	#define CMPXCHG cmpxchg
#else
	#error Invalid PTTYPE value
#endif

#define gpte_to_gfn_lvl FNAME(gpte_to_gfn_lvl)
#define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PT_PAGE_TABLE_LEVEL)

/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
	int level;
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];
	pt_element_t ptes[PT_MAX_FULL_LEVELS];
	pt_element_t prefetch_ptes[PTE_PREFETCH_NUM];
	gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
	unsigned pt_access;
	unsigned pte_access;
	gfn_t gfn;
	struct x86_exception fault;
};

static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
{
	return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
}
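
/*
 * Worked example (x86 with 4KiB pages, PAGE_SHIFT == 12): for a 2MiB
 * guest page mapped at level 2, PT_LVL_ADDR_MASK(2) clears the low 21
 * address bits, so a gpte of 0x402000e7 (frame 0x40200000 plus flag
 * bits) yields gfn 0x40200. The values here are illustrative only.
 */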

static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			       pt_element_t __user *ptep_user, unsigned index,
			       pt_element_t orig_pte, pt_element_t new_pte)
{
	int npages;
	pt_element_t ret;
	pt_element_t *table;
	struct page *page;

	npages = get_user_pages_fast((unsigned long)ptep_user, 1, 1, &page);
	/* Bail out if the guest page table page could not be pinned. */
	if (unlikely(npages != 1))
		return -EFAULT;

	table = kmap_atomic(page);
	ret = CMPXCHG(&table[index], orig_pte, new_pte);
	kunmap_atomic(table);

	kvm_release_page_dirty(page);

	/* Non-zero means the gpte changed under us and was not updated. */
	return (ret != orig_pte);
}

static bool FNAME(is_last_gpte)(struct guest_walker *walker,
				struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				pt_element_t gpte)
{
	if (walker->level == PT_PAGE_TABLE_LEVEL)
		return true;

	if ((walker->level == PT_DIRECTORY_LEVEL) && is_large_pte(gpte) &&
	    (PTTYPE == 64 || is_pse(vcpu)))
		return true;

	if ((walker->level == PT_PDPE_LEVEL) && is_large_pte(gpte) &&
	    (mmu->root_level == PT64_ROOT_LEVEL))
		return true;

	return false;
}

/*
 * Fetch a guest pte for a guest virtual address
 */
static int FNAME(walk_addr_generic)(struct guest_walker *walker,
				    struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				    gva_t addr, u32 access)
{
	pt_element_t pte;
	pt_element_t __user *uninitialized_var(ptep_user);
	gfn_t table_gfn;
	unsigned index, pt_access, uninitialized_var(pte_access);
	gpa_t pte_gpa;
	bool eperm, last_gpte;
	int offset;
	const int write_fault = access & PFERR_WRITE_MASK;
	const int user_fault  = access & PFERR_USER_MASK;
	const int fetch_fault = access & PFERR_FETCH_MASK;
	u16 errcode = 0;

	trace_kvm_mmu_pagetable_walk(addr, access);
retry_walk:
	eperm = false;
	walker->level = mmu->root_level;
	pte           = mmu->get_cr3(vcpu);

#if PTTYPE == 64
	if (walker->level == PT32E_ROOT_LEVEL) {
		pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3);
		trace_kvm_mmu_paging_element(pte, walker->level);
		if (!is_present_gpte(pte))
			goto error;
		--walker->level;
	}
#endif
	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
	       (mmu->get_cr3(vcpu) & CR3_NONPAE_RESERVED_BITS) == 0);

	pt_access = ACC_ALL;

	for (;;) {
		gfn_t real_gfn;
		unsigned long host_addr;

		index = PT_INDEX(addr, walker->level);

		table_gfn = gpte_to_gfn(pte);
		offset    = index * sizeof(pt_element_t);
		pte_gpa   = gfn_to_gpa(table_gfn) + offset;
		walker->table_gfn[walker->level - 1] = table_gfn;
		walker->pte_gpa[walker->level - 1] = pte_gpa;

		real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
					      PFERR_USER_MASK|PFERR_WRITE_MASK);
		if (unlikely(real_gfn == UNMAPPED_GVA))
			goto error;
		real_gfn = gpa_to_gfn(real_gfn);

		host_addr = gfn_to_hva(vcpu->kvm, real_gfn);
		if (unlikely(kvm_is_error_hva(host_addr)))
			goto error;

		ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
		if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
			goto error;

		trace_kvm_mmu_paging_element(pte, walker->level);

		if (unlikely(!is_present_gpte(pte)))
			goto error;

		if (unlikely(is_rsvd_bits_set(&vcpu->arch.mmu, pte,
					      walker->level))) {
			errcode |= PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
			goto error;
		}

		if (!check_write_user_access(vcpu, write_fault, user_fault,
					     pte))
			eperm = true;

		if (unlikely(fetch_fault && (pte & PT64_NX_MASK)))
			eperm = true;

		last_gpte = FNAME(is_last_gpte)(walker, vcpu, mmu, pte);
		if (last_gpte) {
			pte_access = pt_access & gpte_access(vcpu, pte);
			/* Check if the kernel is fetching from a user page (SMEP). */
			if (unlikely(pte_access & PT_USER_MASK) &&
			    kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
				if (fetch_fault && !user_fault)
					eperm = true;
		}

		if (!eperm && unlikely(!(pte & PT_ACCESSED_MASK))) {
			int ret;

			trace_kvm_mmu_set_accessed_bit(table_gfn, index,
						       sizeof(pte));
			ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index,
						  pte, pte|PT_ACCESSED_MASK);
			if (unlikely(ret < 0))
				goto error;
			else if (ret)
				goto retry_walk;

			mark_page_dirty(vcpu->kvm, table_gfn);
			pte |= PT_ACCESSED_MASK;
		}

		walker->ptes[walker->level - 1] = pte;

		if (last_gpte) {
			int lvl = walker->level;
			gpa_t real_gpa;
			gfn_t gfn;
			u32 ac;

			gfn = gpte_to_gfn_lvl(pte, lvl);
			gfn += (addr & PT_LVL_OFFSET_MASK(lvl)) >> PAGE_SHIFT;

			if (PTTYPE == 32 &&
			    walker->level == PT_DIRECTORY_LEVEL &&
			    is_cpuid_PSE36())
				gfn += pse36_gfn_delta(pte);

			ac = write_fault | fetch_fault | user_fault;

			real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn),
						      ac);
			if (real_gpa == UNMAPPED_GVA)
				return 0;

			walker->gfn = real_gpa >> PAGE_SHIFT;

			break;
		}

		pt_access &= gpte_access(vcpu, pte);
		--walker->level;
	}

	if (unlikely(eperm)) {
		errcode |= PFERR_PRESENT_MASK;
		goto error;
	}

	if (!write_fault)
		protect_clean_gpte(&pte_access, pte);
	else if (unlikely(!is_dirty_gpte(pte))) {
		int ret;

		trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
		ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index,
					  pte, pte|PT_DIRTY_MASK);
		if (unlikely(ret < 0))
			goto error;
		else if (ret)
			goto retry_walk;

		mark_page_dirty(vcpu->kvm, table_gfn);
		pte |= PT_DIRTY_MASK;
		walker->ptes[walker->level - 1] = pte;
	}

	walker->pt_access = pt_access;
	walker->pte_access = pte_access;
	pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
		 __func__, (u64)pte, pte_access, pt_access);
	return 1;

error:
	errcode |= write_fault | user_fault;
	if (fetch_fault && (mmu->nx ||
			    kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)))
		errcode |= PFERR_FETCH_MASK;

	walker->fault.vector = PF_VECTOR;
	walker->fault.error_code_valid = true;
	walker->fault.error_code = errcode;
	walker->fault.address = addr;
	walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;

	trace_kvm_mmu_walker_error(walker->fault.error_code);
	return 0;
}

static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gva_t addr, u32 access)
{
	return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.mmu, addr,
					access);
}

static int FNAME(walk_addr_nested)(struct guest_walker *walker,
				   struct kvm_vcpu *vcpu, gva_t addr,
				   u32 access)
{
	return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu,
					addr, access);
}

static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
					 struct kvm_mmu_page *sp, u64 *spte,
					 u64 gpte)
{
	if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
		goto no_present;

	if (!is_present_gpte(gpte))
		goto no_present;

	/* Prefetching a gpte whose accessed bit is clear would mark it
	 * accessed before the guest ever touched it. */
	if (!(gpte & PT_ACCESSED_MASK))
		goto no_present;

	return false;

no_present:
	drop_spte(vcpu->kvm, spte);
	return true;
}

static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			      u64 *spte, const void *pte)
{
	pt_element_t gpte;
	unsigned pte_access;
	pfn_t pfn;

	gpte = *(const pt_element_t *)pte;
	if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
		return;

	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
	pte_access = sp->role.access & gpte_access(vcpu, gpte);
	protect_clean_gpte(&pte_access, gpte);
	pfn = gfn_to_pfn_atomic(vcpu->kvm, gpte_to_gfn(gpte));
	if (mmu_invalid_pfn(pfn))
		return;

	/*
	 * We call mmu_set_spte() with host_writable = true because the
	 * pfn was fetched with get_user_pages(write = 1).
	 */
	mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
		     NULL, PT_PAGE_TABLE_LEVEL,
		     gpte_to_gfn(gpte), pfn, true, true);
}

static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
				struct guest_walker *gw, int level)
{
	pt_element_t curr_pte;
	gpa_t base_gpa, pte_gpa = gw->pte_gpa[level - 1];
	u64 mask;
	int r, index;

	if (level == PT_PAGE_TABLE_LEVEL) {
		mask = PTE_PREFETCH_NUM * sizeof(pt_element_t) - 1;
		base_gpa = pte_gpa & ~mask;
		index = (pte_gpa - base_gpa) / sizeof(pt_element_t);

		r = kvm_read_guest_atomic(vcpu->kvm, base_gpa,
					  gw->prefetch_ptes,
					  sizeof(gw->prefetch_ptes));
		curr_pte = gw->prefetch_ptes[index];
	} else
		r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa,
					  &curr_pte, sizeof(curr_pte));

	return r || curr_pte != gw->ptes[level - 1];
}

static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
				u64 *sptep)
{
	struct kvm_mmu_page *sp;
	pt_element_t *gptep = gw->prefetch_ptes;
	u64 *spte;
	int i;

	sp = page_header(__pa(sptep));

	if (sp->role.level > PT_PAGE_TABLE_LEVEL)
		return;

	if (sp->role.direct)
		return __direct_pte_prefetch(vcpu, sp, sptep);

	i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
	spte = sp->spt + i;

	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
		pt_element_t gpte;
		unsigned pte_access;
		gfn_t gfn;
		pfn_t pfn;

		if (spte == sptep)
			continue;

		if (is_shadow_present_pte(*spte))
			continue;

		gpte = gptep[i];

		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
			continue;

		pte_access = sp->role.access & gpte_access(vcpu, gpte);
		protect_clean_gpte(&pte_access, gpte);
		gfn = gpte_to_gfn(gpte);
		pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
					      pte_access & ACC_WRITE_MASK);
		if (mmu_invalid_pfn(pfn))
			break;

		mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
			     NULL, PT_PAGE_TABLE_LEVEL, gfn,
			     pfn, true, true);
	}
}

/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 */
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			 struct guest_walker *gw,
			 int user_fault, int write_fault, int hlevel,
			 int *emulate, pfn_t pfn, bool map_writable,
			 bool prefault)
{
	unsigned access = gw->pt_access;
	struct kvm_mmu_page *sp = NULL;
	int top_level;
	unsigned direct_access;
	struct kvm_shadow_walk_iterator it;

	if (!is_present_gpte(gw->ptes[gw->level - 1]))
		return NULL;

	direct_access = gw->pte_access;

	top_level = vcpu->arch.mmu.root_level;
	if (top_level == PT32E_ROOT_LEVEL)
		top_level = PT32_ROOT_LEVEL;
	/*
	 * Verify that the top-level gpte is still there.  Since the page
	 * is a root page, it is either write protected (and cannot be
	 * changed from now on) or it is invalid (in which case, we don't
	 * really care if it changes underneath us after this point).
	 */
	if (FNAME(gpte_changed)(vcpu, gw, top_level))
		goto out_gpte_changed;

	for (shadow_walk_init(&it, vcpu, addr);
	     shadow_walk_okay(&it) && it.level > gw->level;
	     shadow_walk_next(&it)) {
		gfn_t table_gfn;

		clear_sp_write_flooding_count(it.sptep);
		drop_large_spte(vcpu, it.sptep);

		sp = NULL;
		if (!is_shadow_present_pte(*it.sptep)) {
			table_gfn = gw->table_gfn[it.level - 2];
			sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
					      false, access, it.sptep);
		}

		/*
		 * Verify that the gpte in the page we've just write
		 * protected is still there.
		 */
		if (FNAME(gpte_changed)(vcpu, gw, it.level - 1))
			goto out_gpte_changed;

		if (sp)
			link_shadow_page(it.sptep, sp);
	}

	for (;
	     shadow_walk_okay(&it) && it.level > hlevel;
	     shadow_walk_next(&it)) {
		gfn_t direct_gfn;

		clear_sp_write_flooding_count(it.sptep);
		validate_direct_spte(vcpu, it.sptep, direct_access);

		drop_large_spte(vcpu, it.sptep);

		if (is_shadow_present_pte(*it.sptep))
			continue;

		direct_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);

		sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, it.level-1,
				      true, direct_access, it.sptep);
		link_shadow_page(it.sptep, sp);
	}

	clear_sp_write_flooding_count(it.sptep);
	mmu_set_spte(vcpu, it.sptep, access, gw->pte_access,
		     user_fault, write_fault, emulate, it.level,
		     gw->gfn, pfn, prefault, map_writable);
	FNAME(pte_prefetch)(vcpu, gw, it.sptep);

	return it.sptep;

out_gpte_changed:
	if (sp)
		kvm_mmu_put_page(sp, it.sptep);
	kvm_release_pfn_clean(pfn);
	return NULL;
}

/*
 * Page fault handler.  There are several causes for a page fault:
 *	   - there is no shadow pte for the guest pte
 *	   - write access through a shadow pte marked read only so that we can
 *	     set the dirty bit
 *	   - write access to a shadow pte marked read only so we can update the
 *	     page dirty bitmap, when userspace requests it
 *	   - mmio access; in this case we will never install a present shadow
 *	     pte
 *	   - normal guest page fault due to the guest pte marked not present,
 *	     not writable, or not executable
 *
 * Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *	    a negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
			     bool prefault)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	struct guest_walker walker;
	u64 *sptep;
	int emulate = 0;
	int r;
	pfn_t pfn;
	int level = PT_PAGE_TABLE_LEVEL;
	int force_pt_level;
	unsigned long mmu_seq;
	bool map_writable;

	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);

	if (unlikely(error_code & PFERR_RSVD_MASK))
		return handle_mmio_page_fault(vcpu, addr, error_code,
					      mmu_is_nested(vcpu));

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	/*
	 * Look up the guest pte for the faulting address.
	 */
	r = FNAME(walk_addr)(&walker, vcpu, addr, error_code);

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!r) {
		pgprintk("%s: guest page fault\n", __func__);
		if (!prefault)
			inject_page_fault(vcpu, &walker.fault);

		return 0;
	}

	if (walker.level >= PT_DIRECTORY_LEVEL)
		force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn);
	else
		force_pt_level = 1;
	if (!force_pt_level) {
		level = min(walker.level, mapping_level(vcpu, walker.gfn));
		walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
	}

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();

	if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault,
			 &map_writable))
		return 0;

	if (handle_abnormal_pfn(vcpu, mmu_is_nested(vcpu) ? 0 : addr,
				walker.gfn, pfn, walker.pte_access, &r))
		return r;

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq))
		goto out_unlock;

	kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
	kvm_mmu_free_some_pages(vcpu);
	if (!force_pt_level)
		transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
	sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
			     level, &emulate, pfn, map_writable, prefault);
	pgprintk("%s: shadow pte %p %llx emulate %d\n", __func__,
		 sptep, *sptep, emulate);

	++vcpu->stat.pf_fixed;
	kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
	spin_unlock(&vcpu->kvm->mmu_lock);

	return emulate;

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}
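
/*
 * For context: the including mmu code installs this handler in the vcpu's
 * mmu context, roughly as below (a sketch only; the real assignments live
 * in the paging init paths of mmu.c):
 *
 *	context->page_fault = paging64_page_fault;
 *
 * so a faulting guest access reaches this function through
 * vcpu->arch.mmu.page_fault().
 */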

static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
{
	int offset = 0;

	WARN_ON(sp->role.level != PT_PAGE_TABLE_LEVEL);

	if (PTTYPE == 32)
		offset = sp->role.quadrant << PT64_LEVEL_BITS;

	return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
}
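
/*
 * Example of the quadrant math above, assuming PTTYPE == 32: a 4KiB guest
 * page table holds 1024 4-byte ptes, but a shadow page covers only 512
 * entries, so the guest page is split in two. For quadrant 1 the offset
 * is 1 << PT64_LEVEL_BITS == 512 entries, i.e. 512 * 4 == 2KiB into the
 * guest page.
 */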

static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
{
	struct kvm_shadow_walk_iterator iterator;
	struct kvm_mmu_page *sp;
	int level;
	u64 *sptep;

	vcpu_clear_mmio_info(vcpu, gva);

	/*
	 * No need to check the return value here: if it fails,
	 * rmap_can_add() will let us skip the pte prefetch later.
	 */
	mmu_topup_memory_caches(vcpu);

	spin_lock(&vcpu->kvm->mmu_lock);
	for_each_shadow_entry(vcpu, gva, iterator) {
		level = iterator.level;
		sptep = iterator.sptep;

		sp = page_header(__pa(sptep));
		if (is_last_spte(*sptep, level)) {
			pt_element_t gpte;
			gpa_t pte_gpa;

			if (!sp->unsync)
				break;

			pte_gpa = FNAME(get_level1_sp_gpa)(sp);
			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);

			if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
				kvm_flush_remote_tlbs(vcpu->kvm);

			if (!rmap_can_add(vcpu))
				break;

			if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
						  sizeof(pt_element_t)))
				break;

			FNAME(update_pte)(vcpu, sp, sptep, &gpte);
		}

		if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
			break;
	}
	spin_unlock(&vcpu->kvm->mmu_lock);
}

static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
			       struct x86_exception *exception)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr)(&walker, vcpu, vaddr, access);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	} else if (exception)
		*exception = walker.fault;

	return gpa;
}

static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
				      u32 access,
				      struct x86_exception *exception)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr, access);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	} else if (exception)
		*exception = walker.fault;

	return gpa;
}
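
/*
 * Illustrative (hypothetical) caller, showing the intended use of the
 * gva_to_gpa hook defined above: translate a guest virtual address and
 * reflect any translation fault back into the guest.
 *
 *	struct x86_exception exception;
 *	gpa_t gpa;
 *
 *	gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, vaddr, PFERR_USER_MASK,
 *					      &exception);
 *	if (gpa == UNMAPPED_GVA)
 *		kvm_inject_page_fault(vcpu, &exception);
 */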

/*
 * Using the cached information from sp->gfns is safe because:
 * - The spte has a reference to the struct page, so the pfn for a given gfn
 *   can't change unless all sptes pointing to it are nuked first.
 *
 * Note:
 *   We must flush the TLBs whenever an spte is dropped, even though the
 *   guest is responsible for the flush: if we don't,
 *   kvm_mmu_notifier_invalidate_page() and
 *   kvm_mmu_notifier_invalidate_range_start() will see that the page is no
 *   longer mapped for the guest, skip the flush, and leave the guest able
 *   to access freed pages through stale TLB entries.
 *   We increment kvm->tlbs_dirty instead, so the flush can be delayed
 *   until it is actually needed.
 */
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	int i, nr_present = 0;
	bool host_writable;
	gpa_t first_pte_gpa;

	/* A direct kvm_mmu_page can not be unsync. */
	BUG_ON(sp->role.direct);

	first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);

	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
		unsigned pte_access;
		pt_element_t gpte;
		gpa_t pte_gpa;
		gfn_t gfn;

		if (!sp->spt[i])
			continue;

		pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);

		if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
					  sizeof(pt_element_t)))
			return -EINVAL;

		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
			vcpu->kvm->tlbs_dirty++;
			continue;
		}

		gfn = gpte_to_gfn(gpte);
		pte_access = sp->role.access;
		pte_access &= gpte_access(vcpu, gpte);
		protect_clean_gpte(&pte_access, gpte);

		if (sync_mmio_spte(&sp->spt[i], gfn, pte_access, &nr_present))
			continue;

		if (gfn != sp->gfns[i]) {
			drop_spte(vcpu->kvm, &sp->spt[i]);
			vcpu->kvm->tlbs_dirty++;
			continue;
		}

		nr_present++;

		host_writable = sp->spt[i] & SPTE_HOST_WRITEABLE;

		set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
			 PT_PAGE_TABLE_LEVEL, gfn,
			 spte_to_pfn(sp->spt[i]), true, false,
			 host_writable);
	}

	return !nr_present;
}
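
/*
 * For reference, the tlbs_dirty counter bumped in sync_page() above is
 * consumed on the flush side in virt/kvm/kvm_main.c, roughly as follows
 * (a sketch from memory; see kvm_flush_remote_tlbs() for the
 * authoritative code):
 *
 *	int dirty_count = kvm->tlbs_dirty;
 *
 *	smp_mb();
 *	if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
 *		++kvm->stat.remote_tlb_flush;
 *	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
 */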

#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef PT_LVL_ADDR_MASK
#undef PT_LVL_OFFSET_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_lvl
#undef CMPXCHG