kvm arm: Move fake PGD handling to arch specific files
authorSuzuki K Poulose <suzuki.poulose@arm.com>
Tue, 1 Mar 2016 10:03:06 +0000 (10:03 +0000)
committerChristoffer Dall <christoffer.dall@linaro.org>
Thu, 21 Apr 2016 12:56:44 +0000 (14:56 +0200)
Rearrange the code for fake pgd handling, which is applicable
only to arm64. This will later be removed once we introduce
the stage2 page table walker macros.

Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
arch/arm/include/asm/kvm_mmu.h
arch/arm/kvm/mmu.c
arch/arm64/include/asm/kvm_mmu.h

index da44be9db4fab3ed973d03abd6060cf8b0f8ae29..c2b2b27b7da14ba87c022be23f68da2ab374d377 100644 (file)
@@ -161,8 +161,6 @@ static inline bool kvm_page_empty(void *ptr)
 #define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
 #define kvm_pud_table_empty(kvm, pudp) (0)
 
-#define KVM_PREALLOC_LEVEL     0
-
 static inline void *kvm_get_hwpgd(struct kvm *kvm)
 {
        return kvm->arch.pgd;
@@ -173,6 +171,15 @@ static inline unsigned int kvm_get_hwpgd_size(void)
        return PTRS_PER_S2_PGD * sizeof(pgd_t);
 }
 
+static inline pgd_t *kvm_setup_fake_pgd(pgd_t *hwpgd)
+{
+       return hwpgd;
+}
+
+static inline void kvm_free_fake_pgd(pgd_t *pgd)
+{
+}
+
 struct kvm;
 
 #define kvm_flush_dcache_to_poc(a,l)   __cpuc_flush_dcache_area((a), (l))
index 58dbd5c439df45bc10497954db0e61b433646cd6..774d00b8066bef282a80815bc76730792ac52cae 100644 (file)
@@ -684,47 +684,16 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
        if (!hwpgd)
                return -ENOMEM;
 
-       /* When the kernel uses more levels of page tables than the
+       /*
+        * When the kernel uses more levels of page tables than the
         * guest, we allocate a fake PGD and pre-populate it to point
         * to the next-level page table, which will be the real
         * initial page table pointed to by the VTTBR.
-        *
-        * When KVM_PREALLOC_LEVEL==2, we allocate a single page for
-        * the PMD and the kernel will use folded pud.
-        * When KVM_PREALLOC_LEVEL==1, we allocate 2 consecutive PUD
-        * pages.
         */
-       if (KVM_PREALLOC_LEVEL > 0) {
-               int i;
-
-               /*
-                * Allocate fake pgd for the page table manipulation macros to
-                * work.  This is not used by the hardware and we have no
-                * alignment requirement for this allocation.
-                */
-               pgd = kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t),
-                               GFP_KERNEL | __GFP_ZERO);
-
-               if (!pgd) {
-                       kvm_free_hwpgd(hwpgd);
-                       return -ENOMEM;
-               }
-
-               /* Plug the HW PGD into the fake one. */
-               for (i = 0; i < PTRS_PER_S2_PGD; i++) {
-                       if (KVM_PREALLOC_LEVEL == 1)
-                               pgd_populate(NULL, pgd + i,
-                                            (pud_t *)hwpgd + i * PTRS_PER_PUD);
-                       else if (KVM_PREALLOC_LEVEL == 2)
-                               pud_populate(NULL, pud_offset(pgd, 0) + i,
-                                            (pmd_t *)hwpgd + i * PTRS_PER_PMD);
-               }
-       } else {
-               /*
-                * Allocate actual first-level Stage-2 page table used by the
-                * hardware for Stage-2 page table walks.
-                */
-               pgd = (pgd_t *)hwpgd;
+       pgd = kvm_setup_fake_pgd(hwpgd);
+       if (IS_ERR(pgd)) {
+               kvm_free_hwpgd(hwpgd);
+               return PTR_ERR(pgd);
        }
 
        kvm_clean_pgd(pgd);
@@ -831,9 +800,7 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
 
        unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
        kvm_free_hwpgd(kvm_get_hwpgd(kvm));
-       if (KVM_PREALLOC_LEVEL > 0)
-               kfree(kvm->arch.pgd);
-
+       kvm_free_fake_pgd(kvm->arch.pgd);
        kvm->arch.pgd = NULL;
 }
 
index 22732a5e31197b4f653d58fb456e7ffb72b6e631..9a3409f7b37a3f928a5d877e7b9c0b15434955e7 100644 (file)
@@ -208,6 +208,49 @@ static inline unsigned int kvm_get_hwpgd_size(void)
        return PTRS_PER_S2_PGD * sizeof(pgd_t);
 }
 
+/*
+ * Allocate fake pgd for the host kernel page table macros to work.
+ * This is not used by the hardware and we have no alignment
+ * requirement for this allocation.
+ */
+static inline pgd_t *kvm_setup_fake_pgd(pgd_t *hwpgd)
+{
+       int i;
+       pgd_t *pgd;
+
+       if (!KVM_PREALLOC_LEVEL)
+               return hwpgd;
+
+       /*
+        * When KVM_PREALLOC_LEVEL==2, we allocate a single page for
+        * the PMD and the kernel will use folded pud.
+        * When KVM_PREALLOC_LEVEL==1, we allocate 2 consecutive PUD
+        * pages.
+        */
+
+       pgd = kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t),
+                       GFP_KERNEL | __GFP_ZERO);
+       if (!pgd)
+               return ERR_PTR(-ENOMEM);
+
+       /* Plug the HW PGD into the fake one. */
+       for (i = 0; i < PTRS_PER_S2_PGD; i++) {
+               if (KVM_PREALLOC_LEVEL == 1)
+                       pgd_populate(NULL, pgd + i,
+                                    (pud_t *)hwpgd + i * PTRS_PER_PUD);
+               else if (KVM_PREALLOC_LEVEL == 2)
+                       pud_populate(NULL, pud_offset(pgd, 0) + i,
+                                    (pmd_t *)hwpgd + i * PTRS_PER_PMD);
+       }
+
+       return pgd;
+}
+
+static inline void kvm_free_fake_pgd(pgd_t *pgd)
+{
+       if (KVM_PREALLOC_LEVEL > 0)
+               kfree(pgd);
+}
 static inline bool kvm_page_empty(void *ptr)
 {
        struct page *ptr_page = virt_to_page(ptr);