KVM: arm64: nv: Enforce S2 alignment when contiguous bit is set
author Marc Zyngier <maz@kernel.org>
Sat, 10 Aug 2024 17:42:41 +0000 (18:42 +0100)
committer Marc Zyngier <maz@kernel.org>
Fri, 30 Aug 2024 11:04:20 +0000 (12:04 +0100)
Despite KVM not using the contiguous bit for anything related to
TLBs, the architecture does require that the alignment implied by the
contiguous bit for the page size and level in use be enforced.

Handle this by offsetting the point where the PA and the IPA merge
when the contiguous bit is set.

Fixes: 61e30b9eef7f ("KVM: arm64: nv: Implement nested Stage-2 page table walk logic")
Reported-by: Alexandru Elisei <alexandru.elisei@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
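
For illustration only (not part of the patch; every name and constant
below is made up), here is a minimal self-contained sketch of what the
extra shift does. With the contiguous bit set on a 4K granule, level-3
mapping, the point where PA and IPA merge moves from bit 12 to bit 16,
so four more low-order bits of the output address are taken from the
IPA instead of the descriptor:

/*
 * Standalone sketch of the PA/IPA merge performed by the walker, using
 * the same masking scheme as walk_nested_s2_pgd(). Hypothetical values;
 * builds with any C99 compiler.
 */
#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the kernel's GENMASK_ULL() */
#define GENMASK_ULL(h, l) \
	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

static uint64_t merge(uint64_t desc, uint64_t ipa, unsigned int addr_bottom)
{
	return (desc & GENMASK_ULL(47, addr_bottom)) |
	       (ipa & GENMASK_ULL(addr_bottom - 1, 0));
}

int main(void)
{
	uint64_t desc = 0x40000000ULL;	/* made-up descriptor output address */
	uint64_t ipa  = 0xbeefULL;	/* made-up input address (low bits only) */

	/* 4K page at level 3: PA and IPA normally merge at bit 12 */
	printf("plain:      %#llx\n", (unsigned long long)merge(desc, ipa, 12));

	/* Contiguous bit set on 4K: shift of 4, merge point moves to bit 16 */
	printf("contiguous: %#llx\n", (unsigned long long)merge(desc, ipa, 12 + 4));

	return 0;
}
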
arch/arm64/include/asm/kvm_nested.h
arch/arm64/kvm/nested.c

index 5b06c31035a246b49acfa2cd2991d05df92a374d..6e163501f13ea1ba48208d97a2301cc346d50513 100644
--- a/arch/arm64/include/asm/kvm_nested.h
+++ b/arch/arm64/include/asm/kvm_nested.h
@@ -205,4 +205,26 @@ static inline u64 kvm_encode_nested_level(struct kvm_s2_trans *trans)
        return FIELD_PREP(KVM_NV_GUEST_MAP_SZ, trans->level);
 }
 
+/* Adjust alignment for the contiguous bit as per StageOA() */
+#define contiguous_bit_shift(d, wi, l)                                 \
+       ({                                                              \
+               u8 shift = 0;                                           \
+                                                                       \
+               if ((d) & PTE_CONT) {                                   \
+                       switch (BIT((wi)->pgshift)) {                   \
+                       case SZ_4K:                                     \
+                               shift = 4;                              \
+                               break;                                  \
+                       case SZ_16K:                                    \
+                               shift = (l) == 2 ? 5 : 7;               \
+                               break;                                  \
+                       case SZ_64K:                                    \
+                               shift = 5;                              \
+                               break;                                  \
+                       }                                               \
+               }                                                       \
+                                                                       \
+               shift;                                                  \
+       })
+
 #endif /* __ARM64_KVM_NESTED_H */
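
Note that the macro evaluates to 0 when the contiguous bit is clear
(shift keeps its initial value), so the stage-2 walker below can add
its result to addr_bottom unconditionally.
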
index de789e0f1ae9cb6e1bc7ba0bd7bfe691df0143af..49a7832a3fb1d8c293405757fed7b624d652dd46 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -282,11 +282,6 @@ static int walk_nested_s2_pgd(phys_addr_t ipa,
                return 1;
        }
 
-       /*
-        * We don't use the contiguous bit in the stage-2 ptes, so skip check
-        * for misprogramming of the contiguous bit.
-        */
-
        if (check_output_size(wi, desc)) {
                out->esr = compute_fsc(level, ESR_ELx_FSC_ADDRSZ);
                out->upper_attr = desc;
@@ -299,6 +294,8 @@ static int walk_nested_s2_pgd(phys_addr_t ipa,
                return 1;
        }
 
+       addr_bottom += contiguous_bit_shift(desc, wi, level);
+
        /* Calculate and return the result */
        paddr = (desc & GENMASK_ULL(47, addr_bottom)) |
                (ipa & GENMASK_ULL(addr_bottom - 1, 0));