KVM: PPC: Book3S HV Nested: Make nested HFSCR state accessible
author Nicholas Piggin <npiggin@gmail.com>
Wed, 11 Aug 2021 16:00:40 +0000 (02:00 +1000)
committer Michael Ellerman <mpe@ellerman.id.au>
Wed, 25 Aug 2021 06:37:18 +0000 (16:37 +1000)
When the L0 runs a nested L2, there are several permutations of HFSCR
that can be relevant: the HFSCR that the L1 vcpu requested, the HFSCR
that the L1 vcpu may use, and the HFSCR that is actually being used to
run the L2.

The L1 requested HFSCR is not accessible outside the nested hcall
handler, so copy that into a new kvm_nested_guest.hfscr field.

The permitted HFSCR is taken from the HFSCR that the L1 runs with,
which is also not accessible while the hcall is being made. Move
this into a new kvm_vcpu_arch.hfscr_permitted field.

These will be used by the next patch to improve facility handling
for nested guests, and later by facility demand faulting patches.
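A rough sketch of how the three values relate after this patch
(condensed from the hunks below, not a literal excerpt):

	/* At vcpu creation: the facilities the L0 permits the L1 to use */
	vcpu->arch.hfscr_permitted = vcpu->arch.hfscr;

	/* At H_ENTER_NESTED: the HFSCR the L1 requested for this L2 */
	l2->hfscr = l2_hv.hfscr;

	/* When loading L2 HV regs: the HFSCR the L2 actually runs with */
	vcpu->arch.hfscr = l2_hv->hfscr &
			   (HFSCR_INTR_CAUSE | vcpu->arch.hfscr_permitted);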

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210811160134.904987-7-npiggin@gmail.com
arch/powerpc/include/asm/kvm_book3s_64.h
arch/powerpc/include/asm/kvm_host.h
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_hv_nested.c

index eaf3a562bf1edfacac70936890a162fa5f65fb20..19b6942c6969a354610af046ba2b21b784be745f 100644 (file)
@@ -39,6 +39,7 @@ struct kvm_nested_guest {
        pgd_t *shadow_pgtable;          /* our page table for this guest */
        u64 l1_gr_to_hr;                /* L1's addr of part'n-scoped table */
        u64 process_table;              /* process table entry for this guest */
+       u64 hfscr;                      /* HFSCR that the L1 requested for this nested guest */
        long refcnt;                    /* number of pointers to this struct */
        struct mutex tlb_lock;          /* serialize page faults and tlbies */
        struct kvm_nested_guest *next;
index 9f52f282b1aa494eafa72e2dfd07d633d25e041a..a779f7849cfbbdd1168c8f89c1d0235174cb07c6 100644 (file)
@@ -811,6 +811,8 @@ struct kvm_vcpu_arch {
 
        u32 online;
 
+       u64 hfscr_permitted;    /* A mask of permitted HFSCR facilities */
+
        /* For support of nested guests */
        struct kvm_nested_guest *nested;
        u32 nested_vcpu_id;
index e7df8a3ca62c241578a8e20da31cd860faaa54ef..9d31267d26b35c60bddddb3458cbc6b91982a6f7 100644 (file)
@@ -2715,6 +2715,8 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
        if (cpu_has_feature(CPU_FTR_TM_COMP))
                vcpu->arch.hfscr |= HFSCR_TM;
 
+       vcpu->arch.hfscr_permitted = vcpu->arch.hfscr;
+
        kvmppc_mmu_book3s_hv_init(vcpu);
 
        vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
index 1eb4e989edc79dd94ec3f444f24c246d73b6161c..5ad5014c6f68be7d3985e00b585609680f55c9b2 100644 (file)
@@ -272,10 +272,10 @@ static void load_l2_hv_regs(struct kvm_vcpu *vcpu,
                                      (vc->lpcr & ~mask) | (*lpcr & mask));
 
        /*
-        * Don't let L1 enable features for L2 which we've disabled for L1,
+        * Don't let L1 enable features for L2 which we don't allow for L1,
         * but preserve the interrupt cause field.
         */
-       vcpu->arch.hfscr = l2_hv->hfscr & (HFSCR_INTR_CAUSE | vcpu->arch.hfscr);
+       vcpu->arch.hfscr = l2_hv->hfscr & (HFSCR_INTR_CAUSE | vcpu->arch.hfscr_permitted);
 
        /* Don't let data address watchpoint match in hypervisor state */
        vcpu->arch.dawrx0 = l2_hv->dawrx0 & ~DAWRX_HYP;
@@ -362,6 +362,7 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
        /* set L1 state to L2 state */
        vcpu->arch.nested = l2;
        vcpu->arch.nested_vcpu_id = l2_hv.vcpu_token;
+       l2->hfscr = l2_hv.hfscr;
        vcpu->arch.regs = l2_regs;
 
        /* Guest must always run with ME enabled, HV disabled. */