arm64/sme: Implement ZA context switching
author Mark Brown <broonie@kernel.org>
Tue, 19 Apr 2022 11:22:23 +0000 (12:22 +0100)
committer Catalin Marinas <catalin.marinas@arm.com>
Fri, 22 Apr 2022 17:51:02 +0000 (18:51 +0100)
Allocate space for storing ZA on first access to SME and use that to save
and restore ZA state when context switching. We do this using the vector
form of the LDR and STR ZA instructions; these do not require streaming
mode and carry implementation recommendations that they avoid contention
issues in shared SMCU implementations.
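
For illustration, ZA is an SVLb x SVLb byte array and the vector form of
STR ZA stores one horizontal row per iteration of the sme_save_za loop
added below. A minimal C sketch of the same walk, where sme_get_svl_b()
and str_za_row() are hypothetical stand-ins for RDSVL and the vector-form
STR ZA rather than kernel APIs:

	#include <stdint.h>

	/* Hypothetical stand-in for "rdsvl x1, #1": streaming VL in bytes. */
	extern unsigned int sme_get_svl_b(void);
	/* Hypothetical stand-in for the vector-form STR ZA on one row. */
	extern void str_za_row(uint32_t row, uint8_t *dst);

	/* Save ZA one row at a time, advancing SVLb bytes per row. */
	static void za_save_state_sketch(uint8_t *state)
	{
		unsigned int svl_b = sme_get_svl_b();	/* x1 = VL/8 */
		uint32_t row;

		for (row = 0; row < svl_b; row++) {	/* w12 in the asm */
			str_za_row(row, state);
			state += svl_b;
		}
	}

The backing buffer must therefore be at least SVLb * SVLb bytes for the
task's SME vector length.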

Since ZA is architecturally guaranteed to be zeroed when enabled we do not
need to explicitly zero ZA: either we will be restoring from a saved copy,
or we will be trapping on first use of SME and therefore know that ZA must
currently be disabled.
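
As a rough sketch of why no explicit zeroing is needed (the allocation
itself lands in a separate patch; the helper below is illustrative and
not the kernel's code): the freshly allocated buffer is never read
before za_save_state() has written it, and enabling ZA zeroes the
architectural state itself.

	#include <stdlib.h>

	/* Illustrative per-task state; the kernel keeps this in thread_struct. */
	struct sme_state_sketch {
		void *za_state;		/* SVLb * SVLb byte backing buffer */
		unsigned int svl_b;	/* streaming vector length in bytes */
	};

	/*
	 * First-use trap: ZA is known to be disabled here, and the 0 -> 1
	 * transition of PSTATE.ZA zeroes the ZA array, so the buffer can
	 * be left uninitialised rather than memset() to zero.
	 */
	static int sme_first_use_sketch(struct sme_state_sketch *s)
	{
		if (!s->za_state)
			s->za_state = malloc((size_t)s->svl_b * s->svl_b);
		return s->za_state ? 0 : -1;
	}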

Signed-off-by: Mark Brown <broonie@kernel.org>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lore.kernel.org/r/20220419112247.711548-16-broonie@kernel.org
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
arch/arm64/include/asm/fpsimd.h
arch/arm64/include/asm/fpsimdmacros.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/processor.h
arch/arm64/kernel/entry-fpsimd.S
arch/arm64/kernel/fpsimd.c
arch/arm64/kvm/fpsimd.c

index cd94f5c5b5163a6f809e7437badc6023d8606f54..1a709c03bb6cdd16e598f5985e86271b0306a41c 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -47,7 +47,8 @@ extern void fpsimd_update_current_state(struct user_fpsimd_state const *state);
 
 extern void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *state,
                                     void *sve_state, unsigned int sve_vl,
-                                    unsigned int sme_vl, u64 *svcr);
+                                    void *za_state, unsigned int sme_vl,
+                                    u64 *svcr);
 
 extern void fpsimd_flush_task_state(struct task_struct *target);
 extern void fpsimd_save_and_flush_cpu_state(void);
@@ -90,6 +91,8 @@ extern void sve_flush_live(bool flush_ffr, unsigned long vq_minus_1);
 extern unsigned int sve_get_vl(void);
 extern void sve_set_vq(unsigned long vq_minus_1);
 extern void sme_set_vq(unsigned long vq_minus_1);
+extern void za_save_state(void *state);
+extern void za_load_state(void const *state);
 
 struct arm64_cpu_capabilities;
 extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused);
index f6ab36e0cd8d4ff302c9e569e66b06a0062fa133..5e0910cf483216774bed55087db0462bacdcaa5a 100644
--- a/arch/arm64/include/asm/fpsimdmacros.h
+++ b/arch/arm64/include/asm/fpsimdmacros.h
                ldr             w\nxtmp, [\xpfpsr, #4]
                msr             fpcr, x\nxtmp
 .endm
+
+.macro sme_save_za nxbase, xvl, nw
+       mov     w\nw, #0
+
+423:
+       _sme_str_zav \nw, \nxbase
+       add     x\nxbase, x\nxbase, \xvl
+       add     x\nw, x\nw, #1
+       cmp     \xvl, x\nw
+       bne     423b
+.endm
+
+.macro sme_load_za nxbase, xvl, nw
+       mov     w\nw, #0
+
+423:
+       _sme_ldr_zav \nw, \nxbase
+       add     x\nxbase, x\nxbase, \xvl
+       add     x\nw, x\nw, #1
+       cmp     \xvl, x\nw
+       bne     423b
+.endm
index 94a27a7520f4740e64e202599c11fa75441b4e44..8a7c442d5b57719fe85555e389cb09b5dec34ffb 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -295,8 +295,11 @@ struct vcpu_reset_state {
 
 struct kvm_vcpu_arch {
        struct kvm_cpu_context ctxt;
+
+       /* Guest floating point state */
        void *sve_state;
        unsigned int sve_max_vl;
+       u64 svcr;
 
        /* Stage 2 paging state used by the hardware on next switch */
        struct kvm_s2_mmu *hw_mmu;
index 7542310b4e6b51e301939e690485470055926742..6a3a6c3dec90500e2d8255d5b2a623559141e901 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -154,6 +154,7 @@ struct thread_struct {
 
        unsigned int            fpsimd_cpu;
        void                    *sve_state;     /* SVE registers, if any */
+       void                    *za_state;      /* ZA register, if any */
        unsigned int            vl[ARM64_VEC_MAX];      /* vector length */
        unsigned int            vl_onexec[ARM64_VEC_MAX]; /* vl after next exec */
        unsigned long           fault_address;  /* fault info */
index 6f88c0f86d509d4833968c062516e7169b439544..229436f33df5affb02df5552f62adbe053b72eef 100644
--- a/arch/arm64/kernel/entry-fpsimd.S
+++ b/arch/arm64/kernel/entry-fpsimd.S
@@ -99,4 +99,26 @@ SYM_FUNC_START(sme_set_vq)
        ret
 SYM_FUNC_END(sme_set_vq)
 
+/*
+ * Save the SME state
+ *
+ * x0 - pointer to buffer for state
+ */
+SYM_FUNC_START(za_save_state)
+       _sme_rdsvl      1, 1            // x1 = VL/8
+       sme_save_za 0, x1, 12
+       ret
+SYM_FUNC_END(za_save_state)
+
+/*
+ * Load the SME state
+ *
+ * x0 - pointer to buffer for state
+ */
+SYM_FUNC_START(za_load_state)
+       _sme_rdsvl      1, 1            // x1 = VL/8
+       sme_load_za 0, x1, 12
+       ret
+SYM_FUNC_END(za_load_state)
+
 #endif /* CONFIG_ARM64_SME */
index f8506a875eb2160fa24d04bd8473442e156e066c..dc38f3f2a28af14c2c3a8bef0be84df0887afa5a 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
 struct fpsimd_last_state_struct {
        struct user_fpsimd_state *st;
        void *sve_state;
+       void *za_state;
        u64 *svcr;
        unsigned int sve_vl;
        unsigned int sme_vl;
@@ -387,11 +388,15 @@ static void task_fpsimd_load(void)
        if (system_supports_sme()) {
                unsigned long sme_vl = task_get_sme_vl(current);
 
+               /* Ensure VL is set up for restoring data */
                if (test_thread_flag(TIF_SME))
                        sme_set_vq(sve_vq_from_vl(sme_vl) - 1);
 
                write_sysreg_s(current->thread.svcr, SYS_SVCR_EL0);
 
+               if (thread_za_enabled(&current->thread))
+                       za_load_state(current->thread.za_state);
+
                if (thread_sm_enabled(&current->thread)) {
                        restore_sve_regs = true;
                        restore_ffr = system_supports_fa64();
@@ -441,11 +446,10 @@ static void fpsimd_save(void)
                u64 *svcr = last->svcr;
                *svcr = read_sysreg_s(SYS_SVCR_EL0);
 
-               if (thread_za_enabled(&current->thread)) {
-                       /* ZA state management is not implemented yet */
-                       force_signal_inject(SIGKILL, SI_KERNEL, 0, 0);
-                       return;
-               }
+               *svcr = read_sysreg_s(SYS_SVCR_EL0);
+
+               if (*svcr & SYS_SVCR_EL0_ZA_MASK)
+                       za_save_state(last->za_state);
 
                /* If we are in streaming mode override regular SVE. */
                if (*svcr & SYS_SVCR_EL0_SM_MASK) {
@@ -1483,6 +1487,7 @@ static void fpsimd_bind_task_to_cpu(void)
        WARN_ON(!system_supports_fpsimd());
        last->st = &current->thread.uw.fpsimd_state;
        last->sve_state = current->thread.sve_state;
+       last->za_state = current->thread.za_state;
        last->sve_vl = task_get_sve_vl(current);
        last->sme_vl = task_get_sme_vl(current);
        last->svcr = &current->thread.svcr;
@@ -1500,8 +1505,8 @@ static void fpsimd_bind_task_to_cpu(void)
 }
 
 void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
-                             unsigned int sve_vl, unsigned int sme_vl,
-                             u64 *svcr)
+                             unsigned int sve_vl, void *za_state,
+                             unsigned int sme_vl, u64 *svcr)
 {
        struct fpsimd_last_state_struct *last =
                this_cpu_ptr(&fpsimd_last_state);
@@ -1512,6 +1517,7 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
        last->st = st;
        last->svcr = svcr;
        last->sve_state = sve_state;
+       last->za_state = za_state;
        last->sve_vl = sve_vl;
        last->sme_vl = sme_vl;
 }
index 394e583bb73e36651cac4bf4648ab1f5ca32f995..57d7ac3cfa0c8d92cfa18b04534eaa3a43ffbe80 100644
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -116,7 +116,7 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
                fpsimd_bind_state_to_cpu(&vcpu->arch.ctxt.fp_regs,
                                         vcpu->arch.sve_state,
                                         vcpu->arch.sve_max_vl,
-                                        0, NULL);
+                                        NULL, 0, &vcpu->arch.svcr);
 
                clear_thread_flag(TIF_FOREIGN_FPSTATE);
                update_thread_flag(TIF_SVE, vcpu_has_sve(vcpu));