arm64/fpsimd: Support FEAT_FPMR
author Mark Brown <broonie@kernel.org>
Wed, 6 Mar 2024 23:14:48 +0000 (23:14 +0000)
committer Catalin Marinas <catalin.marinas@arm.com>
Thu, 7 Mar 2024 17:14:53 +0000 (17:14 +0000)
FEAT_FPMR defines a new EL0-accessible register, FPMR, used to configure
the FP8 related features added to the architecture at the same time.
Detect support for this register and context switch it for EL0 when
present.
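
As an illustrative sketch (not part of this patch), userspace would
probe for the feature via the hwcap added by a separate patch in this
series; the HWCAP2_FPMR name and fallback bit value below are
assumptions to be checked against the final uapi headers:

    #include <stdio.h>
    #include <sys/auxv.h>

    /*
     * HWCAP2_FPMR comes from <asm/hwcap.h> once the hwcap patch in
     * this series lands; the fallback value here is an assumption
     * for illustration only.
     */
    #ifndef HWCAP2_FPMR
    #define HWCAP2_FPMR (1UL << 48)
    #endif

    int main(void)
    {
        if (getauxval(AT_HWCAP2) & HWCAP2_FPMR)
            puts("FPMR supported and context switched for EL0");
        else
            puts("FPMR not reported");
        return 0;
    }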

Due to the sharing of responsibility for saving floating point state
between the host kernel and KVM, FP8 support is not yet implemented in
KVM; a stub similar to that used for SVCR is provided for FPMR in order
to avoid bisection issues. To make it easier to share host state with
the hypervisor, we store FPMR as a hardened usercopy field in uw (along
with some padding).
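
The explicit padding matters because struct user_fpsimd_state holds
128-bit vector registers and is therefore 16-byte aligned: adding a
lone u64 ahead of it would otherwise make the compiler insert hidden
padding, which the BUILD_BUG_ON() in arch_thread_struct_whitelist()
(summing the field sizes against the size of uw) would reject. A
standalone sketch of that invariant, with stand-in types local to
this example:

    #include <stdint.h>

    /* Stand-in for the kernel's user_fpsimd_state: 16-byte aligned. */
    struct fp_state_example {
        __uint128_t vregs[32];
        uint32_t fpsr, fpcr;
    };

    struct uw_example {
        unsigned long tp_value;
        unsigned long tp2_value;
        uint64_t fpmr;
        unsigned long pad; /* drop this and the assert below fires */
        struct fp_state_example fpsimd_state;
    };

    /* Mirror of the BUILD_BUG_ON(): whitelisted size must equal the
     * sum of the fields, i.e. no implicit padding is allowed. */
    _Static_assert(sizeof(struct uw_example) ==
                   sizeof(unsigned long) + sizeof(unsigned long) +
                   sizeof(uint64_t) + sizeof(unsigned long) +
                   sizeof(struct fp_state_example),
                   "implicit padding would leak into the whitelist");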

Signed-off-by: Mark Brown <broonie@kernel.org>
Link: https://lore.kernel.org/r/20240306-arm64-2023-dpisa-v5-3-c568edc8ed7f@kernel.org
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/fpsimd.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/processor.h
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/fpsimd.c
arch/arm64/kvm/fpsimd.c
arch/arm64/tools/cpucaps

index 21c824edf8ce4a6fa1c208b7a882df4b4e31eeb7..34fcdbc65d7d5684ed487d084839ab41561179e2 100644 (file)
@@ -768,6 +768,11 @@ static __always_inline bool system_supports_tpidr2(void)
        return system_supports_sme();
 }
 
+static __always_inline bool system_supports_fpmr(void)
+{
+       return alternative_has_cap_unlikely(ARM64_HAS_FPMR);
+}
+
 static __always_inline bool system_supports_cnp(void)
 {
        return alternative_has_cap_unlikely(ARM64_HAS_CNP);
index 50e5f25d3024ced8b3c107de352a69e65b6ab7f6..74afca3bd312c69b513a5af177032ef037f9b6cb 100644 (file)
@@ -89,6 +89,7 @@ struct cpu_fp_state {
        void *sve_state;
        void *sme_state;
        u64 *svcr;
+       u64 *fpmr;
        unsigned int sve_vl;
        unsigned int sme_vl;
        enum fp_type *fp_type;
@@ -154,6 +155,7 @@ extern void cpu_enable_sve(const struct arm64_cpu_capabilities *__unused);
 extern void cpu_enable_sme(const struct arm64_cpu_capabilities *__unused);
 extern void cpu_enable_sme2(const struct arm64_cpu_capabilities *__unused);
 extern void cpu_enable_fa64(const struct arm64_cpu_capabilities *__unused);
+extern void cpu_enable_fpmr(const struct arm64_cpu_capabilities *__unused);
 
 extern u64 read_smcr_features(void);
 
index 21c57b812569f22532bd57c7fb17af669d3eb370..b779cbc2211cec9aaf1cbc206b5276c6e44f0c33 100644 (file)
@@ -543,6 +543,7 @@ struct kvm_vcpu_arch {
        enum fp_type fp_type;
        unsigned int sve_max_vl;
        u64 svcr;
+       u64 fpmr;
 
        /* Stage 2 paging state used by the hardware on next switch */
        struct kvm_s2_mmu *hw_mmu;
index 5b0a04810b236b2b1a903547ca3d12f9d32b6675..f77371232d8c6d542c7df057feea2e21752f34f2 100644 (file)
@@ -155,6 +155,8 @@ struct thread_struct {
        struct {
                unsigned long   tp_value;       /* TLS register */
                unsigned long   tp2_value;
+               u64             fpmr;
+               unsigned long   pad;
                struct user_fpsimd_state fpsimd_state;
        } uw;
 
@@ -253,6 +255,8 @@ static inline void arch_thread_struct_whitelist(unsigned long *offset,
        BUILD_BUG_ON(sizeof_field(struct thread_struct, uw) !=
                     sizeof_field(struct thread_struct, uw.tp_value) +
                     sizeof_field(struct thread_struct, uw.tp2_value) +
+                    sizeof_field(struct thread_struct, uw.fpmr) +
+                    sizeof_field(struct thread_struct, uw.pad) +
                     sizeof_field(struct thread_struct, uw.fpsimd_state));
 
        *offset = offsetof(struct thread_struct, uw);
index eae59ec0f4b0b832d9ef98ebd7916616cdfdafd0..0263565f617aeb6707b232f2265f914f0881c0ef 100644 (file)
@@ -272,6 +272,7 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
 };
 
 static const struct arm64_ftr_bits ftr_id_aa64pfr2[] = {
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR2_EL1_FPMR_SHIFT, 4, 0),
        ARM64_FTR_END,
 };
 
@@ -2767,6 +2768,14 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .type = ARM64_CPUCAP_SYSTEM_FEATURE,
                .matches = has_lpa2,
        },
+       {
+               .desc = "FPMR",
+               .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+               .capability = ARM64_HAS_FPMR,
+               .matches = has_cpuid_feature,
+               .cpu_enable = cpu_enable_fpmr,
+               ARM64_CPUID_FIELDS(ID_AA64PFR2_EL1, FPMR, IMP)
+       },
        {},
 };
 
index a5dc6f764195847251dc25c196304cbef44d8850..8e24b5e5e192d51052ff239b7381d91d424b9975 100644 (file)
@@ -359,6 +359,9 @@ static void task_fpsimd_load(void)
        WARN_ON(preemptible());
        WARN_ON(test_thread_flag(TIF_KERNEL_FPSTATE));
 
+       if (system_supports_fpmr())
+               write_sysreg_s(current->thread.uw.fpmr, SYS_FPMR);
+
        if (system_supports_sve() || system_supports_sme()) {
                switch (current->thread.fp_type) {
                case FP_STATE_FPSIMD:
@@ -446,6 +449,9 @@ static void fpsimd_save_user_state(void)
        if (test_thread_flag(TIF_FOREIGN_FPSTATE))
                return;
 
+       if (system_supports_fpmr())
+               *(last->fpmr) = read_sysreg_s(SYS_FPMR);
+
        /*
         * If a task is in a syscall the ABI allows us to only
         * preserve the state shared with FPSIMD so don't bother
@@ -688,6 +694,12 @@ static void sve_to_fpsimd(struct task_struct *task)
        }
 }
 
+void cpu_enable_fpmr(const struct arm64_cpu_capabilities *__always_unused p)
+{
+       write_sysreg_s(read_sysreg_s(SYS_SCTLR_EL1) | SCTLR_EL1_EnFPM_MASK,
+                      SYS_SCTLR_EL1);
+}
+
 #ifdef CONFIG_ARM64_SVE
 /*
  * Call __sve_free() directly only if you know task can't be scheduled
@@ -1680,6 +1692,7 @@ static void fpsimd_bind_task_to_cpu(void)
        last->sve_vl = task_get_sve_vl(current);
        last->sme_vl = task_get_sme_vl(current);
        last->svcr = &current->thread.svcr;
+       last->fpmr = &current->thread.uw.fpmr;
        last->fp_type = &current->thread.fp_type;
        last->to_save = FP_STATE_CURRENT;
        current->thread.fpsimd_cpu = smp_processor_id();
index 8c1d0d4853df48abf4d089bbde153bcee8d0e6d0..e3e611e30e916af5687a60b12c0898dac86cef16 100644 (file)
@@ -153,6 +153,7 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
                fp_state.sve_vl = vcpu->arch.sve_max_vl;
                fp_state.sme_state = NULL;
                fp_state.svcr = &vcpu->arch.svcr;
+               fp_state.fpmr = &vcpu->arch.fpmr;
                fp_state.fp_type = &vcpu->arch.fp_type;
 
                if (vcpu_has_sve(vcpu))
index b912b1409fc09aaf08705b8d75b5d221ae0d020e..63283550c8e8670f62847032c3fc8447b490bdff 100644 (file)
@@ -26,6 +26,7 @@ HAS_ECV
 HAS_ECV_CNTPOFF
 HAS_EPAN
 HAS_EVT
 HAS_FGT
+HAS_FPMR
 HAS_FPSIMD
 HAS_GENERIC_AUTH
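
For completeness, a hedged sketch of what the context switching
enables: EL0 code reading and writing FPMR directly. The
S3_3_C4_C4_2 spelling is FPMR's generic system register encoding
(op0=3, op1=3, CRn=4, CRm=4, op2=2) as defined by the sysreg
description in this series, written out explicitly so that
assemblers without FEAT_FPMR support still accept it; verify the
encoding against your toolchain and the Arm ARM:

    #include <stdint.h>
    #include <stdio.h>

    static inline uint64_t read_fpmr(void)
    {
        uint64_t val;
        /* mrs from FPMR via its explicit encoding */
        asm volatile("mrs %0, S3_3_C4_C4_2" : "=r" (val));
        return val;
    }

    static inline void write_fpmr(uint64_t val)
    {
        /* msr to FPMR via its explicit encoding */
        asm volatile("msr S3_3_C4_C4_2, %0" :: "r" (val));
    }

    int main(void)
    {
        /* Only safe once FPMR support has been confirmed (e.g. via
         * the hwcap); otherwise these accesses are undefined. */
        write_fpmr(0);
        printf("FPMR = %#llx\n", (unsigned long long)read_fpmr());
        return 0;
    }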