arm64/sme: Early CPU setup for SME
authorMark Brown <broonie@kernel.org>
Tue, 19 Apr 2022 11:22:15 +0000 (12:22 +0100)
committerCatalin Marinas <catalin.marinas@arm.com>
Fri, 22 Apr 2022 17:50:47 +0000 (18:50 +0100)
SME requires similar setup to that for SVE: disable traps to EL2 and
make sure that the maximum vector length is available to EL1. For SME we
have two traps - one for SME itself and one for TPIDR2.

In addition, since we currently make no active use of priority control
for SMCUs, we map all SME priorities that lower ELs may configure to 0,
the architecture-specified minimum priority, to ensure that nothing we
manage is able to configure itself to consume excessive resources.  This
will need to be revisited should there be a need to manage SME
priorities at runtime.

Signed-off-by: Mark Brown <broonie@kernel.org>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lore.kernel.org/r/20220419112247.711548-8-broonie@kernel.org
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
arch/arm64/include/asm/el2_setup.h

index c31be7eda9df413aaef23c8a59a883efccf0047a..fabdbde0fe02d12d3ce4f29c88dd06ea62bb1827 100644 (file)
 .Lskip_sve_\@:
 .endm
 
+/* SME register access and priority mapping */
+.macro __init_el2_nvhe_sme
+       // NOTE(review): x0 appears to carry the CPTR_EL2 value prepared by an
+       // earlier setup macro (it is modified and written below) — confirm
+       // against the caller.  Clobbers x1 and x2.
+       //
+       // Skip all SME setup if FEAT_SME is not implemented
+       // (ID_AA64PFR1_EL1.SME == 0).
+       mrs     x1, id_aa64pfr1_el1
+       ubfx    x1, x1, #ID_AA64PFR1_SME_SHIFT, #4
+       cbz     x1, .Lskip_sme_\@
+
+       bic     x0, x0, #CPTR_EL2_TSM           // Also disable SME traps
+       msr     cptr_el2, x0                    // Disable copro. traps to EL2
+       isb
+
+       // SCTLR_ELx.EnTP2 is the second SME-related trap: clearing it would
+       // trap EL0/EL1 accesses to TPIDR2_EL0, so set it to disable the trap.
+       mrs     x1, sctlr_el2
+       orr     x1, x1, #SCTLR_ELx_ENTP2        // Disable TPIDR2 traps
+       msr     sctlr_el2, x1
+       isb
+
+       mov     x1, #0                          // SMCR controls
+
+       // Advertise FA64 (full FP/SIMD while in streaming mode) to lower ELs
+       // only if ID_AA64SMFR0_EL1.FA64 says the CPU implements it.
+       mrs_s   x2, SYS_ID_AA64SMFR0_EL1
+       ubfx    x2, x2, #ID_AA64SMFR0_FA64_SHIFT, #1 // Full FP in SM?
+       cbz     x2, .Lskip_sme_fa64_\@
+
+       orr     x1, x1, SMCR_ELx_FA64_MASK
+.Lskip_sme_fa64_\@:
+
+       // All-ones LEN requests the maximum implemented streaming vector
+       // length; the hardware constrains it to what is actually supported.
+       orr     x1, x1, #SMCR_ELx_LEN_MASK      // Enable full SME vector
+       msr_s   SYS_SMCR_EL2, x1                // length for EL1.
+
+       // Priority mapping is optional (SMIDR_EL1.SMPS); without it the
+       // remaining setup does not apply.
+       mrs_s   x1, SYS_SMIDR_EL1               // Priority mapping supported?
+       ubfx    x1, x1, #SYS_SMIDR_EL1_SMPS_SHIFT, #1
+       cbz     x1, .Lskip_sme_\@
+
+       // Map every priority lower ELs may set to 0, the architectural
+       // minimum, so no SME work we manage can claim excessive resources.
+       msr_s   SYS_SMPRIMAP_EL2, xzr           // Make all priorities equal
+
+       // SMPME lives in HCRX_EL2, which is itself optional (FEAT_HCX,
+       // ID_AA64MMFR1_EL1.HCX) — only touch it when present.
+       mrs     x1, id_aa64mmfr1_el1            // HCRX_EL2 present?
+       ubfx    x1, x1, #ID_AA64MMFR1_HCX_SHIFT, #4
+       cbz     x1, .Lskip_sme_\@
+
+       mrs_s   x1, SYS_HCRX_EL2
+       orr     x1, x1, #HCRX_EL2_SMPME_MASK    // Enable priority mapping
+       msr_s   SYS_HCRX_EL2, x1
+
+.Lskip_sme_\@:
+.endm
+
 /* Disable any fine grained traps */
 .macro __init_el2_fgt
        mrs     x1, id_aa64mmfr0_el1
        mrs     x1, id_aa64dfr0_el1
        ubfx    x1, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4
        cmp     x1, #3
-       b.lt    .Lset_fgt_\@
+       b.lt    .Lset_debug_fgt_\@
        /* Disable PMSNEVFR_EL1 read and write traps */
        orr     x0, x0, #(1 << 62)
 
-.Lset_fgt_\@:
+.Lset_debug_fgt_\@:
        msr_s   SYS_HDFGRTR_EL2, x0
        msr_s   SYS_HDFGWTR_EL2, x0
-       msr_s   SYS_HFGRTR_EL2, xzr
-       msr_s   SYS_HFGWTR_EL2, xzr
+
+       mov     x0, xzr
+       mrs     x1, id_aa64pfr1_el1
+       ubfx    x1, x1, #ID_AA64PFR1_SME_SHIFT, #4
+       cbz     x1, .Lset_fgt_\@
+
+       /* Disable nVHE traps of TPIDR2 and SMPRI */
+       orr     x0, x0, #HFGxTR_EL2_nSMPRI_EL1_MASK
+       orr     x0, x0, #HFGxTR_EL2_nTPIDR2_EL0_MASK
+
+.Lset_fgt_\@:
+       msr_s   SYS_HFGRTR_EL2, x0
+       msr_s   SYS_HFGWTR_EL2, x0
        msr_s   SYS_HFGITR_EL2, xzr
 
        mrs     x1, id_aa64pfr0_el1             // AMU traps UNDEF without AMU
        __init_el2_nvhe_idregs
        __init_el2_nvhe_cptr
        __init_el2_nvhe_sve
+       __init_el2_nvhe_sme
        __init_el2_fgt
        __init_el2_nvhe_prepare_eret
 .endm