x86/bugs: Add a Transient Scheduler Attacks mitigation
author	Borislav Petkov (AMD) <bp@alien8.de>
Wed, 11 Sep 2024 08:53:08 +0000 (10:53 +0200)
committer	Borislav Petkov (AMD) <bp@alien8.de>
Tue, 17 Jun 2025 15:17:02 +0000 (17:17 +0200)
Add the required feature detection glue to bugs.c et al. in order to
support the TSA mitigation.

Co-developed-by: Kim Phillips <kim.phillips@amd.com>
Signed-off-by: Kim Phillips <kim.phillips@amd.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Reviewed-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
13 files changed:
Documentation/ABI/testing/sysfs-devices-system-cpu
Documentation/admin-guide/kernel-parameters.txt
arch/x86/Kconfig
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/mwait.h
arch/x86/include/asm/nospec-branch.h
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/scattered.c
arch/x86/kvm/svm/vmenter.S
drivers/base/cpu.c
include/linux/cpu.h
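Once this lands, the selected mitigation state can be read back from the new
sysfs file. A minimal user-space sketch (illustrative only, not part of this
patch):

	#include <stdio.h>

	int main(void)
	{
		char buf[128];
		FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/tsa", "r");

		if (!f) {
			perror("fopen"); /* file absent on kernels without this patch */
			return 1;
		}
		if (fgets(buf, sizeof(buf), f))
			fputs(buf, stdout); /* e.g. "Mitigation: Clear CPU buffers" */
		fclose(f);
		return 0;
	}

The possible strings are the tsa_strings[] entries added to bugs.c below.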

index bf85f4de6862b197b28231df42ff3a3db7d25e19..ab8cd337f43aaddbfa21bcd42b52448eb5bf65c2 100644 (file)
@@ -584,6 +584,7 @@ What:               /sys/devices/system/cpu/vulnerabilities
                /sys/devices/system/cpu/vulnerabilities/spectre_v1
                /sys/devices/system/cpu/vulnerabilities/spectre_v2
                /sys/devices/system/cpu/vulnerabilities/srbds
+               /sys/devices/system/cpu/vulnerabilities/tsa
                /sys/devices/system/cpu/vulnerabilities/tsx_async_abort
 Date:          January 2018
 Contact:       Linux kernel mailing list <linux-kernel@vger.kernel.org>
index f1f2c0874da9ddfc95058c464fdf5dabaf0de713..07e22ba5bfe34354033127ca21031963cd11745b 100644 (file)
                        having this key zero'ed is acceptable. E.g. in testing
                        scenarios.
 
+       tsa=            [X86] Control mitigation for Transient Scheduler
+                       Attacks on AMD CPUs. Search the following in your
+                       favourite search engine for more details:
+
+                       "Technical guidance for mitigating transient scheduler
+                       attacks".
+
+                       off             - disable the mitigation
+                       on              - enable the mitigation (default)
+                       user            - mitigate only user/kernel transitions
+                       vm              - mitigate only guest/host transitions
+
+
        tsc=            Disable clocksource stability checks for TSC.
                        Format: <string>
                        [x86] reliable: mark tsc clocksource as reliable, this
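A usage sketch (not part of the patch): booting an affected machine with

	tsa=vm

leaves user/kernel transitions unmitigated but still clears the CPU buffers
before VMRUN; this selects the TSA_MITIGATION_VM case wired up in bugs.c
below.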
index 340e5468980e070092990efb537dd37f24ddee69..71dfe7d7c786e0c7ccef54c202662605c097341a 100644 (file)
@@ -2695,6 +2695,15 @@ config MITIGATION_ITS
          disabled, mitigation cannot be enabled via cmdline.
          See <file:Documentation/admin-guide/hw-vuln/indirect-target-selection.rst>
 
+config MITIGATION_TSA
+       bool "Mitigate Transient Scheduler Attacks"
+       depends on CPU_SUP_AMD
+       default y
+       help
+         Enable mitigation for Transient Scheduler Attacks. TSA is a hardware
+         security vulnerability on AMD CPUs which can lead to forwarding of
+         invalid data to subsequent instructions, affecting their timing and
+         thereby causing an information leak.
 endif
 
 config ARCH_HAS_ADD_PAGES
index ee176236c2be99086ac9df76cf3208001a4c1dae..286d509f9363bc069587aae29762ad7051059787 100644 (file)
 #define X86_FEATURE_NO_NESTED_DATA_BP  (20*32+ 0) /* No Nested Data Breakpoints */
 #define X86_FEATURE_WRMSR_XX_BASE_NS   (20*32+ 1) /* WRMSR to {FS,GS,KERNEL_GS}_BASE is non-serializing */
 #define X86_FEATURE_LFENCE_RDTSC       (20*32+ 2) /* LFENCE always serializing / synchronizes RDTSC */
+#define X86_FEATURE_VERW_CLEAR         (20*32+ 5) /* The memory form of VERW mitigates TSA */
 #define X86_FEATURE_NULL_SEL_CLR_BASE  (20*32+ 6) /* Null Selector Clears Base */
 #define X86_FEATURE_AUTOIBRS           (20*32+ 8) /* Automatic IBRS */
 #define X86_FEATURE_NO_SMM_CTL_MSR     (20*32+ 9) /* SMM_CTL MSR is not present */
 #define X86_FEATURE_PREFER_YMM         (21*32+ 8) /* Avoid ZMM registers due to downclocking */
 #define X86_FEATURE_APX                        (21*32+ 9) /* Advanced Performance Extensions */
 #define X86_FEATURE_INDIRECT_THUNK_ITS (21*32+10) /* Use thunk for indirect branches in lower half of cacheline */
+#define X86_FEATURE_TSA_SQ_NO          (21*32+11) /* AMD CPU not vulnerable to TSA-SQ */
+#define X86_FEATURE_TSA_L1_NO          (21*32+12) /* AMD CPU not vulnerable to TSA-L1 */
+#define X86_FEATURE_CLEAR_CPU_BUF_VM   (21*32+13) /* Clear CPU buffers using VERW before VMRUN */
 
 /*
  * BUG word(s)
 #define X86_BUG_OLD_MICROCODE          X86_BUG( 1*32+ 6) /* "old_microcode" CPU has old microcode, it is surely vulnerable to something */
 #define X86_BUG_ITS                    X86_BUG( 1*32+ 7) /* "its" CPU is affected by Indirect Target Selection */
 #define X86_BUG_ITS_NATIVE_ONLY                X86_BUG( 1*32+ 8) /* "its_native_only" CPU is affected by ITS, VMX is not affected */
-
+#define X86_BUG_TSA                    X86_BUG( 1*32+ 9) /* "tsa" CPU is affected by Transient Scheduler Attacks */
 #endif /* _ASM_X86_CPUFEATURES_H */
index cc34c3fd197b5381fe1418d8231e80d22619edb2..82bd9eb73b3ca644f47f4609d235814908a7b174 100644 (file)
@@ -80,7 +80,7 @@ static __always_inline void __mwait(u32 eax, u32 ecx)
  */
 static __always_inline void __mwaitx(u32 eax, u32 ebx, u32 ecx)
 {
-       /* No MDS buffer clear as this is AMD/HYGON only */
+       /* No need for TSA buffer clearing on AMD */
 
        /* "mwaitx %eax, %ebx, %ecx" */
        asm volatile(".byte 0x0f, 0x01, 0xfb"
index 5dcd75bb5e0ddc1a011ae29dd201890f5ac20780..10f261678749a717328cb1a3df3ff6bd89260f44 100644 (file)
  * CFLAGS.ZF.
  * Note: Only the memory operand variant of VERW clears the CPU buffers.
  */
-.macro CLEAR_CPU_BUFFERS
+.macro __CLEAR_CPU_BUFFERS feature
 #ifdef CONFIG_X86_64
-       ALTERNATIVE "", "verw x86_verw_sel(%rip)", X86_FEATURE_CLEAR_CPU_BUF
+       ALTERNATIVE "", "verw x86_verw_sel(%rip)", \feature
 #else
        /*
         * In 32bit mode, the memory operand must be a %cs reference. The data
         * segments may not be usable (vm86 mode), and the stack segment may not
         * be flat (ESPFIX32).
         */
-       ALTERNATIVE "", "verw %cs:x86_verw_sel", X86_FEATURE_CLEAR_CPU_BUF
+       ALTERNATIVE "", "verw %cs:x86_verw_sel", \feature
 #endif
 .endm
 
+#define CLEAR_CPU_BUFFERS \
+       __CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF
+
+#define VM_CLEAR_CPU_BUFFERS \
+       __CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF_VM
+
 #ifdef CONFIG_X86_64
 .macro CLEAR_BRANCH_HISTORY
        ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP
@@ -602,7 +608,7 @@ static __always_inline void x86_clear_cpu_buffers(void)
 
 /**
  * x86_idle_clear_cpu_buffers - Buffer clearing support in idle for the MDS
- * vulnerability
+ * and TSA vulnerabilities.
  *
  * Clear CPU buffers if the corresponding static key is enabled
  */
index 93da466dfe2cb5b057291c1e3cc29b7706a29f79..23c535871a7e359d0b7d9d517606ea1ab80d25b0 100644 (file)
@@ -377,6 +377,47 @@ static void bsp_determine_snp(struct cpuinfo_x86 *c)
 #endif
 }
 
+#define ZEN_MODEL_STEP_UCODE(fam, model, step, ucode) \
+       X86_MATCH_VFM_STEPS(VFM_MAKE(X86_VENDOR_AMD, fam, model), \
+                           step, step, ucode)
+
+static const struct x86_cpu_id amd_tsa_microcode[] = {
+       ZEN_MODEL_STEP_UCODE(0x19, 0x01, 0x1, 0x0a0011d7),
+       ZEN_MODEL_STEP_UCODE(0x19, 0x01, 0x2, 0x0a00123b),
+       ZEN_MODEL_STEP_UCODE(0x19, 0x08, 0x2, 0x0a00820d),
+       ZEN_MODEL_STEP_UCODE(0x19, 0x11, 0x1, 0x0a10114c),
+       ZEN_MODEL_STEP_UCODE(0x19, 0x11, 0x2, 0x0a10124c),
+       ZEN_MODEL_STEP_UCODE(0x19, 0x18, 0x1, 0x0a108109),
+       ZEN_MODEL_STEP_UCODE(0x19, 0x21, 0x0, 0x0a20102e),
+       ZEN_MODEL_STEP_UCODE(0x19, 0x21, 0x2, 0x0a201211),
+       ZEN_MODEL_STEP_UCODE(0x19, 0x44, 0x1, 0x0a404108),
+       ZEN_MODEL_STEP_UCODE(0x19, 0x50, 0x0, 0x0a500012),
+       ZEN_MODEL_STEP_UCODE(0x19, 0x61, 0x2, 0x0a60120a),
+       ZEN_MODEL_STEP_UCODE(0x19, 0x74, 0x1, 0x0a704108),
+       ZEN_MODEL_STEP_UCODE(0x19, 0x75, 0x2, 0x0a705208),
+       ZEN_MODEL_STEP_UCODE(0x19, 0x78, 0x0, 0x0a708008),
+       ZEN_MODEL_STEP_UCODE(0x19, 0x7c, 0x0, 0x0a70c008),
+       ZEN_MODEL_STEP_UCODE(0x19, 0xa0, 0x2, 0x0aa00216),
+       {},
+};
+
+static void tsa_init(struct cpuinfo_x86 *c)
+{
+       if (cpu_has(c, X86_FEATURE_HYPERVISOR))
+               return;
+
+       if (cpu_has(c, X86_FEATURE_ZEN3) ||
+           cpu_has(c, X86_FEATURE_ZEN4)) {
+               if (x86_match_min_microcode_rev(amd_tsa_microcode))
+                       setup_force_cpu_cap(X86_FEATURE_VERW_CLEAR);
+               else
+                       pr_debug("%s: current revision: 0x%x\n", __func__, c->microcode);
+       } else {
+               setup_force_cpu_cap(X86_FEATURE_TSA_SQ_NO);
+               setup_force_cpu_cap(X86_FEATURE_TSA_L1_NO);
+       }
+}
+
 static void bsp_init_amd(struct cpuinfo_x86 *c)
 {
        if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
@@ -489,6 +530,9 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
        }
 
        bsp_determine_snp(c);
+
+       tsa_init(c);
+
        return;
 
 warn:
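The amd_tsa_microcode[] table above pins, per Zen3/Zen4 model/stepping, the
minimum microcode revision that implements the VERW-based buffer clearing.
For example, a Fam 0x19 Model 0x61 Stepping 2 part needs revision 0x0a60120a
or newer before X86_FEATURE_VERW_CLEAR is forced on. Conceptually, the check
one table entry contributes reduces to (a sketch with hypothetical names; the
real code goes through x86_match_min_microcode_rev()):

	#include <stdbool.h>
	#include <stdint.h>

	/* Sketch: minimum-revision test for the Fam 0x19/0x61/0x2 entry. */
	static bool tsa_ucode_ok(uint8_t fam, uint8_t model, uint8_t step,
				 uint32_t rev)
	{
		return fam == 0x19 && model == 0x61 && step == 0x2 &&
		       rev >= 0x0a60120a;
	}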
index 258ed3d2b6a9730d96f78ed07e5b8931ebd51a0b..f4d3abb12317a7c93029c3a457c7a3d53e3d3b96 100644 (file)
@@ -94,6 +94,8 @@ static void __init bhi_apply_mitigation(void);
 static void __init its_select_mitigation(void);
 static void __init its_update_mitigation(void);
 static void __init its_apply_mitigation(void);
+static void __init tsa_select_mitigation(void);
+static void __init tsa_apply_mitigation(void);
 
 /* The base value of the SPEC_CTRL MSR without task-specific bits set */
 u64 x86_spec_ctrl_base;
@@ -225,6 +227,7 @@ void __init cpu_select_mitigations(void)
        gds_select_mitigation();
        its_select_mitigation();
        bhi_select_mitigation();
+       tsa_select_mitigation();
 
        /*
         * After mitigations are selected, some may need to update their
@@ -272,6 +275,7 @@ void __init cpu_select_mitigations(void)
        gds_apply_mitigation();
        its_apply_mitigation();
        bhi_apply_mitigation();
+       tsa_apply_mitigation();
 }
 
 /*
@@ -1487,6 +1491,94 @@ static void __init its_apply_mitigation(void)
        set_return_thunk(its_return_thunk);
 }
 
+#undef pr_fmt
+#define pr_fmt(fmt)    "Transient Scheduler Attacks: " fmt
+
+enum tsa_mitigations {
+       TSA_MITIGATION_NONE,
+       TSA_MITIGATION_AUTO,
+       TSA_MITIGATION_UCODE_NEEDED,
+       TSA_MITIGATION_USER_KERNEL,
+       TSA_MITIGATION_VM,
+       TSA_MITIGATION_FULL,
+};
+
+static const char * const tsa_strings[] = {
+       [TSA_MITIGATION_NONE]           = "Vulnerable",
+       [TSA_MITIGATION_UCODE_NEEDED]   = "Vulnerable: No microcode",
+       [TSA_MITIGATION_USER_KERNEL]    = "Mitigation: Clear CPU buffers: user/kernel boundary",
+       [TSA_MITIGATION_VM]             = "Mitigation: Clear CPU buffers: VM",
+       [TSA_MITIGATION_FULL]           = "Mitigation: Clear CPU buffers",
+};
+
+static enum tsa_mitigations tsa_mitigation __ro_after_init =
+       IS_ENABLED(CONFIG_MITIGATION_TSA) ? TSA_MITIGATION_AUTO : TSA_MITIGATION_NONE;
+
+static int __init tsa_parse_cmdline(char *str)
+{
+       if (!str)
+               return -EINVAL;
+
+       if (!strcmp(str, "off"))
+               tsa_mitigation = TSA_MITIGATION_NONE;
+       else if (!strcmp(str, "on"))
+               tsa_mitigation = TSA_MITIGATION_FULL;
+       else if (!strcmp(str, "user"))
+               tsa_mitigation = TSA_MITIGATION_USER_KERNEL;
+       else if (!strcmp(str, "vm"))
+               tsa_mitigation = TSA_MITIGATION_VM;
+       else
+               pr_err("Ignoring unknown tsa=%s option.\n", str);
+
+       return 0;
+}
+early_param("tsa", tsa_parse_cmdline);
+
+static void __init tsa_select_mitigation(void)
+{
+       if (cpu_mitigations_off() || !boot_cpu_has_bug(X86_BUG_TSA)) {
+               tsa_mitigation = TSA_MITIGATION_NONE;
+               return;
+       }
+
+       if (tsa_mitigation == TSA_MITIGATION_NONE)
+               return;
+
+       if (!boot_cpu_has(X86_FEATURE_VERW_CLEAR)) {
+               tsa_mitigation = TSA_MITIGATION_UCODE_NEEDED;
+               goto out;
+       }
+
+       if (tsa_mitigation == TSA_MITIGATION_AUTO)
+               tsa_mitigation = TSA_MITIGATION_FULL;
+
+       /*
+        * No need to set verw_clear_cpu_buf_mitigation_selected - it
+        * doesn't fit all cases here and it is not needed because this
+        * is the only VERW-based mitigation on AMD.
+        */
+out:
+       pr_info("%s\n", tsa_strings[tsa_mitigation]);
+}
+
+static void __init tsa_apply_mitigation(void)
+{
+       switch (tsa_mitigation) {
+       case TSA_MITIGATION_USER_KERNEL:
+               setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
+               break;
+       case TSA_MITIGATION_VM:
+               setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
+               break;
+       case TSA_MITIGATION_FULL:
+               setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
+               setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
+               break;
+       default:
+               break;
+       }
+}
+
 #undef pr_fmt
 #define pr_fmt(fmt)     "Spectre V2 : " fmt
 
@@ -2316,6 +2408,25 @@ void cpu_bugs_smt_update(void)
                break;
        }
 
+       switch (tsa_mitigation) {
+       case TSA_MITIGATION_USER_KERNEL:
+       case TSA_MITIGATION_VM:
+       case TSA_MITIGATION_AUTO:
+       case TSA_MITIGATION_FULL:
+               /*
+                * TSA-SQ can potentially lead to info leakage between
+                * SMT threads.
+                */
+               if (sched_smt_active())
+                       static_branch_enable(&cpu_buf_idle_clear);
+               else
+                       static_branch_disable(&cpu_buf_idle_clear);
+               break;
+       case TSA_MITIGATION_NONE:
+       case TSA_MITIGATION_UCODE_NEEDED:
+               break;
+       }
+
        mutex_unlock(&spec_ctrl_mutex);
 }
 
@@ -3265,6 +3376,11 @@ static ssize_t gds_show_state(char *buf)
        return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]);
 }
 
+static ssize_t tsa_show_state(char *buf)
+{
+       return sysfs_emit(buf, "%s\n", tsa_strings[tsa_mitigation]);
+}
+
 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
                               char *buf, unsigned int bug)
 {
@@ -3328,6 +3444,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
        case X86_BUG_ITS:
                return its_show_state(buf);
 
+       case X86_BUG_TSA:
+               return tsa_show_state(buf);
+
        default:
                break;
        }
@@ -3414,6 +3533,11 @@ ssize_t cpu_show_indirect_target_selection(struct device *dev, struct device_att
 {
        return cpu_show_common(dev, attr, buf, X86_BUG_ITS);
 }
+
+ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       return cpu_show_common(dev, attr, buf, X86_BUG_TSA);
+}
 #endif
 
 void __warn_thunk(void)
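Putting the bugs.c pieces together, on an affected CPU with the updated
microcode the cmdline values map to the sysfs output as follows (derived from
tsa_parse_cmdline() and tsa_strings[] above; without the microcode, on/user/vm
all degrade to "Vulnerable: No microcode"):

	tsa=off   -> "Vulnerable"
	tsa=on    -> "Mitigation: Clear CPU buffers"
	tsa=user  -> "Mitigation: Clear CPU buffers: user/kernel boundary"
	tsa=vm    -> "Mitigation: Clear CPU buffers: VM"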
index 8feb8fd2957adb13b4f8ce77425809a13c9681fe..f7b9fca82bdac4e35d334a05cea193ac95c30c20 100644 (file)
@@ -1233,6 +1233,8 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
 #define ITS            BIT(8)
 /* CPU is affected by Indirect Target Selection, but guest-host isolation is not affected */
 #define ITS_NATIVE_ONLY        BIT(9)
+/* CPU is affected by Transient Scheduler Attacks */
+#define TSA            BIT(10)
 
 static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
        VULNBL_INTEL_STEPS(INTEL_IVYBRIDGE,          X86_STEP_MAX,      SRBDS),
@@ -1280,7 +1282,7 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
        VULNBL_AMD(0x16, RETBLEED),
        VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO),
        VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO),
-       VULNBL_AMD(0x19, SRSO),
+       VULNBL_AMD(0x19, SRSO | TSA),
        VULNBL_AMD(0x1a, SRSO),
        {}
 };
@@ -1530,6 +1532,16 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
                        setup_force_cpu_bug(X86_BUG_ITS_NATIVE_ONLY);
        }
 
+       if (c->x86_vendor == X86_VENDOR_AMD) {
+               if (!cpu_has(c, X86_FEATURE_TSA_SQ_NO) ||
+                   !cpu_has(c, X86_FEATURE_TSA_L1_NO)) {
+                       if (cpu_matches(cpu_vuln_blacklist, TSA) ||
+                           /* Enable bug on Zen guests to allow for live migration. */
+                           (cpu_has(c, X86_FEATURE_HYPERVISOR) && cpu_has(c, X86_FEATURE_ZEN)))
+                               setup_force_cpu_bug(X86_BUG_TSA);
+               }
+       }
+
        if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
                return;
 
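Distilled, the condition above marks a CPU with X86_BUG_TSA iff it is AMD,
does not advertise both TSA_SQ_NO and TSA_L1_NO, and either matches the
family 0x19 blacklist entry or is any Zen guest; the latter keeps live
migration to an affected host safe. As a boolean sketch with illustrative
names:

	/* Sketch of the X86_BUG_TSA predicate; names are illustrative. */
	bool tsa_affected = is_amd &&
			    !(has_tsa_sq_no && has_tsa_l1_no) &&
			    (matches_blacklist || (is_guest && is_zen));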
index dbf6d71bdf18b48154cd4788843dddab091f9080..b4a1f6732a3aad4d2dc760bb35f30cde27c9a759 100644 (file)
@@ -50,6 +50,8 @@ static const struct cpuid_bit cpuid_bits[] = {
        { X86_FEATURE_MBA,                      CPUID_EBX,  6, 0x80000008, 0 },
        { X86_FEATURE_SMBA,                     CPUID_EBX,  2, 0x80000020, 0 },
        { X86_FEATURE_BMEC,                     CPUID_EBX,  3, 0x80000020, 0 },
+       { X86_FEATURE_TSA_SQ_NO,                CPUID_ECX,  1, 0x80000021, 0 },
+       { X86_FEATURE_TSA_L1_NO,                CPUID_ECX,  2, 0x80000021, 0 },
        { X86_FEATURE_AMD_WORKLOAD_CLASS,       CPUID_EAX, 22, 0x80000021, 0 },
        { X86_FEATURE_PERFMON_V2,               CPUID_EAX,  0, 0x80000022, 0 },
        { X86_FEATURE_AMD_LBR_V2,               CPUID_EAX,  1, 0x80000022, 0 },
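The two new synthetic bits above come straight from CPUID leaf 0x80000021,
ECX bits 1 and 2. A small user-space sketch to read them directly (GCC/clang
provide __cpuid_count() and __get_cpuid_max() in <cpuid.h>):

	#include <cpuid.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		if (__get_cpuid_max(0x80000000, NULL) < 0x80000021) {
			puts("CPUID leaf 0x80000021 not implemented");
			return 1;
		}

		/* Extended leaf 0x80000021, subleaf 0 */
		__cpuid_count(0x80000021, 0, eax, ebx, ecx, edx);

		printf("TSA_SQ_NO: %u\n", (ecx >> 1) & 1);
		printf("TSA_L1_NO: %u\n", (ecx >> 2) & 1);
		return 0;
	}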
index 0c61153b275f64e527b4a9e39857c2f95e848de5..235c4af6b692a4618ad24e530eb3a19672f69a38 100644 (file)
@@ -169,6 +169,9 @@ SYM_FUNC_START(__svm_vcpu_run)
 #endif
        mov VCPU_RDI(%_ASM_DI), %_ASM_DI
 
+       /* Clobbers EFLAGS.ZF */
+       VM_CLEAR_CPU_BUFFERS
+
        /* Enter guest mode */
 3:     vmrun %_ASM_AX
 4:
@@ -335,6 +338,9 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
        mov SVM_current_vmcb(%rdi), %rax
        mov KVM_VMCB_pa(%rax), %rax
 
+       /* Clobbers EFLAGS.ZF */
+       VM_CLEAR_CPU_BUFFERS
+
        /* Enter guest mode */
 1:     vmrun %rax
 2:
index 7779ab0ca7ce62b92e61f65e368fdb7b75e0384b..efc575a00edda96cd579528ef2d6080f52d4e812 100644 (file)
@@ -602,6 +602,7 @@ CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling);
 CPU_SHOW_VULN_FALLBACK(ghostwrite);
 CPU_SHOW_VULN_FALLBACK(old_microcode);
 CPU_SHOW_VULN_FALLBACK(indirect_target_selection);
+CPU_SHOW_VULN_FALLBACK(tsa);
 
 static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
 static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
@@ -620,6 +621,7 @@ static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling
 static DEVICE_ATTR(ghostwrite, 0444, cpu_show_ghostwrite, NULL);
 static DEVICE_ATTR(old_microcode, 0444, cpu_show_old_microcode, NULL);
 static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL);
+static DEVICE_ATTR(tsa, 0444, cpu_show_tsa, NULL);
 
 static struct attribute *cpu_root_vulnerabilities_attrs[] = {
        &dev_attr_meltdown.attr,
@@ -639,6 +641,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
        &dev_attr_ghostwrite.attr,
        &dev_attr_old_microcode.attr,
        &dev_attr_indirect_target_selection.attr,
+       &dev_attr_tsa.attr,
        NULL
 };
 
index 96a3a0d6a60edc27db4c2b83448002782ca74c5f..6378370a952f656562ed7daf6e95cb4d3d226b65 100644 (file)
@@ -82,6 +82,7 @@ extern ssize_t cpu_show_old_microcode(struct device *dev,
                                      struct device_attribute *attr, char *buf);
 extern ssize_t cpu_show_indirect_target_selection(struct device *dev,
                                                  struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf);
 
 extern __printf(4, 5)
 struct device *cpu_device_create(struct device *parent, void *drvdata,