x86/tsc: Init the TSC for Secure TSC guests
authorNikunj A Dadhania <nikunj@amd.com>
Mon, 6 Jan 2025 12:46:30 +0000 (18:16 +0530)
committerBorislav Petkov (AMD) <bp@alien8.de>
Wed, 8 Jan 2025 20:26:19 +0000 (21:26 +0100)
Use the GUEST_TSC_FREQ MSR to discover the TSC frequency instead of
relying on kvm-clock-based frequency calibration. Override both the CPU and
TSC frequency calibration callbacks with securetsc_get_tsc_khz(). Since the
distinction between the CPU base frequency and the TSC frequency does not
apply in this case, the same callback is used for both.

  [ bp: Carve out from
    https://lore.kernel.org/r/20250106124633.1418972-11-nikunj@amd.com ]

Signed-off-by: Nikunj A Dadhania <nikunj@amd.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20250106124633.1418972-11-nikunj@amd.com
arch/x86/coco/sev/core.c
arch/x86/include/asm/sev.h
arch/x86/kernel/tsc.c

index 106bdeda58c51607ba3315810e273f3bfaa23cef..65d676c0f7bc843cf72023c7bf3bbf862552b7d5 100644 (file)
@@ -103,6 +103,7 @@ static u64 secrets_pa __ro_after_init;
  */
 static u64 snp_tsc_scale __ro_after_init;
 static u64 snp_tsc_offset __ro_after_init;
+static u64 snp_tsc_freq_khz __ro_after_init;
 
 /* #VC handler runtime per-CPU data */
 struct sev_es_runtime_data {
@@ -3278,3 +3279,23 @@ void __init snp_secure_tsc_prepare(void)
 
        pr_debug("SecureTSC enabled");
 }
+
+static unsigned long securetsc_get_tsc_khz(void)
+{
+       return snp_tsc_freq_khz;
+}
+
+void __init snp_secure_tsc_init(void)
+{
+       unsigned long long tsc_freq_mhz;
+
+       if (!cc_platform_has(CC_ATTR_GUEST_SNP_SECURE_TSC))
+               return;
+
+       setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
+       rdmsrl(MSR_AMD64_GUEST_TSC_FREQ, tsc_freq_mhz);
+       snp_tsc_freq_khz = (unsigned long)(tsc_freq_mhz * 1000);
+
+       x86_platform.calibrate_cpu = securetsc_get_tsc_khz;
+       x86_platform.calibrate_tsc = securetsc_get_tsc_khz;
+}
index bdcdaac4df1c59af0e932617456d5c2e89fefc99..5d9685f92e5c36bb82472637c66150f6b6fce215 100644 (file)
@@ -482,6 +482,7 @@ int snp_send_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req
                           struct snp_guest_request_ioctl *rio);
 
 void __init snp_secure_tsc_prepare(void);
+void __init snp_secure_tsc_init(void);
 
 #else  /* !CONFIG_AMD_MEM_ENCRYPT */
 
@@ -524,6 +525,7 @@ static inline void snp_msg_free(struct snp_msg_desc *mdesc) { }
 static inline int snp_send_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req,
                                         struct snp_guest_request_ioctl *rio) { return -ENODEV; }
 static inline void __init snp_secure_tsc_prepare(void) { }
+static inline void __init snp_secure_tsc_init(void) { }
 
 #endif /* CONFIG_AMD_MEM_ENCRYPT */
 
index 67aeaba4ba9c86017e62ba26ea98cd1502987c79..0864b314c26a2d9d4f86dfcdf8cc37fcb96f583d 100644 (file)
@@ -30,6 +30,7 @@
 #include <asm/i8259.h>
 #include <asm/topology.h>
 #include <asm/uv/uv.h>
+#include <asm/sev.h>
 
 unsigned int __read_mostly cpu_khz;    /* TSC clocks / usec, not used here */
 EXPORT_SYMBOL(cpu_khz);
@@ -1515,6 +1516,9 @@ void __init tsc_early_init(void)
        /* Don't change UV TSC multi-chassis synchronization */
        if (is_early_uv_system())
                return;
+
+       snp_secure_tsc_init();
+
        if (!determine_cpu_tsc_frequencies(true))
                return;
        tsc_enable_sched_clock();