// SPDX-License-Identifier: GPL-2.0
/*
 * AMD Encrypted Register State Support
 *
 * Author: Joerg Roedel <jroedel@suse.de>
 *
 * This file is not compiled stand-alone. It contains code shared
 * between the pre-decompression boot code and the running Linux kernel
 * and is included directly into both code-bases.
 */
#include <asm/setup_data.h>
#ifndef __BOOT_COMPRESSED
#define error(v)			pr_err(v)
#define has_cpuflag(f)			boot_cpu_has(f)
#define sev_printk(fmt, ...)		printk(fmt, ##__VA_ARGS__)
#define sev_printk_rtl(fmt, ...)	printk_ratelimited(fmt, ##__VA_ARGS__)
#else
#undef WARN
#define WARN(condition, format...) (!!(condition))
#define sev_printk(fmt, ...)
#define sev_printk_rtl(fmt, ...)
#endif
/* I/O parameters for CPUID-related helpers */
struct cpuid_leaf {
	u32 fn;
	u32 subfn;
	u32 eax;
	u32 ebx;
	u32 ecx;
	u32 edx;
};
/*
 * Individual entries of the SNP CPUID table, as defined by the SNP
 * Firmware ABI, Revision 0.9, Section 7.1, Table 14.
 */
struct snp_cpuid_fn {
	u32 eax_in;
	u32 ecx_in;
	u64 xcr0_in;
	u64 xss_in;
	u32 eax;
	u32 ebx;
	u32 ecx;
	u32 edx;
	u64 __reserved;
} __packed;
/*
 * SNP CPUID table, as defined by the SNP Firmware ABI, Revision 0.9,
 * Section 8.14.2.6. Also noted there is the SNP firmware-enforced limit
 * of 64 entries per CPUID table.
 */
#define SNP_CPUID_COUNT_MAX 64

struct snp_cpuid_table {
	u32 count;
	u32 __reserved1;
	u64 __reserved2;
	struct snp_cpuid_fn fn[SNP_CPUID_COUNT_MAX];
} __packed;
/*
 * Since feature negotiation related variables are set early in the boot
 * process they must reside in the .data section so as not to be zeroed
 * out when the .bss section is later cleared.
 *
 * GHCB protocol version negotiated with the hypervisor.
 */
static u16 ghcb_version __ro_after_init;

/* Copy of the SNP firmware's CPUID page. */
static struct snp_cpuid_table cpuid_table_copy __ro_after_init;
/*
 * These will be initialized based on the CPUID table so that non-present
 * all-zero leaves (for sparse tables) can be differentiated from
 * invalid/out-of-range leaves. This is needed since all-zero leaves
 * still need to be post-processed.
 */
static u32 cpuid_std_range_max __ro_after_init;
static u32 cpuid_hyp_range_max __ro_after_init;
static u32 cpuid_ext_range_max __ro_after_init;
static bool __init sev_es_check_cpu_features(void)
{
	if (!has_cpuflag(X86_FEATURE_RDRAND)) {
		error("RDRAND instruction not supported - no trusted source of randomness available\n");
		return false;
	}

	return true;
}
static void __head __noreturn
sev_es_terminate(unsigned int set, unsigned int reason)
{
	u64 val = GHCB_MSR_TERM_REQ;

	/* Tell the hypervisor what went wrong. */
	val |= GHCB_SEV_TERM_REASON(set, reason);

	/* Request Guest Termination from Hypervisor */
	sev_es_wr_ghcb_msr(val);
	VMGEXIT();

	while (true)
		asm volatile("hlt\n" : : : "memory");
}
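/*
 * For illustration: with the GHCB MSR protocol, the termination request
 * above encodes everything in a single MSR write (layout per the GHCB
 * specification; the corresponding macros live in <asm/sev-common.h>):
 *
 *	GHCBData[11:0]  - 0x100 (termination request)
 *	GHCBData[15:12] - reason code set (e.g. SEV_TERM_SET_LINUX)
 *	GHCBData[23:16] - reason code (e.g. GHCB_TERM_REGISTER)
 */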
/*
 * The hypervisor features are available from GHCB version 2 onward.
 */
static u64 get_hv_features(void)
{
	u64 val;

	if (ghcb_version < 2)
		return 0;

	sev_es_wr_ghcb_msr(GHCB_MSR_HV_FT_REQ);
	VMGEXIT();

	val = sev_es_rd_ghcb_msr();
	if (GHCB_RESP_CODE(val) != GHCB_MSR_HV_FT_RESP)
		return 0;

	return GHCB_MSR_HV_FT_RESP_VAL(val);
}
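/*
 * Illustrative use (a sketch, not taken from this file): callers typically
 * cache the returned mask and test individual GHCB_HV_FT_* bits, e.g.:
 *
 *	u64 hv_features = get_hv_features();
 *
 *	if (!(hv_features & GHCB_HV_FT_SNP))
 *		sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
 */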
static void snp_register_ghcb_early(unsigned long paddr)
{
	unsigned long pfn = paddr >> PAGE_SHIFT;
	u64 val;

	sev_es_wr_ghcb_msr(GHCB_MSR_REG_GPA_REQ_VAL(pfn));
	VMGEXIT();

	val = sev_es_rd_ghcb_msr();

	/* If the response GPA is not ours then abort the guest */
	if ((GHCB_RESP_CODE(val) != GHCB_MSR_REG_GPA_RESP) ||
	    (GHCB_MSR_REG_GPA_RESP_VAL(val) != pfn))
		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_REGISTER);
}
static bool sev_es_negotiate_protocol(void)
{
	u64 val;

	/* Do the GHCB protocol version negotiation */
	sev_es_wr_ghcb_msr(GHCB_MSR_SEV_INFO_REQ);
	VMGEXIT();
	val = sev_es_rd_ghcb_msr();

	if (GHCB_MSR_INFO(val) != GHCB_MSR_SEV_INFO_RESP)
		return false;

	if (GHCB_MSR_PROTO_MAX(val) < GHCB_PROTOCOL_MIN ||
	    GHCB_MSR_PROTO_MIN(val) > GHCB_PROTOCOL_MAX)
		return false;

	ghcb_version = min_t(size_t, GHCB_MSR_PROTO_MAX(val), GHCB_PROTOCOL_MAX);

	return true;
}
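/*
 * Illustrative boot-time ordering (a sketch; the real call sites live in
 * the code-bases that include this file, and "ghcb_page" is a placeholder
 * name): the protocol version must be negotiated before the GHCB GPA is
 * registered or any GHCB-based VMGEXITs are issued:
 *
 *	if (!sev_es_negotiate_protocol())
 *		sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
 *	snp_register_ghcb_early(__pa(ghcb_page));
 */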
static __always_inline void vc_ghcb_invalidate(struct ghcb *ghcb)
{
	ghcb->save.sw_exit_code = 0;
	__builtin_memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
}
static bool vc_decoding_needed(unsigned long exit_code)
{
	/* Exceptions don't require decoding the instruction */
	return !(exit_code >= SVM_EXIT_EXCP_BASE &&
		 exit_code <= SVM_EXIT_LAST_EXCP);
}
static enum es_result vc_init_em_ctxt(struct es_em_ctxt *ctxt,
				      struct pt_regs *regs,
				      unsigned long exit_code)
{
	enum es_result ret = ES_OK;

	memset(ctxt, 0, sizeof(*ctxt));
	ctxt->regs = regs;

	if (vc_decoding_needed(exit_code))
		ret = vc_decode_insn(ctxt);

	return ret;
}
static void vc_finish_insn(struct es_em_ctxt *ctxt)
{
	ctxt->regs->ip += ctxt->insn.length;
}
static enum es_result verify_exception_info(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{
	u32 ret;

	ret = ghcb->save.sw_exit_info_1 & GENMASK_ULL(31, 0);
	if (!ret)
		return ES_OK;

	if (ret == 1) {
		u64 info = ghcb->save.sw_exit_info_2;
		unsigned long v = info & SVM_EVTINJ_VEC_MASK;

		/* Check if exception information from hypervisor is sane. */
		if ((info & SVM_EVTINJ_VALID) &&
		    ((v == X86_TRAP_GP) || (v == X86_TRAP_UD)) &&
		    ((info & SVM_EVTINJ_TYPE_MASK) == SVM_EVTINJ_TYPE_EXEPT)) {
			ctxt->fi.vector = v;

			if (info & SVM_EVTINJ_VALID_ERR)
				ctxt->fi.error_code = info >> 32;

			return ES_EXCEPTION;
		}
	}

	return ES_VMM_ERROR;
}
static enum es_result sev_es_ghcb_hv_call(struct ghcb *ghcb,
					  struct es_em_ctxt *ctxt,
					  u64 exit_code, u64 exit_info_1,
					  u64 exit_info_2)
{
	/* Fill in protocol and format specifiers */
	ghcb->protocol_version = ghcb_version;
	ghcb->ghcb_usage       = GHCB_DEFAULT_USAGE;

	ghcb_set_sw_exit_code(ghcb, exit_code);
	ghcb_set_sw_exit_info_1(ghcb, exit_info_1);
	ghcb_set_sw_exit_info_2(ghcb, exit_info_2);

	sev_es_wr_ghcb_msr(__pa(ghcb));
	VMGEXIT();

	return verify_exception_info(ghcb, ctxt);
}
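/*
 * Typical calling pattern (a sketch, assuming a prepared GHCB): invalidate
 * the GHCB, set the input state, issue the call, then trust only registers
 * whose valid-bitmap bits the hypervisor has set:
 *
 *	vc_ghcb_invalidate(ghcb);
 *	ghcb_set_rax(ghcb, ...);
 *	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_..., 0, 0);
 *	if (ret == ES_OK && ghcb_rax_is_valid(ghcb))
 *		... = ghcb->save.rax;
 */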
static int __sev_cpuid_hv(u32 fn, int reg_idx, u32 *reg)
{
	u64 val;

	sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, reg_idx));
	VMGEXIT();
	val = sev_es_rd_ghcb_msr();
	if (GHCB_RESP_CODE(val) != GHCB_MSR_CPUID_RESP)
		return -EIO;

	/* The requested register value is returned in the upper 32 bits. */
	*reg = (val >> 32);

	return 0;
}
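/*
 * MSR-protocol CPUID request layout, for illustration (per the GHCB
 * specification): GHCBData[11:0] = 0x004 (CPUID request), GHCBData[31:30] =
 * register index (0=EAX, 1=EBX, 2=ECX, 3=EDX), GHCBData[63:32] = CPUID
 * function. The response carries the requested register in GHCBData[63:32],
 * hence the (val >> 32) extraction above.
 */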
static int __sev_cpuid_hv_msr(struct cpuid_leaf *leaf)
{
	int ret;

	/*
	 * The MSR protocol does not support fetching non-zero subfunctions,
	 * but is sufficient to handle current early-boot cases. Should that
	 * change, make sure to report an error rather than ignoring the index
	 * and grabbing random values. If this issue arises in the future,
	 * handling can be added here to use the GHCB-page protocol for cases
	 * that occur late enough in boot that the GHCB page is available.
	 */
	if (cpuid_function_is_indexed(leaf->fn) && leaf->subfn)
		return -EINVAL;

	/* Fetch one register per MSR-protocol call, stopping on first error. */
	ret =	      __sev_cpuid_hv(leaf->fn, GHCB_CPUID_REQ_EAX, &leaf->eax);
	ret = ret ? : __sev_cpuid_hv(leaf->fn, GHCB_CPUID_REQ_EBX, &leaf->ebx);
	ret = ret ? : __sev_cpuid_hv(leaf->fn, GHCB_CPUID_REQ_ECX, &leaf->ecx);
	ret = ret ? : __sev_cpuid_hv(leaf->fn, GHCB_CPUID_REQ_EDX, &leaf->edx);

	return ret;
}
static int __sev_cpuid_hv_ghcb(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
{
	u32 cr4 = native_read_cr4();
	int ret;

	ghcb_set_rax(ghcb, leaf->fn);
	ghcb_set_rcx(ghcb, leaf->subfn);

	if (cr4 & X86_CR4_OSXSAVE)
		/* Safe to read xcr0 */
		ghcb_set_xcr0(ghcb, xgetbv(XCR_XFEATURE_ENABLED_MASK));
	else
		/* xgetbv will cause #UD - use reset value for xcr0 */
		ghcb_set_xcr0(ghcb, 1);

	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_CPUID, 0, 0);
	if (ret != ES_OK)
		return ret;

	if (!(ghcb_rax_is_valid(ghcb) &&
	      ghcb_rbx_is_valid(ghcb) &&
	      ghcb_rcx_is_valid(ghcb) &&
	      ghcb_rdx_is_valid(ghcb)))
		return ES_VMM_ERROR;

	leaf->eax = ghcb->save.rax;
	leaf->ebx = ghcb->save.rbx;
	leaf->ecx = ghcb->save.rcx;
	leaf->edx = ghcb->save.rdx;

	return ES_OK;
}
static int sev_cpuid_hv(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
{
	return ghcb ? __sev_cpuid_hv_ghcb(ghcb, ctxt, leaf)
		    : __sev_cpuid_hv_msr(leaf);
}
/*
 * This may be called early while still running on the initial identity
 * mapping. Use RIP-relative addressing to obtain the correct address both
 * while running with the initial identity mapping and after the switch-over
 * to kernel virtual addresses later.
 */
static const struct snp_cpuid_table *snp_cpuid_get_table(void)
{
	return &RIP_REL_REF(cpuid_table_copy);
}
/*
 * The SNP Firmware ABI, Revision 0.9, Section 7.1, details the use of
 * XCR0_IN and XSS_IN to encode multiple versions of 0xD subfunctions 0
 * and 1 based on the corresponding features enabled by a particular
 * combination of XCR0 and XSS registers so that a guest can look up the
 * version corresponding to the features currently enabled in its XCR0/XSS
 * registers. The only value that differs between these versions/table
 * entries is the enabled XSAVE area size advertised via EBX.
 *
 * While hypervisors may choose to make use of this support, it is more
 * robust/secure for a guest to simply find the entry corresponding to the
 * base/legacy XSAVE area size (XCR0=1 or XCR0=3), and then calculate the
 * XSAVE area size using subfunctions 2 through 64, as documented in APM
 * Volume 3, Rev 3.31, Appendix E.3.8, which is what is done here.
 *
 * Since the base/legacy XSAVE area size is documented as 0x240, use that
 * value directly rather than relying on the base size in the CPUID table.
 *
 * Return: XSAVE area size on success, 0 otherwise.
 */
static u32 snp_cpuid_calc_xsave_size(u64 xfeatures_en, bool compacted)
{
	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
	u64 xfeatures_found = 0;
	u32 xsave_size = 0x240;
	int i;

	for (i = 0; i < cpuid_table->count; i++) {
		const struct snp_cpuid_fn *e = &cpuid_table->fn[i];

		if (!(e->eax_in == 0xD && e->ecx_in > 1 && e->ecx_in < 64))
			continue;
		if (!(xfeatures_en & (BIT_ULL(e->ecx_in))))
			continue;
		if (xfeatures_found & (BIT_ULL(e->ecx_in)))
			continue;

		xfeatures_found |= (BIT_ULL(e->ecx_in));

		if (compacted)
			xsave_size += e->eax;
		else
			xsave_size = max(xsave_size, e->eax + e->ebx);
	}

	/*
	 * Either the guest set unsupported XCR0/XSS bits, or the corresponding
	 * entries in the CPUID table were not present. This is not a valid
	 * state to be in.
	 */
	if (xfeatures_found != (xfeatures_en & GENMASK_ULL(63, 2)))
		return 0;

	return xsave_size;
}
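/*
 * Worked example (illustrative): with xfeatures_en = 0x7 (x87, SSE, AVX),
 * only the 0xD subfunction-2 (AVX) table entry passes the loop's filters.
 * In the non-compacted format the result is max(0x240, e->eax + e->ebx),
 * i.e. the AVX state size plus its offset; in the compacted format each
 * enabled feature's size is instead accumulated: xsave_size += e->eax.
 */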
static bool __head
snp_cpuid_get_validated_func(struct cpuid_leaf *leaf)
{
	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
	int i;

	for (i = 0; i < cpuid_table->count; i++) {
		const struct snp_cpuid_fn *e = &cpuid_table->fn[i];

		if (e->eax_in != leaf->fn)
			continue;

		if (cpuid_function_is_indexed(leaf->fn) && e->ecx_in != leaf->subfn)
			continue;

		/*
		 * For 0xD subfunctions 0 and 1, only use the entry corresponding
		 * to the base/legacy XSAVE area size (XCR0=1 or XCR0=3, XSS=0).
		 * See the comments above snp_cpuid_calc_xsave_size() for more
		 * details.
		 */
		if (e->eax_in == 0xD && (e->ecx_in == 0 || e->ecx_in == 1))
			if (!(e->xcr0_in == 1 || e->xcr0_in == 3) || e->xss_in)
				continue;

		leaf->eax = e->eax;
		leaf->ebx = e->ebx;
		leaf->ecx = e->ecx;
		leaf->edx = e->edx;

		return true;
	}

	return false;
}
static void snp_cpuid_hv(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
{
	if (sev_cpuid_hv(ghcb, ctxt, leaf))
		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_CPUID_HV);
}
static int snp_cpuid_postprocess(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
				 struct cpuid_leaf *leaf)
{
	struct cpuid_leaf leaf_hv = *leaf;

	switch (leaf->fn) {
	case 0x1:
		snp_cpuid_hv(ghcb, ctxt, &leaf_hv);

		/* initial APIC ID */
		leaf->ebx = (leaf_hv.ebx & GENMASK(31, 24)) | (leaf->ebx & GENMASK(23, 0));
		/* APIC enabled bit */
		leaf->edx = (leaf_hv.edx & BIT(9)) | (leaf->edx & ~BIT(9));

		/* OSXSAVE enabled bit */
		if (native_read_cr4() & X86_CR4_OSXSAVE)
			leaf->ecx |= BIT(27);
		break;
	case 0x7:
		/* OSPKE enabled bit */
		leaf->ecx &= ~BIT(4);
		if (native_read_cr4() & X86_CR4_PKE)
			leaf->ecx |= BIT(4);
		break;
	case 0xB:
		leaf_hv.subfn = 0;
		snp_cpuid_hv(ghcb, ctxt, &leaf_hv);

		/* extended APIC ID */
		leaf->edx = leaf_hv.edx;
		break;
	case 0xD: {
		bool compacted = false;
		u64 xcr0 = 1, xss = 0;
		u32 xsave_size;

		if (leaf->subfn != 0 && leaf->subfn != 1)
			return 0;

		if (native_read_cr4() & X86_CR4_OSXSAVE)
			xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
		if (leaf->subfn == 1) {
			/* Get XSS value if XSAVES is enabled. */
			if (leaf->eax & BIT(3)) {
				unsigned long lo, hi;

				asm volatile("rdmsr" : "=a" (lo), "=d" (hi)
						     : "c" (MSR_IA32_XSS));
				xss = (hi << 32) | lo;
			}

			/*
			 * The PPR and APM aren't clear on what size should be
			 * encoded in 0xD:0x1:EBX when compaction is not enabled
			 * by either XSAVEC (feature bit 1) or XSAVES (feature
			 * bit 3) since SNP-capable hardware has these feature
			 * bits fixed as 1. KVM sets it to 0 in this case, but
			 * to avoid this becoming an issue it's safer to simply
			 * treat this as unsupported for SNP guests.
			 */
			if (!(leaf->eax & (BIT(1) | BIT(3))))
				return -EINVAL;

			compacted = true;
		}

		xsave_size = snp_cpuid_calc_xsave_size(xcr0 | xss, compacted);
		if (!xsave_size)
			return -EINVAL;

		leaf->ebx = xsave_size;
		break;
	}
	case 0x8000001E:
		snp_cpuid_hv(ghcb, ctxt, &leaf_hv);

		/* extended APIC ID */
		leaf->eax = leaf_hv.eax;
		/* compute unit ID */
		leaf->ebx = (leaf->ebx & GENMASK(31, 8)) | (leaf_hv.ebx & GENMASK(7, 0));
		/* node ID */
		leaf->ecx = (leaf->ecx & GENMASK(31, 8)) | (leaf_hv.ecx & GENMASK(7, 0));
		break;
	default:
		/* No fix-ups needed, use values as-is. */
		break;
	}

	return 0;
}
/*
 * Returns -EOPNOTSUPP if the feature is not enabled. Any other non-zero
 * return value should be treated as fatal by the caller.
 */
static int __head
snp_cpuid(struct ghcb *ghcb, struct es_em_ctxt *ctxt, struct cpuid_leaf *leaf)
{
	const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();

	if (!cpuid_table->count)
		return -EOPNOTSUPP;

	if (!snp_cpuid_get_validated_func(leaf)) {
		/*
		 * Some hypervisors will avoid keeping track of CPUID entries
		 * where all values are zero, since they can be handled the
		 * same as out-of-range values (all-zero). This is useful here
		 * as well, as it allows virtually all guest configurations to
		 * work using a single SNP CPUID table.
		 *
		 * To allow for this, there is a need to distinguish between
		 * out-of-range entries and in-range zero entries, since the
		 * CPUID table entries are only a template that may need to be
		 * augmented with additional values for things like
		 * CPU-specific information during post-processing. So if it's
		 * not in the table, set the values to zero. Then, if they are
		 * within a valid CPUID range, proceed with post-processing
		 * using zeros as the initial values. Otherwise, skip
		 * post-processing and just return zeros immediately.
		 */
		leaf->eax = leaf->ebx = leaf->ecx = leaf->edx = 0;

		/* Skip post-processing for out-of-range zero leafs. */
		if (!(leaf->fn <= RIP_REL_REF(cpuid_std_range_max) ||
		      (leaf->fn >= 0x40000000 && leaf->fn <= RIP_REL_REF(cpuid_hyp_range_max)) ||
		      (leaf->fn >= 0x80000000 && leaf->fn <= RIP_REL_REF(cpuid_ext_range_max))))
			return 0;
	}

	return snp_cpuid_postprocess(ghcb, ctxt, leaf);
}
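/*
 * Usage sketch (illustrative): callers fill in fn/subfn from the
 * intercepted CPUID instruction and treat -EOPNOTSUPP as "no SNP CPUID
 * table, fall back to the hypervisor", e.g.:
 *
 *	struct cpuid_leaf leaf;
 *
 *	leaf.fn = regs->ax;
 *	leaf.subfn = regs->cx;
 *	ret = snp_cpuid(ghcb, ctxt, &leaf);
 *	if (ret == -EOPNOTSUPP)
 *		... fall back to the GHCB/MSR protocol ...
 */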
/*
 * Boot VC Handler - This is the first #VC handler during boot; there is no
 * GHCB page yet, so it only supports the MSR-based communication with the
 * hypervisor and only the CPUID exit-code.
 */
void __head do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
{
	unsigned int subfn = lower_bits(regs->cx, 32);
	unsigned int fn = lower_bits(regs->ax, 32);
	u16 opcode = *(unsigned short *)regs->ip;
	struct cpuid_leaf leaf;
	int ret;

	/* Only CPUID is supported via MSR protocol */
	if (exit_code != SVM_EXIT_CPUID)
		goto fail;

	/* Is it really a CPUID insn? */
	if (opcode != 0xa20f)
		goto fail;

	leaf.fn = fn;
	leaf.subfn = subfn;

	ret = snp_cpuid(NULL, NULL, &leaf);
	if (!ret)
		goto cpuid_done;

	if (ret != -EOPNOTSUPP)
		goto fail;

	if (__sev_cpuid_hv_msr(&leaf))
		goto fail;

cpuid_done:
	regs->ax = leaf.eax;
	regs->bx = leaf.ebx;
	regs->cx = leaf.ecx;
	regs->dx = leaf.edx;

	/*
	 * This is a #VC handler and the #VC is only raised when SEV-ES is
	 * active, which means SEV must be active too. Do sanity checks on the
	 * CPUID results to make sure the hypervisor does not trick the kernel
	 * into the no-sev path. This could map sensitive data unencrypted and
	 * make it accessible to the hypervisor.
	 *
	 * In particular, check for:
	 *	- Availability of CPUID leaf 0x8000001f
	 *	- SEV CPUID bit.
	 *
	 * The hypervisor might still report the wrong C-bit position, but this
	 * can't be checked here.
	 */

	if (fn == 0x80000000 && (regs->ax < 0x8000001f))
		/* SEV leaf check */
		goto fail;
	else if ((fn == 0x8000001f && !(regs->ax & BIT(1))))
		/* SEV bit */
		goto fail;

	/* Skip over the CPUID two-byte opcode */
	regs->ip += 2;

	return;

fail:
	/* Terminate the guest */
	sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
}
static enum es_result vc_insn_string_check(struct es_em_ctxt *ctxt,
					   unsigned long address,
					   bool write)
{
	if (user_mode(ctxt->regs) && fault_in_kernel_space(address)) {
		ctxt->fi.vector     = X86_TRAP_PF;
		ctxt->fi.error_code = X86_PF_USER;
		ctxt->fi.cr2        = address;
		if (write)
			ctxt->fi.error_code |= X86_PF_WRITE;

		return ES_EXCEPTION;
	}

	return ES_OK;
}
static enum es_result vc_insn_string_read(struct es_em_ctxt *ctxt,
					  void *src, char *buf,
					  unsigned int data_size,
					  unsigned int count,
					  bool backwards)
{
	int i, b = backwards ? -1 : 1;
	unsigned long address = (unsigned long)src;
	enum es_result ret;

	ret = vc_insn_string_check(ctxt, address, false);
	if (ret != ES_OK)
		return ret;

	for (i = 0; i < count; i++) {
		void *s = src + (i * data_size * b);
		char *d = buf + (i * data_size);

		ret = vc_read_mem(ctxt, s, d, data_size);
		if (ret != ES_OK)
			break;
	}

	return ret;
}
static enum es_result vc_insn_string_write(struct es_em_ctxt *ctxt,
					   void *dst, char *buf,
					   unsigned int data_size,
					   unsigned int count,
					   bool backwards)
{
	int i, s = backwards ? -1 : 1;
	unsigned long address = (unsigned long)dst;
	enum es_result ret;

	ret = vc_insn_string_check(ctxt, address, true);
	if (ret != ES_OK)
		return ret;

	for (i = 0; i < count; i++) {
		void *d = dst + (i * data_size * s);
		char *b = buf + (i * data_size);

		ret = vc_write_mem(ctxt, d, b, data_size);
		if (ret != ES_OK)
			break;
	}

	return ret;
}
#define IOIO_TYPE_STR	BIT(2)
#define IOIO_TYPE_IN	1
#define IOIO_TYPE_INS	(IOIO_TYPE_IN | IOIO_TYPE_STR)
#define IOIO_TYPE_OUT	0
#define IOIO_TYPE_OUTS	(IOIO_TYPE_OUT | IOIO_TYPE_STR)

#define IOIO_REP	BIT(3)

#define IOIO_ADDR_64	BIT(9)
#define IOIO_ADDR_32	BIT(8)
#define IOIO_ADDR_16	BIT(7)

#define IOIO_DATA_32	BIT(6)
#define IOIO_DATA_16	BIT(5)
#define IOIO_DATA_8	BIT(4)

#define IOIO_SEG_ES	(0 << 10)
#define IOIO_SEG_DS	(3 << 10)
static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
{
	struct insn *insn = &ctxt->insn;
	size_t size;
	u64 port;

	*exitinfo = 0;

	switch (insn->opcode.bytes[0]) {
	/* INS opcodes */
	case 0x6c:
	case 0x6d:
		*exitinfo |= IOIO_TYPE_INS;
		*exitinfo |= IOIO_SEG_ES;
		port	   = ctxt->regs->dx & 0xffff;
		break;

	/* OUTS opcodes */
	case 0x6e:
	case 0x6f:
		*exitinfo |= IOIO_TYPE_OUTS;
		*exitinfo |= IOIO_SEG_DS;
		port	   = ctxt->regs->dx & 0xffff;
		break;

	/* IN immediate opcodes */
	case 0xe4:
	case 0xe5:
		*exitinfo |= IOIO_TYPE_IN;
		port	   = (u8)insn->immediate.value & 0xffff;
		break;

	/* OUT immediate opcodes */
	case 0xe6:
	case 0xe7:
		*exitinfo |= IOIO_TYPE_OUT;
		port	   = (u8)insn->immediate.value & 0xffff;
		break;

	/* IN register opcodes */
	case 0xec:
	case 0xed:
		*exitinfo |= IOIO_TYPE_IN;
		port	   = ctxt->regs->dx & 0xffff;
		break;

	/* OUT register opcodes */
	case 0xee:
	case 0xef:
		*exitinfo |= IOIO_TYPE_OUT;
		port	   = ctxt->regs->dx & 0xffff;
		break;

	default:
		return ES_DECODE_FAILED;
	}

	*exitinfo |= port << 16;

	switch (insn->opcode.bytes[0]) {
	case 0x6c:
	case 0x6e:
	case 0xe4:
	case 0xe6:
	case 0xec:
	case 0xee:
		/* Single byte opcodes */
		*exitinfo |= IOIO_DATA_8;
		size       = 1;
		break;
	default:
		/* Length determined by instruction parsing */
		*exitinfo |= (insn->opnd_bytes == 2) ? IOIO_DATA_16
						     : IOIO_DATA_32;
		size       = (insn->opnd_bytes == 2) ? 2 : 4;
	}

	switch (insn->addr_bytes) {
	case 2:
		*exitinfo |= IOIO_ADDR_16;
		break;
	case 4:
		*exitinfo |= IOIO_ADDR_32;
		break;
	case 8:
		*exitinfo |= IOIO_ADDR_64;
		break;
	}

	if (insn_has_rep_prefix(insn))
		*exitinfo |= IOIO_REP;

	return vc_ioio_check(ctxt, (u16)port, size);
}
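/*
 * Worked example (illustrative): for "outb %al, $0x80" the decoder above
 * yields IOIO_TYPE_OUT | IOIO_DATA_8 | (0x80 << 16) plus the IOIO_ADDR_*
 * bit matching insn->addr_bytes, and no IOIO_REP bit since the instruction
 * carries no rep prefix.
 */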
static enum es_result vc_handle_ioio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{
	struct pt_regs *regs = ctxt->regs;
	u64 exit_info_1, exit_info_2;
	enum es_result ret;

	ret = vc_ioio_exitinfo(ctxt, &exit_info_1);
	if (ret != ES_OK)
		return ret;

	if (exit_info_1 & IOIO_TYPE_STR) {

		/* (REP) INS/OUTS */

		bool df = ((regs->flags & X86_EFLAGS_DF) == X86_EFLAGS_DF);
		unsigned int io_bytes, exit_bytes;
		unsigned int ghcb_count, op_count;
		unsigned long es_base;
		u64 sw_scratch;

		/*
		 * For the string variants with rep prefix the amount of in/out
		 * operations per #VC exception is limited so that the kernel
		 * has a chance to take interrupts and re-schedule while the
		 * instruction is emulated.
		 */
		io_bytes   = (exit_info_1 >> 4) & 0x7;
		ghcb_count = sizeof(ghcb->shared_buffer) / io_bytes;

		op_count    = (exit_info_1 & IOIO_REP) ? regs->cx : 1;
		exit_info_2 = min(op_count, ghcb_count);
		exit_bytes  = exit_info_2 * io_bytes;

		es_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_ES);

		/* Read bytes of OUTS into the shared buffer */
		if (!(exit_info_1 & IOIO_TYPE_IN)) {
			ret = vc_insn_string_read(ctxt,
					       (void *)(es_base + regs->si),
					       ghcb->shared_buffer, io_bytes,
					       exit_info_2, df);
			if (ret)
				return ret;
		}

		/*
		 * Issue a VMGEXIT to the HV to consume the bytes from the
		 * shared buffer or to have it write them into the shared buffer
		 * depending on the instruction: OUTS or INS.
		 */
		sw_scratch = __pa(ghcb) + offsetof(struct ghcb, shared_buffer);
		ghcb_set_sw_scratch(ghcb, sw_scratch);
		ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_IOIO,
					  exit_info_1, exit_info_2);
		if (ret != ES_OK)
			return ret;

		/* Read bytes from shared buffer into the guest's destination. */
		if (exit_info_1 & IOIO_TYPE_IN) {
			ret = vc_insn_string_write(ctxt,
						   (void *)(es_base + regs->di),
						   ghcb->shared_buffer, io_bytes,
						   exit_info_2, df);
			if (ret)
				return ret;

			if (df)
				regs->di -= exit_bytes;
			else
				regs->di += exit_bytes;
		} else {
			if (df)
				regs->si -= exit_bytes;
			else
				regs->si += exit_bytes;
		}

		if (exit_info_1 & IOIO_REP)
			regs->cx -= exit_info_2;

		ret = regs->cx ? ES_RETRY : ES_OK;
	} else {

		/* IN/OUT into/from rAX */

		int bits = (exit_info_1 & 0x70) >> 1;
		u64 rax = 0;

		if (!(exit_info_1 & IOIO_TYPE_IN))
			rax = lower_bits(regs->ax, bits);

		ghcb_set_rax(ghcb, rax);

		ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_IOIO, exit_info_1, 0);
		if (ret != ES_OK)
			return ret;

		if (exit_info_1 & IOIO_TYPE_IN) {
			if (!ghcb_rax_is_valid(ghcb))
				return ES_VMM_ERROR;
			regs->ax = lower_bits(ghcb->save.rax, bits);
		}
	}

	return ret;
}
static int vc_handle_cpuid_snp(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{
	struct pt_regs *regs = ctxt->regs;
	struct cpuid_leaf leaf;
	int ret;

	leaf.fn = regs->ax;
	leaf.subfn = regs->cx;
	ret = snp_cpuid(ghcb, ctxt, &leaf);
	if (!ret) {
		regs->ax = leaf.eax;
		regs->bx = leaf.ebx;
		regs->cx = leaf.ecx;
		regs->dx = leaf.edx;
	}

	return ret;
}
static enum es_result vc_handle_cpuid(struct ghcb *ghcb,
				      struct es_em_ctxt *ctxt)
{
	struct pt_regs *regs = ctxt->regs;
	u32 cr4 = native_read_cr4();
	enum es_result ret;
	int snp_cpuid_ret;

	snp_cpuid_ret = vc_handle_cpuid_snp(ghcb, ctxt);
	if (!snp_cpuid_ret)
		return ES_OK;
	if (snp_cpuid_ret != -EOPNOTSUPP)
		return ES_VMM_ERROR;

	ghcb_set_rax(ghcb, regs->ax);
	ghcb_set_rcx(ghcb, regs->cx);

	if (cr4 & X86_CR4_OSXSAVE)
		/* Safe to read xcr0 */
		ghcb_set_xcr0(ghcb, xgetbv(XCR_XFEATURE_ENABLED_MASK));
	else
		/* xgetbv will cause #GP - use reset value for xcr0 */
		ghcb_set_xcr0(ghcb, 1);

	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_CPUID, 0, 0);
	if (ret != ES_OK)
		return ret;

	if (!(ghcb_rax_is_valid(ghcb) &&
	      ghcb_rbx_is_valid(ghcb) &&
	      ghcb_rcx_is_valid(ghcb) &&
	      ghcb_rdx_is_valid(ghcb)))
		return ES_VMM_ERROR;

	regs->ax = ghcb->save.rax;
	regs->bx = ghcb->save.rbx;
	regs->cx = ghcb->save.rcx;
	regs->dx = ghcb->save.rdx;

	return ES_OK;
}
static enum es_result vc_handle_rdtsc(struct ghcb *ghcb,
				      struct es_em_ctxt *ctxt,
				      unsigned long exit_code)
{
	bool rdtscp = (exit_code == SVM_EXIT_RDTSCP);
	enum es_result ret;

	ret = sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, 0, 0);
	if (ret != ES_OK)
		return ret;

	if (!(ghcb_rax_is_valid(ghcb) && ghcb_rdx_is_valid(ghcb) &&
	      (!rdtscp || ghcb_rcx_is_valid(ghcb))))
		return ES_VMM_ERROR;

	ctxt->regs->ax = ghcb->save.rax;
	ctxt->regs->dx = ghcb->save.rdx;
	if (rdtscp)
		ctxt->regs->cx = ghcb->save.rcx;

	return ES_OK;
}
struct cc_setup_data {
	struct setup_data header;
	u32 cc_blob_address;
};
/*
 * Search for a Confidential Computing blob passed in as a setup_data entry
 * via the Linux Boot Protocol.
 */
static __head
struct cc_blob_sev_info *find_cc_blob_setup_data(struct boot_params *bp)
{
	struct cc_setup_data *sd = NULL;
	struct setup_data *hdr;

	hdr = (struct setup_data *)bp->hdr.setup_data;

	while (hdr) {
		if (hdr->type == SETUP_CC_BLOB) {
			sd = (struct cc_setup_data *)hdr;
			return (struct cc_blob_sev_info *)(unsigned long)sd->cc_blob_address;
		}

		hdr = (struct setup_data *)hdr->next;
	}

	return NULL;
}
/*
 * Initialize the kernel's copy of the SNP CPUID table, and set up the
 * pointer that will be used to access it.
 *
 * Maintaining a direct mapping of the SNP CPUID table used by firmware would
 * be possible as an alternative, but the approach is brittle since the
 * mapping needs to be updated in sync with all the changes to virtual memory
 * layout and related mapping facilities throughout the boot process.
 */
static void __head setup_cpuid_table(const struct cc_blob_sev_info *cc_info)
{
	const struct snp_cpuid_table *cpuid_table_fw, *cpuid_table;
	int i;

	if (!cc_info || !cc_info->cpuid_phys || cc_info->cpuid_len < PAGE_SIZE)
		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_CPUID);

	cpuid_table_fw = (const struct snp_cpuid_table *)cc_info->cpuid_phys;
	if (!cpuid_table_fw->count || cpuid_table_fw->count > SNP_CPUID_COUNT_MAX)
		sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_CPUID);

	cpuid_table = snp_cpuid_get_table();
	memcpy((void *)cpuid_table, cpuid_table_fw, sizeof(*cpuid_table));

	/* Initialize CPUID ranges for range-checking. */
	for (i = 0; i < cpuid_table->count; i++) {
		const struct snp_cpuid_fn *fn = &cpuid_table->fn[i];

		if (fn->eax_in == 0x0)
			RIP_REL_REF(cpuid_std_range_max) = fn->eax;
		else if (fn->eax_in == 0x40000000)
			RIP_REL_REF(cpuid_hyp_range_max) = fn->eax;
		else if (fn->eax_in == 0x80000000)
			RIP_REL_REF(cpuid_ext_range_max) = fn->eax;
	}
}
static void pvalidate_pages(struct snp_psc_desc *desc)
{
	struct psc_entry *e;
	unsigned long vaddr;
	unsigned int size;
	unsigned int i;
	bool validate;
	int rc;

	for (i = 0; i <= desc->hdr.end_entry; i++) {
		e = &desc->entries[i];

		vaddr = (unsigned long)pfn_to_kaddr(e->gfn);
		size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K;
		validate = e->operation == SNP_PAGE_STATE_PRIVATE;

		rc = pvalidate(vaddr, size, validate);
		if (rc == PVALIDATE_FAIL_SIZEMISMATCH && size == RMP_PG_SIZE_2M) {
			unsigned long vaddr_end = vaddr + PMD_SIZE;

			/* Fall back to 4K validation across the 2M range. */
			for (; vaddr < vaddr_end; vaddr += PAGE_SIZE) {
				rc = pvalidate(vaddr, RMP_PG_SIZE_4K, validate);
				if (rc)
					break;
			}
		}

		if (rc) {
			WARN(1, "Failed to validate address 0x%lx ret %d", vaddr, rc);
			sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
		}
	}
}
static int vmgexit_psc(struct ghcb *ghcb, struct snp_psc_desc *desc)
{
	int cur_entry, end_entry, ret = 0;
	struct snp_psc_desc *data;
	struct es_em_ctxt ctxt;

	vc_ghcb_invalidate(ghcb);

	/* Copy the input desc into GHCB shared buffer */
	data = (struct snp_psc_desc *)ghcb->shared_buffer;
	memcpy(ghcb->shared_buffer, desc, min_t(int, GHCB_SHARED_BUF_SIZE, sizeof(*desc)));

	/*
	 * As per the GHCB specification, the hypervisor can resume the guest
	 * before processing all the entries. Check whether all the entries
	 * are processed. If not, then keep retrying. Note, the hypervisor
	 * will update the data memory directly to indicate the status, so
	 * reference the data->hdr everywhere.
	 *
	 * The strategy here is to wait for the hypervisor to change the page
	 * state in the RMP table before the guest accesses the memory pages.
	 * If the page state change was not successful, then later memory
	 * access will result in a crash.
	 */
	cur_entry = data->hdr.cur_entry;
	end_entry = data->hdr.end_entry;

	while (data->hdr.cur_entry <= data->hdr.end_entry) {
		ghcb_set_sw_scratch(ghcb, (u64)__pa(data));

		/* This will advance the shared buffer data points to. */
		ret = sev_es_ghcb_hv_call(ghcb, &ctxt, SVM_VMGEXIT_PSC, 0, 0);

		/*
		 * Page State Change VMGEXIT can pass error code through
		 * exit_info_2.
		 */
		if (WARN(ret || ghcb->save.sw_exit_info_2,
			 "SNP: PSC failed ret=%d exit_info_2=%llx\n",
			 ret, ghcb->save.sw_exit_info_2)) {
			ret = 1;
			goto out;
		}

		/* Verify that reserved bit is not set */
		if (WARN(data->hdr.reserved, "Reserved bit is set in the PSC header\n")) {
			ret = 1;
			goto out;
		}

		/*
		 * Sanity check that entry processing is not going backwards.
		 * This will happen only if the hypervisor is tricking us.
		 */
		if (WARN(data->hdr.end_entry > end_entry || cur_entry > data->hdr.cur_entry,
			 "SNP: PSC processing going backward, end_entry %d (got %d) cur_entry %d (got %d)\n",
			 end_entry, data->hdr.end_entry, cur_entry, data->hdr.cur_entry)) {
			ret = 1;
			goto out;
		}
	}

out:
	return ret;
}
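/*
 * Illustrative sketch of a minimal PSC request for one 4K page (assumes a
 * zeroed descriptor; field names as used by pvalidate_pages() above):
 *
 *	desc->hdr.cur_entry = 0;
 *	desc->hdr.end_entry = 0;
 *	desc->entries[0].gfn = paddr >> PAGE_SHIFT;
 *	desc->entries[0].operation = SNP_PAGE_STATE_SHARED;
 *	desc->entries[0].pagesize = RMP_PG_SIZE_4K;
 *	ret = vmgexit_psc(ghcb, desc);
 */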
static enum es_result vc_check_opcode_bytes(struct es_em_ctxt *ctxt,
					    unsigned long exit_code)
{
	unsigned int opcode = (unsigned int)ctxt->insn.opcode.value;
	u8 modrm = ctxt->insn.modrm.value;

	switch (exit_code) {
	case SVM_EXIT_IOIO:
	case SVM_EXIT_NPF:
		/* handled separately */
		return ES_OK;

	case SVM_EXIT_CPUID:
		if (opcode == 0xa20f)
			return ES_OK;
		break;

	case SVM_EXIT_INVD:
		if (opcode == 0x080f)
			return ES_OK;
		break;

	case SVM_EXIT_MONITOR:
		if (opcode == 0x010f && modrm == 0xc8)
			return ES_OK;
		break;

	case SVM_EXIT_MWAIT:
		if (opcode == 0x010f && modrm == 0xc9)
			return ES_OK;
		break;

	case SVM_EXIT_MSR:
		/* RDMSR */
		if (opcode == 0x320f ||
		/* WRMSR */
		    opcode == 0x300f)
			return ES_OK;
		break;

	case SVM_EXIT_RDPMC:
		if (opcode == 0x330f)
			return ES_OK;
		break;

	case SVM_EXIT_RDTSC:
		if (opcode == 0x310f)
			return ES_OK;
		break;

	case SVM_EXIT_RDTSCP:
		if (opcode == 0x010f && modrm == 0xf9)
			return ES_OK;
		break;

	case SVM_EXIT_READ_DR7:
		if (opcode == 0x210f &&
		    X86_MODRM_REG(ctxt->insn.modrm.value) == 7)
			return ES_OK;
		break;

	case SVM_EXIT_VMMCALL:
		if (opcode == 0x010f && modrm == 0xd9)
			return ES_OK;
		break;

	case SVM_EXIT_WRITE_DR7:
		if (opcode == 0x230f &&
		    X86_MODRM_REG(ctxt->insn.modrm.value) == 7)
			return ES_OK;
		break;

	case SVM_EXIT_WBINVD:
		if (opcode == 0x90f)
			return ES_OK;
		break;

	default:
		break;
	}

	sev_printk(KERN_ERR "Wrong/unhandled opcode bytes: 0x%x, exit_code: 0x%lx, rIP: 0x%lx\n",
		   opcode, exit_code, ctxt->regs->ip);

	return ES_UNSUPPORTED;
}