x86/mm: Validate memory when changing the C-bit
author     Brijesh Singh <brijesh.singh@amd.com>
           Thu, 24 Feb 2022 16:56:01 +0000 (10:56 -0600)
committer  Borislav Petkov <bp@suse.de>
           Wed, 6 Apr 2022 11:24:53 +0000 (13:24 +0200)
Add the functionality needed to change page state from shared to
private and vice versa using the Page State Change VMGEXIT as
documented in the GHCB spec.

Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lore.kernel.org/r/20220307213356.2797205-22-brijesh.singh@amd.com
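
For illustration only (not part of the commit): a minimal PSC descriptor that
flips a single 4K page to the shared state could be built as sketched below,
using the psc_* structures added here plus the existing SNP_PAGE_STATE_SHARED
and RMP_PG_SIZE_4K definitions ('paddr' stands for a hypothetical physical
address of the page):

        struct snp_psc_desc desc = {};

        desc.hdr.cur_entry = 0;                    /* first entry to process */
        desc.hdr.end_entry = 0;                    /* last entry: one entry total */
        desc.entries[0].gfn = paddr >> PAGE_SHIFT; /* guest frame number */
        desc.entries[0].operation = SNP_PAGE_STATE_SHARED;
        desc.entries[0].pagesize = RMP_PG_SIZE_4K; /* RMP_PG_SIZE_4K == 0 */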
arch/x86/include/asm/sev-common.h
arch/x86/include/asm/sev.h
arch/x86/include/uapi/asm/svm.h
arch/x86/kernel/sev.c
arch/x86/mm/mem_encrypt_amd.c

diff --git a/arch/x86/include/asm/sev-common.h b/arch/x86/include/asm/sev-common.h
index f077a6c95e67149e0ef0c71da9231cde7f4c4005..1aa72b5c24902a1f8f188f06c527e760a0bf082b 100644
@@ -105,6 +105,28 @@ enum psc_op {
 
 #define GHCB_HV_FT_SNP                 BIT_ULL(0)
 
+/* SNP Page State Change NAE event */
+#define VMGEXIT_PSC_MAX_ENTRY          253
+
+struct psc_hdr {
+       u16 cur_entry;
+       u16 end_entry;
+       u32 reserved;
+} __packed;
+
+struct psc_entry {
+       u64     cur_page        : 12,
+               gfn             : 40,
+               operation       : 4,
+               pagesize        : 1,
+               reserved        : 7;
+} __packed;
+
+struct snp_psc_desc {
+       struct psc_hdr hdr;
+       struct psc_entry entries[VMGEXIT_PSC_MAX_ENTRY];
+} __packed;
+
 #define GHCB_MSR_TERM_REQ              0x100
 #define GHCB_MSR_TERM_REASON_SET_POS   12
 #define GHCB_MSR_TERM_REASON_SET_MASK  0xf
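
For context (not part of the patch): VMGEXIT_PSC_MAX_ENTRY is 253 because the
whole descriptor has to fit in the GHCB shared buffer, GHCB_SHARED_BUF_SIZE
(2032) bytes: an 8-byte psc_hdr plus 253 eight-byte psc_entry slots is exactly
2032 bytes. A compile-time check along these lines would pin that down:

        /* sizeof(struct psc_hdr) == 8 and sizeof(struct psc_entry) == 8 */
        static_assert(sizeof(struct snp_psc_desc) <= GHCB_SHARED_BUF_SIZE);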
diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index f65d257e3d4acd34f7743ce28647368fa0fd525c..feeb93e6ec97aee82bde83e4fec1a4fc64976a7b 100644
@@ -128,6 +128,8 @@ void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long padd
 void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
                                        unsigned int npages);
 void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op);
+void snp_set_memory_shared(unsigned long vaddr, unsigned int npages);
+void snp_set_memory_private(unsigned long vaddr, unsigned int npages);
 #else
 static inline void sev_es_ist_enter(struct pt_regs *regs) { }
 static inline void sev_es_ist_exit(void) { }
@@ -142,6 +144,8 @@ early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr, unsigned
 static inline void __init
 early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr, unsigned int npages) { }
 static inline void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op) { }
+static inline void snp_set_memory_shared(unsigned long vaddr, unsigned int npages) { }
+static inline void snp_set_memory_private(unsigned long vaddr, unsigned int npages) { }
 #endif
 
 #endif
diff --git a/arch/x86/include/uapi/asm/svm.h b/arch/x86/include/uapi/asm/svm.h
index b0ad00f4c1e1aa862cdc7cb3d19423bdef269d52..64404b47b773a9800fb49b06d7fe517d1b34c923 100644
 #define SVM_VMGEXIT_AP_JUMP_TABLE              0x80000005
 #define SVM_VMGEXIT_SET_AP_JUMP_TABLE          0
 #define SVM_VMGEXIT_GET_AP_JUMP_TABLE          1
+#define SVM_VMGEXIT_PSC                                0x80000010
 #define SVM_VMGEXIT_HV_FEATURES                        0x8000fffd
 #define SVM_VMGEXIT_UNSUPPORTED_EVENT          0x8000ffff
 
        { SVM_VMGEXIT_NMI_COMPLETE,     "vmgexit_nmi_complete" }, \
        { SVM_VMGEXIT_AP_HLT_LOOP,      "vmgexit_ap_hlt_loop" }, \
        { SVM_VMGEXIT_AP_JUMP_TABLE,    "vmgexit_ap_jump_table" }, \
+       { SVM_VMGEXIT_PSC,              "vmgexit_page_state_change" }, \
        { SVM_VMGEXIT_HV_FEATURES,      "vmgexit_hypervisor_feature" }, \
        { SVM_EXIT_ERR,         "invalid_guest_state" }
 
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index e3fca8615fe8f86fe370c681c2e01b0e17c49799..bf4b57835694e04675d01bda554956b925ec3de9 100644
@@ -655,6 +655,174 @@ void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op
                WARN(1, "invalid memory op %d\n", op);
 }
 
+static int vmgexit_psc(struct snp_psc_desc *desc)
+{
+       int cur_entry, end_entry, ret = 0;
+       struct snp_psc_desc *data;
+       struct ghcb_state state;
+       struct es_em_ctxt ctxt;
+       unsigned long flags;
+       struct ghcb *ghcb;
+
+       /*
+        * __sev_get_ghcb() needs to run with IRQs disabled because it is using
+        * a per-CPU GHCB.
+        */
+       local_irq_save(flags);
+
+       ghcb = __sev_get_ghcb(&state);
+       if (!ghcb) {
+               ret = 1;
+               goto out_unlock;
+       }
+
+       /* Copy the input desc into the GHCB shared buffer */
+       data = (struct snp_psc_desc *)ghcb->shared_buffer;
+       memcpy(ghcb->shared_buffer, desc, min_t(int, GHCB_SHARED_BUF_SIZE, sizeof(*desc)));
+
+       /*
+        * As per the GHCB specification, the hypervisor can resume the guest
+        * before processing all the entries. Check whether all the entries
+        * are processed. If not, then keep retrying. Note that the hypervisor
+        * will update the data memory directly to indicate the status, so
+        * reference data->hdr everywhere.
+        *
+        * The strategy here is to wait for the hypervisor to change the page
+        * state in the RMP table before the guest accesses the memory pages.
+        * If the page state change was not successful, a later memory access
+        * will result in a crash.
+        */
+       cur_entry = data->hdr.cur_entry;
+       end_entry = data->hdr.end_entry;
+
+       while (data->hdr.cur_entry <= data->hdr.end_entry) {
+               ghcb_set_sw_scratch(ghcb, (u64)__pa(data));
+
+               /* The call advances data->hdr.cur_entry as the hypervisor processes entries. */
+               ret = sev_es_ghcb_hv_call(ghcb, true, &ctxt, SVM_VMGEXIT_PSC, 0, 0);
+
+               /*
+                * Page State Change VMGEXIT can pass error code through
+                * exit_info_2.
+                */
+               if (WARN(ret || ghcb->save.sw_exit_info_2,
+                        "SNP: PSC failed ret=%d exit_info_2=%llx\n",
+                        ret, ghcb->save.sw_exit_info_2)) {
+                       ret = 1;
+                       goto out;
+               }
+
+               /* Verify that the reserved field is not set */
+               if (WARN(data->hdr.reserved, "Reserved bit is set in the PSC header\n")) {
+                       ret = 1;
+                       goto out;
+               }
+
+               /*
+                * Sanity check that entry processing is not going backwards.
+                * This can happen only if the hypervisor is tricking us.
+                */
+               if (WARN(data->hdr.end_entry > end_entry || cur_entry > data->hdr.cur_entry,
+"SNP: PSC processing going backward, end_entry %d (got %d) cur_entry %d (got %d)\n",
+                        end_entry, data->hdr.end_entry, cur_entry, data->hdr.cur_entry)) {
+                       ret = 1;
+                       goto out;
+               }
+       }
+
+out:
+       __sev_put_ghcb(&state);
+
+out_unlock:
+       local_irq_restore(flags);
+
+       return ret;
+}
+
+static void __set_pages_state(struct snp_psc_desc *data, unsigned long vaddr,
+                             unsigned long vaddr_end, int op)
+{
+       struct psc_hdr *hdr;
+       struct psc_entry *e;
+       unsigned long pfn;
+       int i;
+
+       hdr = &data->hdr;
+       e = data->entries;
+
+       memset(data, 0, sizeof(*data));
+       i = 0;
+
+       while (vaddr < vaddr_end) {
+               if (is_vmalloc_addr((void *)vaddr))
+                       pfn = vmalloc_to_pfn((void *)vaddr);
+               else
+                       pfn = __pa(vaddr) >> PAGE_SHIFT;
+
+               e->gfn = pfn;
+               e->operation = op;
+               hdr->end_entry = i;
+
+               /*
+                * The current SNP implementation doesn't keep track of the RMP
+                * page size, so use 4K for simplicity.
+                */
+               e->pagesize = RMP_PG_SIZE_4K;
+
+               vaddr = vaddr + PAGE_SIZE;
+               e++;
+               i++;
+       }
+
+       if (vmgexit_psc(data))
+               sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
+}
+
+static void set_pages_state(unsigned long vaddr, unsigned int npages, int op)
+{
+       unsigned long vaddr_end, next_vaddr;
+       struct snp_psc_desc *desc;
+
+       desc = kmalloc(sizeof(*desc), GFP_KERNEL_ACCOUNT);
+       if (!desc)
+               panic("SNP: failed to allocate memory for PSC descriptor\n");
+
+       vaddr = vaddr & PAGE_MASK;
+       vaddr_end = vaddr + (npages << PAGE_SHIFT);
+
+       while (vaddr < vaddr_end) {
+               /* Calculate the last vaddr that fits in one struct snp_psc_desc. */
+               next_vaddr = min_t(unsigned long, vaddr_end,
+                                  (VMGEXIT_PSC_MAX_ENTRY * PAGE_SIZE) + vaddr);
+
+               __set_pages_state(desc, vaddr, next_vaddr, op);
+
+               vaddr = next_vaddr;
+       }
+
+       kfree(desc);
+}
+
+void snp_set_memory_shared(unsigned long vaddr, unsigned int npages)
+{
+       if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+               return;
+
+       pvalidate_pages(vaddr, npages, false);
+
+       set_pages_state(vaddr, npages, SNP_PAGE_STATE_SHARED);
+}
+
+void snp_set_memory_private(unsigned long vaddr, unsigned int npages)
+{
+       if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+               return;
+
+       set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE);
+
+       pvalidate_pages(vaddr, npages, true);
+}
+
 int sev_es_setup_ap_jump_table(struct real_mode_header *rmh)
 {
        u16 startup_cs, startup_ip;
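
An aside (illustrative, not part of the patch): set_pages_state() splits a
request into VMGEXIT_PSC_MAX_ENTRY-page chunks, so a range costs one PSC
VMGEXIT per chunk, plus whatever retries the vmgexit_psc() loop performs when
the hypervisor resumes the guest before processing all entries. A hypothetical
helper makes the cost explicit:

        /* Number of PSC VMGEXITs for a 4K-mapped range, ignoring retries. */
        static unsigned int psc_calls_needed(unsigned int npages)
        {
                return DIV_ROUND_UP(npages, VMGEXIT_PSC_MAX_ENTRY);
        }

        /* e.g. 1024 pages (4MB): DIV_ROUND_UP(1024, 253) == 5 VMGEXITs */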
diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c
index 8539dd6f24ff27c6e811c1110ee78520927ef807..d3c88d9ef8d631a1a0e53c13fe0a3f8429e46324 100644
@@ -316,11 +316,24 @@ static void enc_dec_hypercall(unsigned long vaddr, int npages, bool enc)
 
 static void amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool enc)
 {
+       /*
+        * To maintain the security guarantees of SEV-SNP guests, make sure
+        * to invalidate the memory before the encryption attribute is cleared.
+        */
+       if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP) && !enc)
+               snp_set_memory_shared(vaddr, npages);
 }
 
 /* Return true unconditionally: return value doesn't matter for the SEV side */
 static bool amd_enc_status_change_finish(unsigned long vaddr, int npages, bool enc)
 {
+       /*
+        * After memory is mapped encrypted in the page table, validate it
+        * so that it is consistent with the page table updates.
+        */
+       if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP) && enc)
+               snp_set_memory_private(vaddr, npages);
+
        if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
                enc_dec_hypercall(vaddr, npages, enc);
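
These hooks run from the generic C-bit flip path (the
x86_platform.guest.enc_status_change_* callbacks behind
set_memory_encrypted()/set_memory_decrypted()), so existing callers pick up
the SNP page state changes transparently. A hedged usage sketch, assuming a
hypothetical driver buffer 'buf' spanning 'nr_pages' pages:

        unsigned long addr = (unsigned long)buf;

        /* Clear the C-bit; the prepare hook makes the pages shared in the RMP. */
        if (set_memory_decrypted(addr, nr_pages))
                return -EIO;

        /* ... exchange data with the hypervisor via the shared buffer ... */

        /* Restore the C-bit; the finish hook re-validates the private pages. */
        if (set_memory_encrypted(addr, nr_pages))
                return -EIO;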