/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define DISABLE_BRANCH_PROFILING

/*
 * Since we're dealing with identity mappings, physical and virtual
 * addresses are the same, so override these defines which are ultimately
 * used by the headers in misc.h.
 */
#define __pa(x)	((unsigned long)(x))
#define __va(x)	((void *)((unsigned long)(x)))

/*
 * Special hack: we have to be careful, because no indirections are
 * allowed here, and paravirt_ops is a kind of one. As it will only run in
 * baremetal anyway, we just keep it from happening. (This list needs to
 * be extended when new paravirt and debugging variants are added.)
 */
#undef CONFIG_PARAVIRT
#undef CONFIG_PARAVIRT_SPINLOCKS

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mem_encrypt.h>

#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/cmdline.h>

#include "mm_internal.h"

#define PGD_FLAGS		_KERNPG_TABLE_NOENC
#define P4D_FLAGS		_KERNPG_TABLE_NOENC
#define PUD_FLAGS		_KERNPG_TABLE_NOENC
#define PMD_FLAGS		_KERNPG_TABLE_NOENC

#define PMD_FLAGS_LARGE		(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)

#define PMD_FLAGS_DEC		PMD_FLAGS_LARGE
#define PMD_FLAGS_DEC_WP	((PMD_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
				 (_PAGE_PAT | _PAGE_PWT))
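
/*
 * For the write-protected (_WP) variants, clearing _PAGE_CACHE_MASK and
 * setting _PAGE_PAT | _PAGE_PWT selects a PAT entry with a non-writeback
 * memory type so that, per the comment in sme_encrypt_kernel(), the
 * decrypted alias of the kernel is not cached while the kernel is being
 * encrypted in place.
 */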

#define PMD_FLAGS_ENC		(PMD_FLAGS_LARGE | _PAGE_ENC)

#define PTE_FLAGS		(__PAGE_KERNEL_EXEC & ~_PAGE_GLOBAL)

#define PTE_FLAGS_DEC		PTE_FLAGS
#define PTE_FLAGS_DEC_WP	((PTE_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
				 (_PAGE_PAT | _PAGE_PWT))

#define PTE_FLAGS_ENC		(PTE_FLAGS | _PAGE_ENC)

struct sme_populate_pgd_data {
	void	*pgtable_area;
	pgd_t	*pgd;

	pmdval_t pmd_flags;
	pteval_t pte_flags;
	unsigned long paddr;

	unsigned long vaddr;
	unsigned long vaddr_end;
};

static char sme_cmdline_arg[] __initdata = "mem_encrypt";
static char sme_cmdline_on[] __initdata = "on";
static char sme_cmdline_off[] __initdata = "off";

static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd)
{
	unsigned long pgd_start, pgd_end, pgd_size;
	pgd_t *pgd_p;

	pgd_start = ppd->vaddr & PGDIR_MASK;
	pgd_end = ppd->vaddr_end & PGDIR_MASK;

	pgd_size = (((pgd_end - pgd_start) / PGDIR_SIZE) + 1) * sizeof(pgd_t);

	pgd_p = ppd->pgd + pgd_index(ppd->vaddr);

	memset(pgd_p, 0, pgd_size);
}
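
/*
 * Worked example: if ppd->vaddr and ppd->vaddr_end land in adjacent PGD
 * entries, pgd_end - pgd_start == PGDIR_SIZE, so pgd_size works out to
 * 2 * sizeof(pgd_t) and both top-level entries are cleared.
 */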

static pud_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = ppd->pgd + pgd_index(ppd->vaddr);
	if (pgd_none(*pgd)) {
		p4d = ppd->pgtable_area;
		memset(p4d, 0, sizeof(*p4d) * PTRS_PER_P4D);
		ppd->pgtable_area += sizeof(*p4d) * PTRS_PER_P4D;
		set_pgd(pgd, __pgd(PGD_FLAGS | __pa(p4d)));
	}

	p4d = p4d_offset(pgd, ppd->vaddr);
	if (p4d_none(*p4d)) {
		pud = ppd->pgtable_area;
		memset(pud, 0, sizeof(*pud) * PTRS_PER_PUD);
		ppd->pgtable_area += sizeof(*pud) * PTRS_PER_PUD;
		set_p4d(p4d, __p4d(P4D_FLAGS | __pa(pud)));
	}

	pud = pud_offset(p4d, ppd->vaddr);
	if (pud_none(*pud)) {
		pmd = ppd->pgtable_area;
		memset(pmd, 0, sizeof(*pmd) * PTRS_PER_PMD);
		ppd->pgtable_area += sizeof(*pmd) * PTRS_PER_PMD;
		set_pud(pud, __pud(PUD_FLAGS | __pa(pmd)));
	}

	if (pud_large(*pud))
		return NULL;

	return pud;
}
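
/*
 * Note that sme_prepare_pgd() only allocates the intermediate P4D, PUD
 * and PMD pages from ppd->pgtable_area; the caller installs the final
 * PMD or PTE entry. A NULL return means a large (1GB) PUD entry already
 * maps this address and there is nothing left to populate.
 */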

static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = sme_prepare_pgd(ppd);
	if (!pud)
		return;

	pmd = pmd_offset(pud, ppd->vaddr);
	if (pmd_large(*pmd))
		return;

	set_pmd(pmd, __pmd(ppd->paddr | ppd->pmd_flags));
}

static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pud = sme_prepare_pgd(ppd);
	if (!pud)
		return;

	pmd = pmd_offset(pud, ppd->vaddr);
	if (pmd_none(*pmd)) {
		pte = ppd->pgtable_area;
		memset(pte, 0, sizeof(*pte) * PTRS_PER_PTE);
		ppd->pgtable_area += sizeof(*pte) * PTRS_PER_PTE;
		set_pmd(pmd, __pmd(PMD_FLAGS | __pa(pte)));
	}

	if (pmd_large(*pmd))
		return;

	pte = pte_offset_map(pmd, ppd->vaddr);
	if (pte_none(*pte))
		set_pte(pte, __pte(ppd->paddr | ppd->pte_flags));
}
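
/*
 * The two populate routines above work as a pair: sme_populate_pgd_large()
 * installs 2MB PMD mappings for the aligned body of a range, while
 * sme_populate_pgd() installs 4KB PTE mappings for any unaligned head or
 * tail (see __sme_map_range() below).
 */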

static void __init __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
{
	while (ppd->vaddr < ppd->vaddr_end) {
		sme_populate_pgd_large(ppd);

		ppd->vaddr += PMD_PAGE_SIZE;
		ppd->paddr += PMD_PAGE_SIZE;
	}
}

static void __init __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
{
	while (ppd->vaddr < ppd->vaddr_end) {
		sme_populate_pgd(ppd);

		ppd->vaddr += PAGE_SIZE;
		ppd->paddr += PAGE_SIZE;
	}
}

static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
				   pmdval_t pmd_flags, pteval_t pte_flags)
{
	unsigned long vaddr_end;

	ppd->pmd_flags = pmd_flags;
	ppd->pte_flags = pte_flags;

	/* Save original end value since we modify the struct value */
	vaddr_end = ppd->vaddr_end;

	/* If start is not 2MB aligned, create PTE entries */
	ppd->vaddr_end = ALIGN(ppd->vaddr, PMD_PAGE_SIZE);
	__sme_map_range_pte(ppd);

	/* Create PMD entries */
	ppd->vaddr_end = vaddr_end & PMD_PAGE_MASK;
	__sme_map_range_pmd(ppd);

	/* If end is not 2MB aligned, create PTE entries */
	ppd->vaddr_end = vaddr_end;
	__sme_map_range_pte(ppd);
}
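
/*
 * Worked example for __sme_map_range(): mapping 0x1ff000 - 0x601000 is
 * done in three steps - PTEs for the unaligned head (0x1ff000 - 0x200000),
 * 2MB PMDs for the aligned body (0x200000 - 0x600000), then PTEs for the
 * unaligned tail (0x600000 - 0x601000).
 */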

static void __init sme_map_range_encrypted(struct sme_populate_pgd_data *ppd)
{
	__sme_map_range(ppd, PMD_FLAGS_ENC, PTE_FLAGS_ENC);
}

static void __init sme_map_range_decrypted(struct sme_populate_pgd_data *ppd)
{
	__sme_map_range(ppd, PMD_FLAGS_DEC, PTE_FLAGS_DEC);
}

static void __init sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd)
{
	__sme_map_range(ppd, PMD_FLAGS_DEC_WP, PTE_FLAGS_DEC_WP);
}

static unsigned long __init sme_pgtable_calc(unsigned long len)
{
	unsigned long entries = 0, tables = 0;

	/*
	 * Perform a relatively simplistic calculation of the pagetable
	 * entries that are needed. Those mappings will be covered mostly
	 * by 2MB PMD entries so we can conservatively calculate the required
	 * number of P4D, PUD and PMD structures needed to perform the
	 * mappings. For mappings that are not 2MB aligned, PTE mappings
	 * would be needed for the start and end portions of the address
	 * range that fall outside of the 2MB alignment. This results in,
	 * at most, two extra pages to hold PTE entries for each range that
	 * is mapped. Incrementing the count for each covers the case where
	 * the addresses cross entries.
	 */

	/* PGDIR_SIZE is equal to P4D_SIZE on 4-level machines. */
	if (PTRS_PER_P4D > 1)
		entries += (DIV_ROUND_UP(len, PGDIR_SIZE) + 1) * sizeof(p4d_t) * PTRS_PER_P4D;
	entries += (DIV_ROUND_UP(len, P4D_SIZE) + 1) * sizeof(pud_t) * PTRS_PER_PUD;
	entries += (DIV_ROUND_UP(len, PUD_SIZE) + 1) * sizeof(pmd_t) * PTRS_PER_PMD;
	entries += 2 * sizeof(pte_t) * PTRS_PER_PTE;

	/*
	 * Now calculate the added pagetable structures needed to populate
	 * the new pagetables.
	 */
	if (PTRS_PER_P4D > 1)
		tables += DIV_ROUND_UP(entries, PGDIR_SIZE) * sizeof(p4d_t) * PTRS_PER_P4D;
	tables += DIV_ROUND_UP(entries, P4D_SIZE) * sizeof(pud_t) * PTRS_PER_PUD;
	tables += DIV_ROUND_UP(entries, PUD_SIZE) * sizeof(pmd_t) * PTRS_PER_PMD;

	return entries + tables;
}
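
/*
 * Worked example, 4-level paging (PTRS_PER_P4D == 1, so the p4d terms drop
 * out): for len == 3GB, the PUD term is (1 + 1) pages, the PMD term is
 * (3 + 1) pages and the fixed PTE term is 2 pages, so entries == 8 pages
 * (32KB). Populating those 32KB needs one more PUD page and one more PMD
 * page, so sme_pgtable_calc() returns 40KB.
 */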

void __init sme_encrypt_kernel(struct boot_params *bp)
{
	unsigned long workarea_start, workarea_end, workarea_len;
	unsigned long execute_start, execute_end, execute_len;
	unsigned long kernel_start, kernel_end, kernel_len;
	unsigned long initrd_start, initrd_end, initrd_len;
	struct sme_populate_pgd_data ppd;
	unsigned long pgtable_area_len;
	unsigned long decrypted_base;

	if (!sme_active())
		return;

	/*
	 * Prepare for encrypting the kernel and initrd by building new
	 * pagetables with the necessary attributes needed to encrypt the
	 * kernel in place.
	 *
	 * One range of virtual addresses will map the memory occupied
	 * by the kernel and initrd as encrypted.
	 *
	 * Another range of virtual addresses will map the memory occupied
	 * by the kernel and initrd as decrypted and write-protected.
	 *
	 * The use of write-protect attribute will prevent any of the
	 * memory from being cached.
	 */

	/* Physical addresses give us the identity mapped virtual addresses */
	kernel_start = __pa_symbol(_text);
	kernel_end = ALIGN(__pa_symbol(_end), PMD_PAGE_SIZE);
	kernel_len = kernel_end - kernel_start;

	initrd_start = 0;
	initrd_end = 0;
	initrd_len = 0;
#ifdef CONFIG_BLK_DEV_INITRD
	initrd_len = (unsigned long)bp->hdr.ramdisk_size |
		     ((unsigned long)bp->ext_ramdisk_size << 32);
	if (initrd_len) {
		initrd_start = (unsigned long)bp->hdr.ramdisk_image |
			       ((unsigned long)bp->ext_ramdisk_image << 32);
		initrd_end = PAGE_ALIGN(initrd_start + initrd_len);
		initrd_len = initrd_end - initrd_start;
	}
#endif

	/* Set the encryption workarea to be immediately after the kernel */
	workarea_start = kernel_end;

	/*
	 * Calculate required number of workarea bytes needed:
	 *   executable encryption area size:
	 *     stack page (PAGE_SIZE)
	 *     encryption routine page (PAGE_SIZE)
	 *     intermediate copy buffer (PMD_PAGE_SIZE)
	 *   pagetable structures for the encryption of the kernel
	 *   pagetable structures for workarea (in case not currently mapped)
	 */
	execute_start = workarea_start;
	execute_end = execute_start + (PAGE_SIZE * 2) + PMD_PAGE_SIZE;
	execute_len = execute_end - execute_start;

	/*
	 * One PGD for both encrypted and decrypted mappings and a set of
	 * PUDs and PMDs for each of the encrypted and decrypted mappings.
	 */
	pgtable_area_len = sizeof(pgd_t) * PTRS_PER_PGD;
	pgtable_area_len += sme_pgtable_calc(execute_end - kernel_start) * 2;
	if (initrd_len)
		pgtable_area_len += sme_pgtable_calc(initrd_len) * 2;

	/* PUDs and PMDs needed in the current pagetables for the workarea */
	pgtable_area_len += sme_pgtable_calc(execute_len + pgtable_area_len);

	/*
	 * The total workarea includes the executable encryption area and
	 * the pagetable area. The start of the workarea is already 2MB
	 * aligned, align the end of the workarea on a 2MB boundary so that
	 * we don't try to create/allocate PTE entries from the workarea
	 * before it is mapped.
	 */
	workarea_len = execute_len + pgtable_area_len;
	workarea_end = ALIGN(workarea_start + workarea_len, PMD_PAGE_SIZE);

	/*
	 * Set the address to the start of where newly created pagetable
	 * structures (PGDs, PUDs and PMDs) will be allocated. New pagetable
	 * structures are created when the workarea is added to the current
	 * pagetables and when the new encrypted and decrypted kernel
	 * mappings are populated.
	 */
	ppd.pgtable_area = (void *)execute_end;

	/*
	 * Make sure the current pagetable structure has entries for
	 * addressing the workarea.
	 */
	ppd.pgd = (pgd_t *)native_read_cr3_pa();
	ppd.paddr = workarea_start;
	ppd.vaddr = workarea_start;
	ppd.vaddr_end = workarea_end;
	sme_map_range_decrypted(&ppd);

	/* Flush the TLB - no globals so cr3 is enough */
	native_write_cr3(__native_read_cr3());

	/*
	 * A new pagetable structure is being built to allow for the kernel
	 * and initrd to be encrypted. It starts with an empty PGD that will
	 * then be populated with new PUDs and PMDs as the encrypted and
	 * decrypted kernel mappings are created.
	 */
	ppd.pgd = ppd.pgtable_area;
	memset(ppd.pgd, 0, sizeof(pgd_t) * PTRS_PER_PGD);
	ppd.pgtable_area += sizeof(pgd_t) * PTRS_PER_PGD;

	/*
	 * A different PGD index/entry must be used to get different
	 * pagetable entries for the decrypted mapping. Choose the next
	 * PGD index and convert it to a virtual address to be used as
	 * the base of the mapping.
	 */
	decrypted_base = (pgd_index(workarea_end) + 1) & (PTRS_PER_PGD - 1);
	if (initrd_len) {
		unsigned long check_base;

		check_base = (pgd_index(initrd_end) + 1) & (PTRS_PER_PGD - 1);
		decrypted_base = max(decrypted_base, check_base);
	}
	decrypted_base <<= PGDIR_SHIFT;
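
	/*
	 * Example: if the workarea ends within PGD entry 0, decrypted_base
	 * becomes 1 << PGDIR_SHIFT (512GB with 4-level paging), placing the
	 * decrypted alias a full PGD entry above the identity-mapped kernel
	 * so the two ranges cannot overlap.
	 */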

	/* Add encrypted kernel (identity) mappings */
	ppd.paddr = kernel_start;
	ppd.vaddr = kernel_start;
	ppd.vaddr_end = kernel_end;
	sme_map_range_encrypted(&ppd);

	/* Add decrypted, write-protected kernel (non-identity) mappings */
	ppd.paddr = kernel_start;
	ppd.vaddr = kernel_start + decrypted_base;
	ppd.vaddr_end = kernel_end + decrypted_base;
	sme_map_range_decrypted_wp(&ppd);

	if (initrd_len) {
		/* Add encrypted initrd (identity) mappings */
		ppd.paddr = initrd_start;
		ppd.vaddr = initrd_start;
		ppd.vaddr_end = initrd_end;
		sme_map_range_encrypted(&ppd);

		/*
		 * Add decrypted, write-protected initrd (non-identity)
		 * mappings
		 */
		ppd.paddr = initrd_start;
		ppd.vaddr = initrd_start + decrypted_base;
		ppd.vaddr_end = initrd_end + decrypted_base;
		sme_map_range_decrypted_wp(&ppd);
	}

	/* Add decrypted workarea mappings to both kernel mappings */
	ppd.paddr = workarea_start;
	ppd.vaddr = workarea_start;
	ppd.vaddr_end = workarea_end;
	sme_map_range_decrypted(&ppd);

	ppd.paddr = workarea_start;
	ppd.vaddr = workarea_start + decrypted_base;
	ppd.vaddr_end = workarea_end + decrypted_base;
	sme_map_range_decrypted(&ppd);

	/* Perform the encryption */
	sme_encrypt_execute(kernel_start, kernel_start + decrypted_base,
			    kernel_len, workarea_start, (unsigned long)ppd.pgd);

	if (initrd_len)
		sme_encrypt_execute(initrd_start, initrd_start + decrypted_base,
				    initrd_len, workarea_start,
				    (unsigned long)ppd.pgd);

	/*
	 * At this point we are running encrypted. Remove the mappings for
	 * the decrypted areas - all that is needed for this is to remove
	 * the PGD entry/entries.
	 */
	ppd.vaddr = kernel_start + decrypted_base;
	ppd.vaddr_end = kernel_end + decrypted_base;
	sme_clear_pgd(&ppd);

	if (initrd_len) {
		ppd.vaddr = initrd_start + decrypted_base;
		ppd.vaddr_end = initrd_end + decrypted_base;
		sme_clear_pgd(&ppd);
	}

	ppd.vaddr = workarea_start + decrypted_base;
	ppd.vaddr_end = workarea_end + decrypted_base;
	sme_clear_pgd(&ppd);

	/* Flush the TLB - no globals so cr3 is enough */
	native_write_cr3(__native_read_cr3());
}

void __init sme_enable(struct boot_params *bp)
{
	const char *cmdline_ptr, *cmdline_arg, *cmdline_on, *cmdline_off;
	unsigned int eax, ebx, ecx, edx;
	unsigned long feature_mask;
	bool active_by_default;
	unsigned long me_mask;
	char buffer[16];
	u64 msr;

	/* Check for the SME/SEV support leaf */
	eax = 0x80000000;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	if (eax < 0x8000001f)
		return;

#define AMD_SME_BIT	BIT(0)
#define AMD_SEV_BIT	BIT(1)
	/*
	 * Set the feature mask (SME or SEV) based on whether we are
	 * running under a hypervisor.
	 */
	eax = 1;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	feature_mask = (ecx & BIT(31)) ? AMD_SEV_BIT : AMD_SME_BIT;

	/*
	 * Check for the SME/SEV feature:
	 *   CPUID Fn8000_001F[EAX]
	 *   - Bit 0 - Secure Memory Encryption support
	 *   - Bit 1 - Secure Encrypted Virtualization support
	 *   CPUID Fn8000_001F[EBX]
	 *   - Bits 5:0 - Pagetable bit position used to indicate encryption
	 */
	eax = 0x8000001f;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	if (!(eax & feature_mask))
		return;

	me_mask = 1UL << (ebx & 0x3f);
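
	/*
	 * EBX[5:0] gives the position of the encryption (C) bit in page
	 * table entries; on first-generation EPYC parts, for example, this
	 * is bit 47, making me_mask == 1UL << 47.
	 */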

	/* Check if memory encryption is enabled */
	if (feature_mask == AMD_SME_BIT) {
		/* For SME, check the SYSCFG MSR */
		msr = __rdmsr(MSR_K8_SYSCFG);
		if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
			return;
	} else {
		/* For SEV, check the SEV MSR */
		msr = __rdmsr(MSR_AMD64_SEV);
		if (!(msr & MSR_AMD64_SEV_ENABLED))
			return;

		/* SEV state cannot be controlled by a command line option */
		sme_me_mask = me_mask;
		sev_enabled = true;
		return;
	}

	/*
	 * Fixups have not been applied to phys_base yet and we're running
	 * identity mapped, so we must obtain the address to the SME command
	 * line argument data using rip-relative addressing.
	 */
	asm ("lea sme_cmdline_arg(%%rip), %0"
	     : "=r" (cmdline_arg)
	     : "p" (sme_cmdline_arg));
	asm ("lea sme_cmdline_on(%%rip), %0"
	     : "=r" (cmdline_on)
	     : "p" (sme_cmdline_on));
	asm ("lea sme_cmdline_off(%%rip), %0"
	     : "=r" (cmdline_off)
	     : "p" (sme_cmdline_off));

	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT))
		active_by_default = true;
	else
		active_by_default = false;

	cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr |
				     ((u64)bp->ext_cmd_line_ptr << 32));

	cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer));

	if (!strncmp(buffer, cmdline_on, sizeof(buffer)))
		sme_me_mask = me_mask;
	else if (!strncmp(buffer, cmdline_off, sizeof(buffer)))
		sme_me_mask = 0;
	else
		sme_me_mask = active_by_default ? me_mask : 0;
}
564 | } |