/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define DISABLE_BRANCH_PROFILING

/*
 * Since we're dealing with identity mappings, physical and virtual
 * addresses are the same, so override these defines which are ultimately
 * used by the headers in misc.h.
 */
#define __pa(x)	((unsigned long)(x))
#define __va(x)	((void *)((unsigned long)(x)))

/*
 * Special hack: we have to be careful, because no indirections are
 * allowed here, and paravirt_ops is a kind of one. As it will only run in
 * baremetal anyway, we just keep it from happening. (This list needs to
 * be extended when new paravirt and debugging variants are added.)
 */
#undef CONFIG_PARAVIRT
#undef CONFIG_PARAVIRT_SPINLOCKS

/*
 * This code runs before CPU feature bits are set. By default, the
 * pgtable_l5_enabled() function uses bit X86_FEATURE_LA57 to determine if
 * 5-level paging is active, so that won't work here. USE_EARLY_PGTABLE_L5
 * is provided to handle this situation and, instead, use a variable that
 * has been set by the early boot code.
 */
#define USE_EARLY_PGTABLE_L5

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mem_encrypt.h>

#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/cmdline.h>

#include "mm_internal.h"

#define PGD_FLAGS		_KERNPG_TABLE_NOENC
#define P4D_FLAGS		_KERNPG_TABLE_NOENC
#define PUD_FLAGS		_KERNPG_TABLE_NOENC
#define PMD_FLAGS		_KERNPG_TABLE_NOENC

#define PMD_FLAGS_LARGE		(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)

#define PMD_FLAGS_DEC		PMD_FLAGS_LARGE
#define PMD_FLAGS_DEC_WP	((PMD_FLAGS_DEC & ~_PAGE_LARGE_CACHE_MASK) | \
				 (_PAGE_PAT_LARGE | _PAGE_PWT))

#define PMD_FLAGS_ENC		(PMD_FLAGS_LARGE | _PAGE_ENC)

#define PTE_FLAGS		(__PAGE_KERNEL_EXEC & ~_PAGE_GLOBAL)

#define PTE_FLAGS_DEC		PTE_FLAGS
#define PTE_FLAGS_DEC_WP	((PTE_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
				 (_PAGE_PAT | _PAGE_PWT))

#define PTE_FLAGS_ENC		(PTE_FLAGS | _PAGE_ENC)
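
/*
 * Note: the *_DEC_WP variants above clear the cache-attribute bits and set
 * the PAT/PWT combination for the mapping.  The intent (see the comment at
 * the top of sme_encrypt_kernel()) is that the decrypted alias of the
 * kernel is mapped with a write-protect, non-writeback attribute so that
 * none of that memory is cached while the kernel is encrypted in place.
 */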

struct sme_populate_pgd_data {
	void *pgtable_area;
	pgd_t *pgd;
	pmdval_t pmd_flags;
	pteval_t pte_flags;
	unsigned long paddr, vaddr, vaddr_end;
};

static char sme_cmdline_arg[] __initdata = "mem_encrypt";
static char sme_cmdline_on[]  __initdata = "on";
static char sme_cmdline_off[] __initdata = "off";
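
/*
 * These strings are matched against the mem_encrypt= kernel command line
 * option in sme_enable().  They are referenced there via RIP-relative
 * addressing because fixups have not yet been applied to phys_base.
 */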

static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd)
{
	unsigned long pgd_start, pgd_end, pgd_size;
	pgd_t *pgd_p;
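
	/*
	 * Clear all PGD entries that cover the ppd->vaddr to ppd->vaddr_end
	 * range.  The "+ 1" below makes the entry count inclusive of the
	 * last PGD entry, e.g. a range that crosses a single PGDIR boundary
	 * clears two entries.
	 */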
	pgd_start = ppd->vaddr & PGDIR_MASK;
	pgd_end = ppd->vaddr_end & PGDIR_MASK;

	pgd_size = (((pgd_end - pgd_start) / PGDIR_SIZE) + 1) * sizeof(pgd_t);

	pgd_p = ppd->pgd + pgd_index(ppd->vaddr);

	memset(pgd_p, 0, pgd_size);
}
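
/*
 * Walk (and, where necessary, build) the PGD/P4D/PUD levels for ppd->vaddr.
 * New lower-level tables are carved out of ppd->pgtable_area, which acts as
 * a simple bump allocator over the pre-sized workarea.
 */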
static pud_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = ppd->pgd + pgd_index(ppd->vaddr);
	if (pgd_none(*pgd)) {
		p4d = ppd->pgtable_area;
		memset(p4d, 0, sizeof(*p4d) * PTRS_PER_P4D);
		ppd->pgtable_area += sizeof(*p4d) * PTRS_PER_P4D;
		set_pgd(pgd, __pgd(PGD_FLAGS | __pa(p4d)));
	}

	p4d = p4d_offset(pgd, ppd->vaddr);
	if (p4d_none(*p4d)) {
		pud = ppd->pgtable_area;
		memset(pud, 0, sizeof(*pud) * PTRS_PER_PUD);
		ppd->pgtable_area += sizeof(*pud) * PTRS_PER_PUD;
		set_p4d(p4d, __p4d(P4D_FLAGS | __pa(pud)));
	}

	pud = pud_offset(p4d, ppd->vaddr);
	if (pud_none(*pud)) {
		pmd = ppd->pgtable_area;
		memset(pmd, 0, sizeof(*pmd) * PTRS_PER_PMD);
		ppd->pgtable_area += sizeof(*pmd) * PTRS_PER_PMD;
		set_pud(pud, __pud(PUD_FLAGS | __pa(pmd)));
	}

	if (pud_large(*pud))
		return NULL;

	return pud;
}

static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = sme_prepare_pgd(ppd);
	if (!pud)
		return;

	pmd = pmd_offset(pud, ppd->vaddr);
	if (pmd_large(*pmd))
		return;

	set_pmd(pmd, __pmd(ppd->paddr | ppd->pmd_flags));
}

static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pud = sme_prepare_pgd(ppd);
	if (!pud)
		return;

	pmd = pmd_offset(pud, ppd->vaddr);
	if (pmd_none(*pmd)) {
		pte = ppd->pgtable_area;
		memset(pte, 0, sizeof(*pte) * PTRS_PER_PTE);
		ppd->pgtable_area += sizeof(*pte) * PTRS_PER_PTE;
		set_pmd(pmd, __pmd(PMD_FLAGS | __pa(pte)));
	}

	if (pmd_large(*pmd))
		return;

	pte = pte_offset_map(pmd, ppd->vaddr);
	if (pte_none(*pte))
		set_pte(pte, __pte(ppd->paddr | ppd->pte_flags));
}

static void __init __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
{
	while (ppd->vaddr < ppd->vaddr_end) {
		sme_populate_pgd_large(ppd);

		ppd->vaddr += PMD_PAGE_SIZE;
		ppd->paddr += PMD_PAGE_SIZE;
	}
}

static void __init __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
{
	while (ppd->vaddr < ppd->vaddr_end) {
		sme_populate_pgd(ppd);

		ppd->vaddr += PAGE_SIZE;
		ppd->paddr += PAGE_SIZE;
	}
}

static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
				   pmdval_t pmd_flags, pteval_t pte_flags)
{
	unsigned long vaddr_end;

	ppd->pmd_flags = pmd_flags;
	ppd->pte_flags = pte_flags;

	/* Save original end value since we modify the struct value */
	vaddr_end = ppd->vaddr_end;

	/* If start is not 2MB aligned, create PTE entries */
	ppd->vaddr_end = ALIGN(ppd->vaddr, PMD_PAGE_SIZE);
	__sme_map_range_pte(ppd);

	/* Create PMD entries */
	ppd->vaddr_end = vaddr_end & PMD_PAGE_MASK;
	__sme_map_range_pmd(ppd);

	/* If end is not 2MB aligned, create PTE entries */
	ppd->vaddr_end = vaddr_end;
	__sme_map_range_pte(ppd);
}
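
/*
 * Example (hypothetical addresses): mapping ppd->vaddr = 0x1ff000 through
 * ppd->vaddr_end = 0x601000 creates one PTE for 0x1ff000 - 0x200000, two
 * 2MB PMD entries for 0x200000 - 0x600000, and one PTE for
 * 0x600000 - 0x601000.
 */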

static void __init sme_map_range_encrypted(struct sme_populate_pgd_data *ppd)
{
	__sme_map_range(ppd, PMD_FLAGS_ENC, PTE_FLAGS_ENC);
}

static void __init sme_map_range_decrypted(struct sme_populate_pgd_data *ppd)
{
	__sme_map_range(ppd, PMD_FLAGS_DEC, PTE_FLAGS_DEC);
}

static void __init sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd)
{
	__sme_map_range(ppd, PMD_FLAGS_DEC_WP, PTE_FLAGS_DEC_WP);
}
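
/*
 * sme_encrypt_kernel() uses these wrappers as follows: the encrypted flavor
 * for the identity mapping of the kernel and initrd, the decrypted
 * write-protected flavor for the temporary alias of the same physical
 * memory, and the plain decrypted flavor for the workarea.
 */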

static unsigned long __init sme_pgtable_calc(unsigned long len)
{
	unsigned long entries = 0, tables = 0;

	/*
	 * Perform a relatively simplistic calculation of the pagetable
	 * entries that are needed. Those mappings will be covered mostly
	 * by 2MB PMD entries so we can conservatively calculate the required
	 * number of P4D, PUD and PMD structures needed to perform the
	 * mappings.  For mappings that are not 2MB aligned, PTE mappings
	 * would be needed for the start and end portion of the address range
	 * that fall outside of the 2MB alignment.  This results in, at most,
	 * two extra pages to hold PTE entries for each range that is mapped.
	 * Incrementing the count for each covers the case where the addresses
	 * cross entries.
	 */
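
	/*
	 * Illustrative example only: with 4-level paging (the P4D term drops
	 * out), a 2GB range works out to two PUD pages, three PMD pages and
	 * two PTE pages of entries (28KB), plus two more pages to map those
	 * entries themselves - roughly 36KB returned.
	 */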

	/* PGDIR_SIZE is equal to P4D_SIZE on 4-level machine. */
	if (PTRS_PER_P4D > 1)
		entries += (DIV_ROUND_UP(len, PGDIR_SIZE) + 1) * sizeof(p4d_t) * PTRS_PER_P4D;
	entries += (DIV_ROUND_UP(len, P4D_SIZE) + 1) * sizeof(pud_t) * PTRS_PER_PUD;
	entries += (DIV_ROUND_UP(len, PUD_SIZE) + 1) * sizeof(pmd_t) * PTRS_PER_PMD;
	entries += 2 * sizeof(pte_t) * PTRS_PER_PTE;

	/*
	 * Now calculate the added pagetable structures needed to populate
	 * the new pagetables.
	 */
	if (PTRS_PER_P4D > 1)
		tables += DIV_ROUND_UP(entries, PGDIR_SIZE) * sizeof(p4d_t) * PTRS_PER_P4D;
	tables += DIV_ROUND_UP(entries, P4D_SIZE) * sizeof(pud_t) * PTRS_PER_PUD;
	tables += DIV_ROUND_UP(entries, PUD_SIZE) * sizeof(pmd_t) * PTRS_PER_PMD;

	return entries + tables;
}

void __init sme_encrypt_kernel(struct boot_params *bp)
{
	unsigned long workarea_start, workarea_end, workarea_len;
	unsigned long execute_start, execute_end, execute_len;
	unsigned long kernel_start, kernel_end, kernel_len;
	unsigned long initrd_start, initrd_end, initrd_len;
	struct sme_populate_pgd_data ppd;
	unsigned long pgtable_area_len;
	unsigned long decrypted_base;

	if (!sme_active())
		return;

	/*
	 * Prepare for encrypting the kernel and initrd by building new
	 * pagetables with the necessary attributes needed to encrypt the
	 * kernel in place.
	 *
	 *   One range of virtual addresses will map the memory occupied
	 *   by the kernel and initrd as encrypted.
	 *
	 *   Another range of virtual addresses will map the memory occupied
	 *   by the kernel and initrd as decrypted and write-protected.
	 *
	 *     The use of write-protect attribute will prevent any of the
	 *     memory from being cached.
	 */

	/* Physical addresses give us the identity mapped virtual addresses */
	kernel_start = __pa_symbol(_text);
	kernel_end = ALIGN(__pa_symbol(_end), PMD_PAGE_SIZE);
	kernel_len = kernel_end - kernel_start;

	initrd_start = 0;
	initrd_end = 0;
	initrd_len = 0;
#ifdef CONFIG_BLK_DEV_INITRD
	initrd_len = (unsigned long)bp->hdr.ramdisk_size |
		     ((unsigned long)bp->ext_ramdisk_size << 32);
	if (initrd_len) {
		initrd_start = (unsigned long)bp->hdr.ramdisk_image |
			       ((unsigned long)bp->ext_ramdisk_image << 32);
		initrd_end = PAGE_ALIGN(initrd_start + initrd_len);
		initrd_len = initrd_end - initrd_start;
	}
#endif

	/* Set the encryption workarea to be immediately after the kernel */
	workarea_start = kernel_end;

	/*
	 * Calculate required number of workarea bytes needed:
	 *   executable encryption area size:
	 *     stack page (PAGE_SIZE)
	 *     encryption routine page (PAGE_SIZE)
	 *     intermediate copy buffer (PMD_PAGE_SIZE)
	 *   pagetable structures for the encryption of the kernel
	 *   pagetable structures for workarea (in case not currently mapped)
	 */
	execute_start = workarea_start;
	execute_end = execute_start + (PAGE_SIZE * 2) + PMD_PAGE_SIZE;
	execute_len = execute_end - execute_start;
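
	/*
	 * Resulting layout (the exact use of the execute area is up to
	 * sme_encrypt_execute()):
	 *
	 *   workarea_start (== kernel_end, 2MB aligned)
	 *     stack page                (PAGE_SIZE)
	 *     encryption routine page   (PAGE_SIZE)
	 *     intermediate copy buffer  (PMD_PAGE_SIZE)
	 *   execute_end == start of the new pagetable allocations
	 *   workarea_end (2MB aligned)
	 */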

	/*
	 * One PGD for both encrypted and decrypted mappings and a set of
	 * PUDs and PMDs for each of the encrypted and decrypted mappings.
	 */
	pgtable_area_len = sizeof(pgd_t) * PTRS_PER_PGD;
	pgtable_area_len += sme_pgtable_calc(execute_end - kernel_start) * 2;
	if (initrd_len)
		pgtable_area_len += sme_pgtable_calc(initrd_len) * 2;

	/* PUDs and PMDs needed in the current pagetables for the workarea */
	pgtable_area_len += sme_pgtable_calc(execute_len + pgtable_area_len);

	/*
	 * The total workarea includes the executable encryption area and
	 * the pagetable area. The start of the workarea is already 2MB
	 * aligned, align the end of the workarea on a 2MB boundary so that
	 * we don't try to create/allocate PTE entries from the workarea
	 * before it is mapped.
	 */
	workarea_len = execute_len + pgtable_area_len;
	workarea_end = ALIGN(workarea_start + workarea_len, PMD_PAGE_SIZE);

	/*
	 * Set the address to the start of where newly created pagetable
	 * structures (PGDs, PUDs and PMDs) will be allocated. New pagetable
	 * structures are created when the workarea is added to the current
	 * pagetables and when the new encrypted and decrypted kernel
	 * mappings are populated.
	 */
	ppd.pgtable_area = (void *)execute_end;

	/*
	 * Make sure the current pagetable structure has entries for
	 * addressing the workarea.
	 */
	ppd.pgd = (pgd_t *)native_read_cr3_pa();
	ppd.paddr = workarea_start;
	ppd.vaddr = workarea_start;
	ppd.vaddr_end = workarea_end;
	sme_map_range_decrypted(&ppd);

	/* Flush the TLB - no globals so cr3 is enough */
	native_write_cr3(__native_read_cr3());

	/*
	 * A new pagetable structure is being built to allow for the kernel
	 * and initrd to be encrypted. It starts with an empty PGD that will
	 * then be populated with new PUDs and PMDs as the encrypted and
	 * decrypted kernel mappings are created.
	 */
	ppd.pgd = ppd.pgtable_area;
	memset(ppd.pgd, 0, sizeof(pgd_t) * PTRS_PER_PGD);
	ppd.pgtable_area += sizeof(pgd_t) * PTRS_PER_PGD;

	/*
	 * A different PGD index/entry must be used to get different
	 * pagetable entries for the decrypted mapping. Choose the next
	 * PGD index and convert it to a virtual address to be used as
	 * the base of the mapping.
	 */
	decrypted_base = (pgd_index(workarea_end) + 1) & (PTRS_PER_PGD - 1);
	if (initrd_len) {
		unsigned long check_base;

		check_base = (pgd_index(initrd_end) + 1) & (PTRS_PER_PGD - 1);
		decrypted_base = max(decrypted_base, check_base);
	}
	decrypted_base <<= PGDIR_SHIFT;
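
	/*
	 * Example only: with 4-level paging PGDIR_SHIFT is 39, so if the
	 * kernel, workarea and initrd all sit below 512GB, decrypted_base
	 * ends up as 512GB (PGD entry 1) and the decrypted alias of the
	 * kernel starts at kernel_start + 512GB.
	 */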

	/* Add encrypted kernel (identity) mappings */
	ppd.paddr = kernel_start;
	ppd.vaddr = kernel_start;
	ppd.vaddr_end = kernel_end;
	sme_map_range_encrypted(&ppd);

	/* Add decrypted, write-protected kernel (non-identity) mappings */
	ppd.paddr = kernel_start;
	ppd.vaddr = kernel_start + decrypted_base;
	ppd.vaddr_end = kernel_end + decrypted_base;
	sme_map_range_decrypted_wp(&ppd);

	if (initrd_len) {
		/* Add encrypted initrd (identity) mappings */
		ppd.paddr = initrd_start;
		ppd.vaddr = initrd_start;
		ppd.vaddr_end = initrd_end;
		sme_map_range_encrypted(&ppd);

		/* Add decrypted, write-protected initrd (non-identity) mappings */
		ppd.paddr = initrd_start;
		ppd.vaddr = initrd_start + decrypted_base;
		ppd.vaddr_end = initrd_end + decrypted_base;
		sme_map_range_decrypted_wp(&ppd);
	}

	/* Add decrypted workarea mappings to both kernel mappings */
	ppd.paddr = workarea_start;
	ppd.vaddr = workarea_start;
	ppd.vaddr_end = workarea_end;
	sme_map_range_decrypted(&ppd);

	ppd.paddr = workarea_start;
	ppd.vaddr = workarea_start + decrypted_base;
	ppd.vaddr_end = workarea_end + decrypted_base;
	sme_map_range_decrypted(&ppd);
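
	/*
	 * The workarea (encryption routine, stack and intermediate copy
	 * buffer) is mapped decrypted under both the identity range and the
	 * decrypted_base alias so that it stays reachable from either view
	 * while the encryption routine runs.
	 */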

	/* Perform the encryption */
	sme_encrypt_execute(kernel_start, kernel_start + decrypted_base,
			    kernel_len, workarea_start, (unsigned long)ppd.pgd);

	if (initrd_len)
		sme_encrypt_execute(initrd_start, initrd_start + decrypted_base,
				    initrd_len, workarea_start,
				    (unsigned long)ppd.pgd);

	/*
	 * At this point we are running encrypted.  Remove the mappings for
	 * the decrypted areas - all that is needed for this is to remove
	 * the PGD entry/entries.
	 */
	ppd.vaddr = kernel_start + decrypted_base;
	ppd.vaddr_end = kernel_end + decrypted_base;
	sme_clear_pgd(&ppd);

	if (initrd_len) {
		ppd.vaddr = initrd_start + decrypted_base;
		ppd.vaddr_end = initrd_end + decrypted_base;
		sme_clear_pgd(&ppd);
	}

	ppd.vaddr = workarea_start + decrypted_base;
	ppd.vaddr_end = workarea_end + decrypted_base;
	sme_clear_pgd(&ppd);

	/* Flush the TLB - no globals so cr3 is enough */
	native_write_cr3(__native_read_cr3());
}

void __init sme_enable(struct boot_params *bp)
{
	const char *cmdline_ptr, *cmdline_arg, *cmdline_on, *cmdline_off;
	unsigned int eax, ebx, ecx, edx;
	unsigned long feature_mask;
	bool active_by_default;
	unsigned long me_mask;
	char buffer[16];
	u64 msr;

	/* Check for the SME/SEV support leaf */
	eax = 0x80000000;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	if (eax < 0x8000001f)
		return;

#define AMD_SME_BIT	BIT(0)
#define AMD_SEV_BIT	BIT(1)
	/*
	 * Set the feature mask (SME or SEV) based on whether we are
	 * running under a hypervisor.
	 */
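	/* CPUID Fn0000_0001[ECX] bit 31 is the hypervisor-present bit. */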
	eax = 1;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	feature_mask = (ecx & BIT(31)) ? AMD_SEV_BIT : AMD_SME_BIT;

	/*
	 * Check for the SME/SEV feature:
	 *   CPUID Fn8000_001F[EAX]
	 *   - Bit 0 - Secure Memory Encryption support
	 *   - Bit 1 - Secure Encrypted Virtualization support
	 *   CPUID Fn8000_001F[EBX]
	 *   - Bits 5:0 - Pagetable bit position used to indicate encryption
	 */
	eax = 0x8000001f;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	if (!(eax & feature_mask))
		return;

	me_mask = 1UL << (ebx & 0x3f);
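
	/*
	 * me_mask now holds only the encryption (C) bit; EBX[5:0] gives its
	 * position, which is bit 47 on many current parts (example only).
	 */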

	/* Check if memory encryption is enabled */
	if (feature_mask == AMD_SME_BIT) {
		/* For SME, check the SYSCFG MSR */
		msr = __rdmsr(MSR_K8_SYSCFG);
		if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
			return;
	} else {
		/* For SEV, check the SEV MSR */
		msr = __rdmsr(MSR_AMD64_SEV);
		if (!(msr & MSR_AMD64_SEV_ENABLED))
			return;

		/* SEV state cannot be controlled by a command line option */
		sme_me_mask = me_mask;
		sev_enabled = true;
		physical_mask &= ~sme_me_mask;
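		/*
		 * Removing the C-bit from physical_mask keeps __PHYSICAL_MASK
		 * (and the page table helpers built on it) from treating the
		 * encryption bit as part of the physical address.
		 */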
		return;
	}

	/*
	 * Fixups have not been applied to phys_base yet and we're running
	 * identity mapped, so we must obtain the address to the SME command
	 * line argument data using rip-relative addressing.
	 */
	asm ("lea sme_cmdline_arg(%%rip), %0"
	     : "=r" (cmdline_arg)
	     : "p" (sme_cmdline_arg));
	asm ("lea sme_cmdline_on(%%rip), %0"
	     : "=r" (cmdline_on)
	     : "p" (sme_cmdline_on));
	asm ("lea sme_cmdline_off(%%rip), %0"
	     : "=r" (cmdline_off)
	     : "p" (sme_cmdline_off));

	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT))
		active_by_default = true;
	else
		active_by_default = false;

	cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr |
				     ((u64)bp->ext_cmd_line_ptr << 32));

	if (cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)) < 0)
		return;

	if (!strncmp(buffer, cmdline_on, sizeof(buffer)))
		sme_me_mask = me_mask;
	else if (!strncmp(buffer, cmdline_off, sizeof(buffer)))
		sme_me_mask = 0;
	else
		sme_me_mask = active_by_default ? me_mask : 0;

	physical_mask &= ~sme_me_mask;
}