Skip to content

Commit d8aa7ee

Browse files
tlendacky authored and KAGA-KOKO committed
x86/mm: Add Secure Encrypted Virtualization (SEV) support
Provide support for Secure Encrypted Virtualization (SEV). This initial support defines a flag that is used by the kernel to determine if it is running with SEV active. Signed-off-by: Tom Lendacky <[email protected]> Signed-off-by: Brijesh Singh <[email protected]> Signed-off-by: Thomas Gleixner <[email protected]> Reviewed-by: Borislav Petkov <[email protected]> Tested-by: Borislav Petkov <[email protected]> Cc: [email protected] Cc: Borislav Petkov <[email protected]> Cc: Andy Lutomirski <[email protected]> Link: https://lkml.kernel.org/r/[email protected]
1 parent 33e63ac commit d8aa7ee

File tree

3 files changed

+37
-2
lines changed

3 files changed

+37
-2
lines changed

arch/x86/include/asm/mem_encrypt.h

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -47,6 +47,9 @@ void __init mem_encrypt_init(void);
4747

4848
void swiotlb_set_mem_attributes(void *vaddr, unsigned long size);
4949

50+
bool sme_active(void);
51+
bool sev_active(void);
52+
5053
#else /* !CONFIG_AMD_MEM_ENCRYPT */
5154

5255
#define sme_me_mask 0ULL
@@ -64,6 +67,9 @@ static inline void __init sme_early_init(void) { }
6467
static inline void __init sme_encrypt_kernel(void) { }
6568
static inline void __init sme_enable(struct boot_params *bp) { }
6669

70+
static inline bool sme_active(void) { return false; }
71+
static inline bool sev_active(void) { return false; }
72+
6773
#endif /* CONFIG_AMD_MEM_ENCRYPT */
6874

6975
/*

arch/x86/mm/mem_encrypt.c

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -42,6 +42,8 @@ static char sme_cmdline_off[] __initdata = "off";
4242
u64 sme_me_mask __section(.data) = 0;
4343
EXPORT_SYMBOL_GPL(sme_me_mask);
4444

45+
static bool sev_enabled __section(.data);
46+
4547
/* Buffer used for early in-place encryption by BSP, no locking needed */
4648
static char sme_early_buffer[PAGE_SIZE] __aligned(PAGE_SIZE);
4749

@@ -192,6 +194,30 @@ void __init sme_early_init(void)
192194
protection_map[i] = pgprot_encrypted(protection_map[i]);
193195
}
194196

197+
/*
198+
* SME and SEV are very similar but they are not the same, so there are
199+
* times that the kernel will need to distinguish between SME and SEV. The
200+
* sme_active() and sev_active() functions are used for this. When a
201+
* distinction isn't needed, the mem_encrypt_active() function can be used.
202+
*
203+
* The trampoline code is a good example for this requirement. Before
204+
* paging is activated, SME will access all memory as decrypted, but SEV
205+
* will access all memory as encrypted. So, when APs are being brought
206+
* up under SME the trampoline area cannot be encrypted, whereas under SEV
207+
* the trampoline area must be encrypted.
208+
*/
209+
bool sme_active(void)
210+
{
211+
return sme_me_mask && !sev_enabled;
212+
}
213+
EXPORT_SYMBOL_GPL(sme_active);
214+
215+
bool sev_active(void)
216+
{
217+
return sme_me_mask && sev_enabled;
218+
}
219+
EXPORT_SYMBOL_GPL(sev_active);
220+
195221
/* Architecture __weak replacement functions */
196222
void __init mem_encrypt_init(void)
197223
{

include/linux/mem_encrypt.h

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -23,11 +23,14 @@
2323

2424
#define sme_me_mask 0ULL
2525

26+
static inline bool sme_active(void) { return false; }
27+
static inline bool sev_active(void) { return false; }
28+
2629
#endif /* CONFIG_ARCH_HAS_MEM_ENCRYPT */
2730

28-
static inline bool sme_active(void)
31+
static inline bool mem_encrypt_active(void)
2932
{
30-
return !!sme_me_mask;
33+
return sme_me_mask;
3134
}
3235

3336
static inline u64 sme_get_me_mask(void)

0 commit comments

Comments (0)