Commit 32cb4d02 authored by Tom Lendacky, committed by Borislav Petkov
Browse files

x86/sme: Replace occurrences of sme_active() with cc_platform_has()



Replace uses of sme_active() with the more generic cc_platform_has()
using CC_ATTR_HOST_MEM_ENCRYPT. If future support is added for other
memory encryption technologies, the use of CC_ATTR_HOST_MEM_ENCRYPT
can be updated, as required.

This also replaces two usages of sev_active() that are really geared
towards detecting if SME is active.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/20210928191009.32551-6-bp@alien8.de
parent bfebd37e
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -129,7 +129,7 @@ relocate_kernel(unsigned long indirection_page,
		unsigned long page_list,
		unsigned long start_address,
		unsigned int preserve_context,
		unsigned int sme_active);
		unsigned int host_mem_enc_active);
#endif

#define ARCH_HAS_KIMAGE_ARCH
+0 −2
Original line number Diff line number Diff line
@@ -51,7 +51,6 @@ void __init mem_encrypt_free_decrypted_mem(void);
void __init mem_encrypt_init(void);

void __init sev_es_init_vc_handling(void);
bool sme_active(void);
bool sev_active(void);
bool sev_es_active(void);

@@ -76,7 +75,6 @@ static inline void __init sme_encrypt_kernel(struct boot_params *bp) { }
static inline void __init sme_enable(struct boot_params *bp) { }

static inline void sev_es_init_vc_handling(void) { }
static inline bool sme_active(void) { return false; }
static inline bool sev_active(void) { return false; }
static inline bool sev_es_active(void) { return false; }

+8 −7
Original line number Diff line number Diff line
@@ -17,6 +17,7 @@
#include <linux/suspend.h>
#include <linux/vmalloc.h>
#include <linux/efi.h>
#include <linux/cc_platform.h>

#include <asm/init.h>
#include <asm/tlbflush.h>
@@ -358,7 +359,7 @@ void machine_kexec(struct kimage *image)
				       (unsigned long)page_list,
				       image->start,
				       image->preserve_context,
				       sme_active());
				       cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT));

#ifdef CONFIG_KEXEC_JUMP
	if (image->preserve_context)
@@ -569,12 +570,12 @@ void arch_kexec_unprotect_crashkres(void)
 */
int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, gfp_t gfp)
{
	if (sev_active())
	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		return 0;

	/*
	 * If SME is active we need to be sure that kexec pages are
	 * not encrypted because when we boot to the new kernel the
	 * If host memory encryption is active we need to be sure that kexec
	 * pages are not encrypted because when we boot to the new kernel the
	 * pages won't be accessed encrypted (initially).
	 */
	return set_memory_decrypted((unsigned long)vaddr, pages);
@@ -582,12 +583,12 @@ int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, gfp_t gfp)

/*
 * Restore the encrypted mapping on kexec pages before they are freed.
 *
 * Counterpart to arch_kexec_post_alloc_pages(): that hook decrypts kexec
 * pages on allocation, so this one must re-encrypt them before they are
 * returned to the allocator. No-op when host memory encryption (e.g. AMD
 * SME) is not active.
 */
void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages)
{
	if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		return;

	/*
	 * If host memory encryption is active we need to reset the pages back
	 * to being an encrypted mapping before freeing them.
	 */
	set_memory_encrypted((unsigned long)vaddr, pages);
}
+4 −5
Original line number Diff line number Diff line
@@ -6,7 +6,7 @@
#include <linux/swiotlb.h>
#include <linux/memblock.h>
#include <linux/dma-direct.h>
#include <linux/mem_encrypt.h>
#include <linux/cc_platform.h>

#include <asm/iommu.h>
#include <asm/swiotlb.h>
@@ -45,11 +45,10 @@ int __init pci_swiotlb_detect_4gb(void)
		swiotlb = 1;

	/*
	 * If SME is active then swiotlb will be set to 1 so that bounce
	 * buffers are allocated and used for devices that do not support
	 * the addressing range required for the encryption mask.
	 * Set swiotlb to 1 so that bounce buffers are allocated and used for
	 * devices that can't support DMA to encrypted memory.
	 */
	if (sme_active())
	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
		swiotlb = 1;

	return swiotlb;
+1 −1
Original line number Diff line number Diff line
@@ -47,7 +47,7 @@ SYM_CODE_START_NOALIGN(relocate_kernel)
	 * %rsi page_list
	 * %rdx start address
	 * %rcx preserve_context
	 * %r8  sme_active
	 * %r8  host_mem_enc_active
	 */

	/* Save the CPU context, used for jumping back */
Loading