Commit 5cd48014 authored by Xin Jiang's avatar Xin Jiang Committed by hanliyang
Browse files

x86: Update memory shared/private attribute in early boot for CSV3 guest

hygon inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IAYGKY


CVE: NA

---------------------------

Add functions to change the memory shared/private attribute in
early boot code.

When CSV3 is active, the decrypted memory must be mapped to normal
(non-isolated) memory in nested page table so that hypervisor and
guest can access shared data.

However, an in-place encrypt/decrypt operation on the memory is not
applicable in CSV3, because a CSV3 guest's private page will not be
changed to a shared page until the secure processor updates the NPT.

Also, new per-CPU secure call pages should be initialized so that
multiple CPUs can issue secure call commands simultaneously.

Signed-off-by: Xin Jiang <jiangxin@hygon.cn>
Signed-off-by: hanliyang <hanliyang@hygon.cn>
parent f90a5770
Loading
Loading
Loading
Loading
+5 −0
Original line number Diff line number Diff line
@@ -63,6 +63,8 @@ void __init csv_early_reset_memory(struct boot_params *bp);
void __init csv_early_update_memory_enc(u64 vaddr, u64 pages);
void __init csv_early_update_memory_dec(u64 vaddr, u64 pages);

void __init csv_early_memory_enc_dec(u64 vaddr, u64 size, bool enc);

#else	/* !CONFIG_HYGON_CSV */

static inline bool csv3_active(void) { return false; }
@@ -71,6 +73,9 @@ static inline void __init csv_early_reset_memory(struct boot_params *bp) { }
static inline void __init csv_early_update_memory_enc(u64 vaddr, u64 pages) { }
static inline void __init csv_early_update_memory_dec(u64 vaddr, u64 pages) { }

static inline void __init csv_early_memory_enc_dec(u64 vaddr, u64 size,
						   bool enc) { }

#endif	/* CONFIG_HYGON_CSV */

#endif	/* __ASSEMBLY__ */
+186 −0
Original line number Diff line number Diff line
@@ -19,6 +19,15 @@ struct secure_call_pages {
	struct csv3_secure_call_cmd page_b;
};

static u32 csv3_percpu_secure_call_init __initdata;
static u32 early_secure_call_page_idx __initdata;

static DEFINE_PER_CPU(struct secure_call_pages*, secure_call_data);
static DEFINE_PER_CPU(int, secure_call_page_idx);

typedef void (*csv3_secure_call_func)(u64 base_address, u64 num_pages,
				      enum csv3_secure_command_type cmd_type);

void __init csv_early_reset_memory(struct boot_params *bp)
{
	if (!csv3_active())
@@ -47,3 +56,180 @@ void __init csv_early_update_memory_enc(u64 vaddr, u64 pages)
		csv3_early_secure_call_ident_map(__pa(vaddr), pages,
						 CSV3_SECURE_CMD_ENC);
}

/*
 * csv3_alloc_secure_call_data() - allocate the secure call page pair for @cpu.
 *
 * Allocates one struct secure_call_pages (page_a + page_b) from memblock,
 * PAGE_SIZE aligned so both pages can be registered with the secure
 * processor as whole pages, and records it in the per-cpu pointer.
 *
 * Panics on allocation failure: CSV3 cannot operate without its secure
 * call channel.
 */
static void __init csv3_alloc_secure_call_data(int cpu)
{
	struct secure_call_pages *data;

	data = memblock_alloc(sizeof(*data), PAGE_SIZE);
	if (!data)
		panic("Can't allocate CSV3 secure call data");

	per_cpu(secure_call_data, cpu) = data;
}

/*
 * csv3_secure_call_update_table() - register per-cpu secure call pages.
 *
 * Uses the boot secure call page pair (csv3_boot_sc_page_a/b) to issue a
 * CSV3_SECURE_CMD_UPDATE_SECURE_CALL_TABLE command that publishes the
 * physical addresses of the newly allocated per-cpu page pairs to the
 * secure processor.
 *
 * The command is considered consumed once the value read back from the
 * read page no longer echoes the command type (same convention as
 * __csv3_early_secure_call()).
 */
static void __init csv3_secure_call_update_table(void)
{
	int cpu;
	u32 nums = 0;
	struct secure_call_pages *data;
	struct csv3_secure_call_cmd *page_rd;
	struct csv3_secure_call_cmd *page_wr;
	u32 cmd_ack;

	if (!csv3_active())
		return;

	page_rd = (void *)early_memremap_encrypted(csv3_boot_sc_page_a, PAGE_SIZE);
	page_wr = (void *)early_memremap_encrypted(csv3_boot_sc_page_b, PAGE_SIZE);

	/*
	 * Allocate and fill in the per-cpu table entries exactly once.
	 * Doing this inside the retry loop (as before) re-allocated the
	 * per-cpu pages and leaked memblock memory on every retry.
	 */
	for_each_possible_cpu(cpu) {
		if (cpu >= SECURE_CALL_ENTRY_MAX)
			panic("csv does not support cpus > %d\n",
			      SECURE_CALL_ENTRY_MAX);
		csv3_alloc_secure_call_data(cpu);
		data = per_cpu(secure_call_data, cpu);
		per_cpu(secure_call_page_idx, cpu) = 0;
		page_wr->entry[cpu].base_address = __pa(data);
		page_wr->entry[cpu].size = PAGE_SIZE * 2;
		nums++;
	}

	while (1) {
		/* (Re)issue the command; the entries were filled above. */
		page_wr->cmd_type = CSV3_SECURE_CMD_UPDATE_SECURE_CALL_TABLE;
		page_wr->nums = nums;

		/*
		 * Write command in page_wr must be done before retrieve cmd
		 * ack from page_rd, and it is ensured by the mb below.
		 */
		mb();

		cmd_ack = page_rd->cmd_type;
		if (cmd_ack != CSV3_SECURE_CMD_UPDATE_SECURE_CALL_TABLE)
			break;
	}

	early_memunmap(page_rd, PAGE_SIZE);
	early_memunmap(page_wr, PAGE_SIZE);
}

/**
 * __csv3_early_secure_call - issue secure call command at the stage where new
 *			kernel page table is created and early identity page
 *			table is deprecated.
 * @base_address:	Start address of the specified memory range.
 * @num_pages:		number of the specific pages.
 * @cmd_type:		Secure call cmd type.
 *
 * The two boot secure call pages act as a ping-pong mailbox: one page is
 * written with the command, the other is read for the acknowledgement,
 * and their roles are swapped after every call (early_secure_call_page_idx).
 * The command is considered consumed once the value read back no longer
 * echoes @cmd_type.
 */
static void __init __csv3_early_secure_call(u64 base_address, u64 num_pages,
					    enum csv3_secure_command_type cmd_type)
{
	struct csv3_secure_call_cmd *page_rd;
	struct csv3_secure_call_cmd *page_wr;
	u32 cmd_ack;

	/* -1ul marks the boot secure call pages as absent/uninitialized. */
	if (csv3_boot_sc_page_a == -1ul || csv3_boot_sc_page_b == -1ul)
		return;

	/* Lazily register the per-cpu secure call table on first use. */
	if (!csv3_percpu_secure_call_init) {
		csv3_secure_call_update_table();
		csv3_percpu_secure_call_init = 1;
	}

	/* Select read/write roles for this round of the ping-pong pair. */
	if (early_secure_call_page_idx == 0) {
		page_rd = (void *)early_memremap_encrypted(csv3_boot_sc_page_a,
							   PAGE_SIZE);
		page_wr = (void *)early_memremap_encrypted(csv3_boot_sc_page_b,
							   PAGE_SIZE);
	} else {
		page_wr = (void *)early_memremap_encrypted(csv3_boot_sc_page_a,
							   PAGE_SIZE);
		page_rd = (void *)early_memremap_encrypted(csv3_boot_sc_page_b,
							   PAGE_SIZE);
	}

	/* Re-issue the command until the secure processor consumes it. */
	while (1) {
		page_wr->cmd_type = (u32)cmd_type;
		page_wr->nums = 1;
		page_wr->entry[0].base_address = base_address;
		page_wr->entry[0].size = num_pages << PAGE_SHIFT;

		/*
		 * Write command in page_wr must be done before retrieve cmd
		 * ack from page_rd, and it is ensured by the mb below.
		 */
		mb();

		cmd_ack = page_rd->cmd_type;
		if (cmd_ack != cmd_type)
			break;
	}

	early_memunmap(page_rd, PAGE_SIZE);
	early_memunmap(page_wr, PAGE_SIZE);

	/* Swap read/write roles for the next secure call. */
	early_secure_call_page_idx ^= 1;
}


static void __csv3_memory_enc_dec(csv3_secure_call_func secure_call, u64 vaddr,
				  u64 pages, bool enc)
{
	u64 vaddr_end, vaddr_next;
	u64 psize, pmask;
	u64 last_paddr, paddr;
	u64 last_psize = 0;
	pte_t *kpte;
	int level;
	enum csv3_secure_command_type cmd_type;

	cmd_type = enc ? CSV3_SECURE_CMD_ENC : CSV3_SECURE_CMD_DEC;
	vaddr_next = vaddr;
	vaddr_end = vaddr + (pages << PAGE_SHIFT);
	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
		kpte = lookup_address(vaddr, &level);
		if (!kpte || pte_none(*kpte)) {
			panic("invalid pte, vaddr 0x%llx\n", vaddr);
			goto out;
		}

		psize = page_level_size(level);
		pmask = page_level_mask(level);

		vaddr_next = (vaddr & pmask) + psize;
		paddr = ((pte_pfn(*kpte) << PAGE_SHIFT) & pmask) +
			(vaddr & ~pmask);
		psize -= (vaddr & ~pmask);

		if (vaddr_end - vaddr < psize)
			psize = vaddr_end - vaddr;
		if (last_psize == 0 || (last_paddr + last_psize) == paddr) {
			last_paddr = (last_psize == 0 ? paddr : last_paddr);
			last_psize += psize;
		} else {
			secure_call(last_paddr, last_psize >> PAGE_SHIFT,
				    cmd_type);
			last_paddr = paddr;
			last_psize = psize;
		}
	}

	if (last_psize)
		secure_call(last_paddr, last_psize >> PAGE_SHIFT, cmd_type);

out:
	return;
}

/*
 * csv_early_memory_enc_dec() - update the shared/private attribute of a
 * virtual address range during early boot on a CSV3 guest.
 *
 * @vaddr:	start virtual address (need not be page aligned).
 * @size:	length of the range in bytes.
 * @enc:	true to mark private (encrypted), false for shared.
 *
 * No-op unless CSV3 is active.
 */
void __init csv_early_memory_enc_dec(u64 vaddr, u64 size, bool enc)
{
	u64 nr_pages;

	if (!csv3_active())
		return;

	/* Round the range out to whole pages before issuing secure calls. */
	nr_pages = PAGE_ALIGN(size + (vaddr & ~PAGE_MASK)) >> PAGE_SHIFT;
	__csv3_memory_enc_dec(__csv3_early_secure_call, vaddr & PAGE_MASK,
			      nr_pages, enc);
}
+14 −0
Original line number Diff line number Diff line
@@ -35,6 +35,7 @@
#include <asm/cmdline.h>
#include <asm/sev.h>
#include <asm/ia32.h>
#include <asm/csv.h>

#include "mm_internal.h"

@@ -377,6 +378,9 @@ static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
	 */
	clflush_cache_range(__va(pa), size);

	if (csv3_active())
		goto skip_in_place_enc_dec;

	/* Encrypt/decrypt the contents in-place */
	if (enc) {
		sme_early_encrypt(pa, size);
@@ -390,6 +394,7 @@ static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
		early_snp_set_memory_shared((unsigned long)__va(pa), pa, 1);
	}

skip_in_place_enc_dec:
	/* Change the page encryption mask. */
	new_pte = pfn_pte(pfn, new_prot);
	set_pte_atomic(kpte, new_pte);
@@ -469,6 +474,15 @@ static int __init early_set_memory_enc_dec(unsigned long vaddr,
	early_set_mem_enc_dec_hypercall(start, size, enc);
out:
	__flush_tlb_all();

	/*
	 * On CSV3, the shared and private page attr changes should be managed
	 * by secure processor. Private pages live in isolated memory region,
	 * while shared pages live out of isolated memory region.
	 */
	if (csv3_active())
		csv_early_memory_enc_dec(vaddr_end - size, size, enc);

	return ret;
}