Unverified Commit e4f33a9c authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!594 Intel: SGX incremental backporting patches until upstream 6.3

Merge Pull Request from: @zhiquan1-li 
 
This PR includes incremental backporting patches, mainly covering SGX bugfixes, up to upstream v6.3.
The total number of patches is 9.

**Intel-kernel issue:** 
https://gitee.com/openeuler/intel-kernel/issues/I6X1FF

**Test:** 

1. Builds successfully for each commit
2. Kernel selftest - SGX: PASSED
```sh
cd tools/testing/selftests/sgx/
make
./test_sgx
```
3. SGX internal stress test: No new failure

**Known issue:** 
None

**Default config change:** 
None 
 
Link: https://gitee.com/openeuler/kernel/pulls/594

 

Reviewed-by: default avatarJason Zeng <jason.zeng@intel.com>
Reviewed-by: default avatarAichun Shi <aichun.shi@intel.com>
Signed-off-by: default avatarJialin Zhang <zhangjialin11@huawei.com>
parents 6dc4499a ec8db8ea
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -322,6 +322,7 @@
#define X86_FEATURE_UNRET              (11*32+15) /* "" AMD BTB untrain return */
#define X86_FEATURE_USE_IBPB_FW	       (11*32+16) /* "" Use IBPB during runtime firmware calls */
#define X86_FEATURE_RSB_VMEXIT_LITE		(11*32+17) /* "" Fill RSB on VM exit when EIBRS is enabled */
#define X86_FEATURE_SGX_EDECCSSA	(11*32+18) /* "" SGX EDECCSSA user leaf function */

/* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
#define X86_FEATURE_AVX_VNNI		(12*32+ 4) /* AVX VNNI instructions */
+26 −7
Original line number Diff line number Diff line
@@ -97,17 +97,36 @@ enum sgx_miscselect {
 * %SGX_ATTR_EINITTOKENKEY:	Allow to use token signing key that is used to
 *				sign cryptographic tokens that can be passed to
 *				EINIT as an authorization to run an enclave.
 * %SGX_ATTR_ASYNC_EXIT_NOTIFY:	Allow enclaves to be notified after an
 *				asynchronous exit has occurred.
 */
enum sgx_attribute {
	SGX_ATTR_INIT		   = BIT(0),
	SGX_ATTR_DEBUG		   = BIT(1),
	SGX_ATTR_MODE64BIT	   = BIT(2),
				  /* BIT(3) is reserved */
	SGX_ATTR_PROVISIONKEY	   = BIT(4),
	SGX_ATTR_EINITTOKENKEY	   = BIT(5),
				  /* BIT(6) is for CET */
	SGX_ATTR_KSS		   = BIT(7),
				  /* BIT(8) is reserved */
				  /* BIT(9) is reserved */
	SGX_ATTR_ASYNC_EXIT_NOTIFY = BIT(10),
};

#define SGX_ATTR_RESERVED_MASK	(BIT_ULL(3) | BIT_ULL(6) | GENMASK_ULL(63, 8))
#define SGX_ATTR_RESERVED_MASK	(BIT_ULL(3) | \
				 BIT_ULL(6) | \
				 BIT_ULL(8) | \
				 BIT_ULL(9) | \
				 GENMASK_ULL(63, 11))

#define SGX_ATTR_UNPRIV_MASK	(SGX_ATTR_DEBUG	    | \
				 SGX_ATTR_MODE64BIT | \
				 SGX_ATTR_KSS	    | \
				 SGX_ATTR_ASYNC_EXIT_NOTIFY)

#define SGX_ATTR_PRIV_MASK	(SGX_ATTR_PROVISIONKEY	| \
				 SGX_ATTR_EINITTOKENKEY)

/**
 * struct sgx_secs - SGX Enclave Control Structure (SECS)
+1 −0
Original line number Diff line number Diff line
@@ -75,6 +75,7 @@ static const struct cpuid_dep cpuid_deps[] = {
	{ X86_FEATURE_SGX_LC,			X86_FEATURE_SGX	      },
	{ X86_FEATURE_SGX1,			X86_FEATURE_SGX       },
	{ X86_FEATURE_SGX2,			X86_FEATURE_SGX1      },
	{ X86_FEATURE_SGX_EDECCSSA,		X86_FEATURE_SGX1      },
	{ X86_FEATURE_XFD,                      X86_FEATURE_XSAVES    },
	{ X86_FEATURE_XFD,                      X86_FEATURE_XGETBV1   },
	{ X86_FEATURE_AMX_TILE,                 X86_FEATURE_XFD       },
+1 −0
Original line number Diff line number Diff line
@@ -39,6 +39,7 @@ static const struct cpuid_bit cpuid_bits[] = {
	{ X86_FEATURE_PER_THREAD_MBA,	CPUID_ECX,  0, 0x00000010, 3 },
	{ X86_FEATURE_SGX1,		CPUID_EAX,  0, 0x00000012, 0 },
	{ X86_FEATURE_SGX2,		CPUID_EAX,  1, 0x00000012, 0 },
	{ X86_FEATURE_SGX_EDECCSSA,	CPUID_EAX, 11, 0x00000012, 0 },
	{ X86_FEATURE_HW_PSTATE,	CPUID_EDX,  7, 0x80000007, 0 },
	{ X86_FEATURE_CPB,		CPUID_EDX,  9, 0x80000007, 0 },
	{ X86_FEATURE_PROC_FEEDBACK,    CPUID_EDX, 11, 0x80000007, 0 },
+37 −12
Original line number Diff line number Diff line
@@ -12,6 +12,9 @@
#include "encls.h"
#include "sgx.h"

static int sgx_encl_lookup_backing(struct sgx_encl *encl, unsigned long page_index,
			    struct sgx_backing *backing);

#define PCMDS_PER_PAGE (PAGE_SIZE / sizeof(struct sgx_pcmd))
/*
 * 32 PCMD entries share a PCMD page. PCMD_FIRST_MASK is used to
@@ -344,8 +347,11 @@ static vm_fault_t sgx_encl_eaug_page(struct vm_area_struct *vma,
	}

	va_page = sgx_encl_grow(encl, false);
	if (IS_ERR(va_page))
	if (IS_ERR(va_page)) {
		if (PTR_ERR(va_page) == -EBUSY)
			vmret = VM_FAULT_NOPAGE;
		goto err_out_epc;
	}

	if (va_page)
		list_add(&va_page->list, &encl->va_pages);
@@ -674,11 +680,15 @@ const struct vm_operations_struct sgx_vm_ops = {
void sgx_encl_release(struct kref *ref)
{
	struct sgx_encl *encl = container_of(ref, struct sgx_encl, refcount);
	unsigned long max_page_index = PFN_DOWN(encl->base + encl->size - 1);
	struct sgx_va_page *va_page;
	struct sgx_encl_page *entry;
	unsigned long index;
	unsigned long count = 0;

	xa_for_each(&encl->page_array, index, entry) {
	XA_STATE(xas, &encl->page_array, PFN_DOWN(encl->base));

	xas_lock(&xas);
	xas_for_each(&xas, entry, max_page_index) {
		if (entry->epc_page) {
			/*
			 * The page and its radix tree entry cannot be freed
@@ -693,9 +703,20 @@ void sgx_encl_release(struct kref *ref)
		}

		kfree(entry);
		/* Invoke scheduler to prevent soft lockups. */
		/*
		 * Invoke scheduler on every XA_CHECK_SCHED iteration
		 * to prevent soft lockups.
		 */
		if (!(++count % XA_CHECK_SCHED)) {
			xas_pause(&xas);
			xas_unlock(&xas);

			cond_resched();

			xas_lock(&xas);
		}
	}
	xas_unlock(&xas);

	xa_destroy(&encl->page_array);

@@ -914,7 +935,7 @@ static struct page *sgx_encl_get_backing_page(struct sgx_encl *encl,
}

/**
 * sgx_encl_get_backing() - Pin the backing storage
 * __sgx_encl_get_backing() - Pin the backing storage
 * @encl:	an enclave pointer
 * @page_index:	enclave page index
 * @backing:	data for accessing backing storage for the page
@@ -926,7 +947,7 @@ static struct page *sgx_encl_get_backing_page(struct sgx_encl *encl,
 *   0 on success,
 *   -errno otherwise.
 */
static int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
static int __sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
			 struct sgx_backing *backing)
{
	pgoff_t page_pcmd_off = sgx_encl_get_backing_page_pcmd_offset(encl, page_index);
@@ -1001,7 +1022,7 @@ static struct mem_cgroup *sgx_encl_get_mem_cgroup(struct sgx_encl *encl)
}

/**
 * sgx_encl_alloc_backing() - allocate a new backing storage page
 * sgx_encl_alloc_backing() - create a new backing storage page
 * @encl:	an enclave pointer
 * @page_index:	enclave page index
 * @backing:	data for accessing backing storage for the page
@@ -1009,7 +1030,9 @@ static struct mem_cgroup *sgx_encl_get_mem_cgroup(struct sgx_encl *encl)
 * When called from ksgxd, sets the active memcg from one of the
 * mms in the enclave's mm_list prior to any backing page allocation,
 * in order to ensure that shmem page allocations are charged to the
 * enclave.
 * enclave.  Create a backing page for loading data back into an EPC page with
 * ELDU.  This function takes a reference on a new backing page which
 * must be dropped with a corresponding call to sgx_encl_put_backing().
 *
 * Return:
 *   0 on success,
@@ -1022,7 +1045,7 @@ int sgx_encl_alloc_backing(struct sgx_encl *encl, unsigned long page_index,
	struct mem_cgroup *memcg = set_active_memcg(encl_memcg);
	int ret;

	ret = sgx_encl_get_backing(encl, page_index, backing);
	ret = __sgx_encl_get_backing(encl, page_index, backing);

	set_active_memcg(memcg);
	mem_cgroup_put(encl_memcg);
@@ -1040,15 +1063,17 @@ int sgx_encl_alloc_backing(struct sgx_encl *encl, unsigned long page_index,
 * It is the caller's responsibility to ensure that it is appropriate to use
 * sgx_encl_lookup_backing() rather than sgx_encl_alloc_backing(). If lookup is
 * not used correctly, this will cause an allocation which is not accounted for.
 * This function takes a reference on an existing backing page which must be
 * dropped with a corresponding call to sgx_encl_put_backing().
 *
 * Return:
 *   0 on success,
 *   -errno otherwise.
 */
int sgx_encl_lookup_backing(struct sgx_encl *encl, unsigned long page_index,
static int sgx_encl_lookup_backing(struct sgx_encl *encl, unsigned long page_index,
			   struct sgx_backing *backing)
{
	return sgx_encl_get_backing(encl, page_index, backing);
	return __sgx_encl_get_backing(encl, page_index, backing);
}

/**
Loading