Commit d2285493 authored by Jarkko Sakkinen's avatar Jarkko Sakkinen Committed by Borislav Petkov
Browse files

x86/sgx: Add SGX page allocator functions



Add functions for runtime allocation and free.

This allocator and its algorithms are as simple as it gets.  They do a
linear search across all EPC sections and find the first free page.  They
are not NUMA-aware and only hand out individual pages.  The SGX hardware
does not support large pages, so something more complicated like a buddy
allocator is unwarranted.

The free function (sgx_free_epc_page()) implicitly calls ENCLS[EREMOVE],
which returns the page to the uninitialized state.  This ensures that the
page is ready for use at the next allocation.

Co-developed-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Acked-by: Jethro Beekman <jethro@fortanix.com>
Link: https://lkml.kernel.org/r/20201112220135.165028-10-jarkko@kernel.org
parent 38853a30
Loading
Loading
Loading
Loading
+65 −0
Original line number Original line Diff line number Diff line
@@ -85,6 +85,71 @@ static bool __init sgx_page_reclaimer_init(void)
	return true;
	return true;
}
}


/*
 * Pop the first free EPC page off @section's free list, or return NULL if
 * the section has none left.  The section lock serializes against
 * concurrent allocation and free.
 */
static struct sgx_epc_page *__sgx_alloc_epc_page_from_section(struct sgx_epc_section *section)
{
	struct sgx_epc_page *epc_page = NULL;

	spin_lock(&section->lock);

	if (!list_empty(&section->page_list)) {
		epc_page = list_first_entry(&section->page_list,
					    struct sgx_epc_page, list);
		list_del_init(&epc_page->list);
	}

	spin_unlock(&section->lock);

	return epc_page;
}

/**
 * __sgx_alloc_epc_page() - Allocate an EPC page
 *
 * Iterate through EPC sections and borrow a free EPC page to the caller. When a
 * page is no longer needed it must be released with sgx_free_epc_page().
 *
 * Return:
 *   an EPC page,
 *   -errno on error
 */
struct sgx_epc_page *__sgx_alloc_epc_page(void)
{
	struct sgx_epc_section *section;
	struct sgx_epc_page *page;
	int i;

	for (i = 0; i < sgx_nr_epc_sections; i++) {
		section = &sgx_epc_sections[i];

		page = __sgx_alloc_epc_page_from_section(section);
		if (page)
			return page;
	}

	return ERR_PTR(-ENOMEM);
}

/**
 * sgx_free_epc_page() - Free an EPC page
 * @page:	an EPC page
 *
 * EREMOVE the page, returning it to the uninitialized state, and put it
 * back on its section's free list.  On an EREMOVE failure the page is
 * leaked (never re-listed) after a one-time warning.
 */
void sgx_free_epc_page(struct sgx_epc_page *page)
{
	struct sgx_epc_section *section = &sgx_epc_sections[page->section];
	int ret = __eremove(sgx_get_epc_virt_addr(page));

	if (!WARN_ONCE(ret, "EREMOVE returned %d (0x%x)", ret, ret)) {
		spin_lock(&section->lock);
		list_add_tail(&page->list, &section->page_list);
		spin_unlock(&section->lock);
	}
}

static bool __init sgx_setup_epc_section(u64 phys_addr, u64 size,
static bool __init sgx_setup_epc_section(u64 phys_addr, u64 size,
					 unsigned long index,
					 unsigned long index,
					 struct sgx_epc_section *section)
					 struct sgx_epc_section *section)
+3 −0
Original line number Original line Diff line number Diff line
@@ -57,4 +57,7 @@ static inline void *sgx_get_epc_virt_addr(struct sgx_epc_page *page)
	return section->virt_addr + index * PAGE_SIZE;
	return section->virt_addr + index * PAGE_SIZE;
}
}


struct sgx_epc_page *__sgx_alloc_epc_page(void);
void sgx_free_epc_page(struct sgx_epc_page *page);

#endif /* _X86_SGX_H */
#endif /* _X86_SGX_H */