Commit 0c4f2623 authored by Vasily Gorbik
Browse files

s390: setup kernel memory layout early



Currently there are two separate places where kernel memory layout has
to be known and adjusted:
1. early kasan setup.
2. paging setup later.

Those two places had to be kept in sync and adjusted to reflect peculiar
technical details of one another. With additional factors that influence
the kernel memory layout, such as the ultravisor secure storage limit,
the complexity of keeping the two in sync grew even more.

Besides that if we look forward towards creating identity mapping and
enabling DAT before jumping into uncompressed kernel - that would also
require full knowledge of and control over kernel memory layout.

So, de-duplicate and move kernel memory layout setup logic into
the decompressor.

Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
parent b5415c8f
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -24,6 +24,7 @@ void __printf(1, 2) decompressor_printk(const char *fmt, ...);

extern const char kernel_version[];
extern unsigned long memory_limit;
extern unsigned long vmalloc_size;
extern int vmalloc_size_set;
extern int kaslr_enabled;

+4 −4
Original line number Diff line number Diff line
@@ -12,13 +12,13 @@
#include "boot.h"

char __bootdata(early_command_line)[COMMAND_LINE_SIZE];
int __bootdata(noexec_disabled);

unsigned int __bootdata_preserved(zlib_dfltcc_support) = ZLIB_DFLTCC_FULL;
struct ipl_parameter_block __bootdata_preserved(ipl_block);
int __bootdata_preserved(ipl_block_valid);
unsigned int __bootdata_preserved(zlib_dfltcc_support) = ZLIB_DFLTCC_FULL;

unsigned long __bootdata(vmalloc_size) = VMALLOC_DEFAULT_SIZE;
int __bootdata(noexec_disabled);

unsigned long vmalloc_size = VMALLOC_DEFAULT_SIZE;
unsigned long memory_limit;
int vmalloc_size_set;
int kaslr_enabled;
+88 −0
Original line number Diff line number Diff line
@@ -5,6 +5,7 @@
#include <asm/sections.h>
#include <asm/cpu_mf.h>
#include <asm/setup.h>
#include <asm/kasan.h>
#include <asm/kexec.h>
#include <asm/sclp.h>
#include <asm/diag.h>
@@ -15,6 +16,12 @@
extern char __boot_data_start[], __boot_data_end[];
extern char __boot_data_preserved_start[], __boot_data_preserved_end[];
unsigned long __bootdata_preserved(__kaslr_offset);
unsigned long __bootdata_preserved(VMALLOC_START);
unsigned long __bootdata_preserved(VMALLOC_END);
struct page *__bootdata_preserved(vmemmap);
unsigned long __bootdata_preserved(vmemmap_size);
unsigned long __bootdata_preserved(MODULES_VADDR);
unsigned long __bootdata_preserved(MODULES_END);
unsigned long __bootdata(ident_map_size);

u64 __bootdata_preserved(stfle_fac_list[16]);
@@ -172,6 +179,86 @@ static void setup_ident_map_size(unsigned long max_physmem_end)
#endif
}

/*
 * Compute the kernel virtual memory layout in the decompressor: the
 * paging depth (3 vs. 4 levels), the modules and vmalloc areas, the
 * vmemmap placement and the final identity mapping size.  The results
 * are exported to the decompressed kernel via the __bootdata* variables
 * (VMALLOC_START/END, MODULES_VADDR/END, vmemmap, vmemmap_size,
 * ident_map_size) declared above.
 *
 * Layout from the top of the address space downwards:
 * modules (MODULES_LEN) | vmalloc | [kasan shadow] ... vmemmap | identity map
 */
static void setup_kernel_memory_layout(void)
{
	bool vmalloc_size_verified = false;
	unsigned long vmemmap_off;
	unsigned long vspace_left;
	unsigned long rte_size;
	unsigned long pages;
	unsigned long vmax;

	pages = ident_map_size / PAGE_SIZE;
	/* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
	vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page);

	/* choose kernel address space layout: 4 or 3 levels. */
	vmemmap_off = round_up(ident_map_size, _REGION3_SIZE);
	if (IS_ENABLED(CONFIG_KASAN) ||
	    vmalloc_size > _REGION2_SIZE ||
	    vmemmap_off + vmemmap_size + vmalloc_size + MODULES_LEN > _REGION2_SIZE)
		vmax = _REGION1_SIZE; /* everything won't fit: use 4-level paging */
	else
		vmax = _REGION2_SIZE; /* 3-level paging is sufficient */

	/* keep vmemmap_off aligned to a top level region table entry */
	rte_size = vmax == _REGION1_SIZE ? _REGION2_SIZE : _REGION3_SIZE;
	MODULES_END = vmax;
	if (is_prot_virt_host()) {
		/*
		 * forcing modules and vmalloc area under the ultravisor
		 * secure storage limit, so that any vmalloc allocation
		 * we do could be used to back secure guest storage.
		 */
		adjust_to_uv_max(&MODULES_END);
	}

#ifdef CONFIG_KASAN
	if (MODULES_END < vmax) {
		/* force vmalloc and modules below kasan shadow */
		MODULES_END = min(MODULES_END, KASAN_SHADOW_START);
	} else {
		/*
		 * leave vmalloc and modules above kasan shadow but make
		 * sure they don't overlap with it
		 */
		vmalloc_size = min(vmalloc_size, vmax - KASAN_SHADOW_END - MODULES_LEN);
		vmalloc_size_verified = true;
		vspace_left = KASAN_SHADOW_START;
	}
#endif
	/* modules occupy the topmost MODULES_LEN bytes; vmalloc ends below them */
	MODULES_VADDR = MODULES_END - MODULES_LEN;
	VMALLOC_END = MODULES_VADDR;

	if (vmalloc_size_verified) {
		/* vmalloc_size was already clamped against the kasan shadow above */
		VMALLOC_START = VMALLOC_END - vmalloc_size;
	} else {
		vmemmap_off = round_up(ident_map_size, rte_size);

		if (vmemmap_off + vmemmap_size > VMALLOC_END ||
		    vmalloc_size > VMALLOC_END - vmemmap_off - vmemmap_size) {
			/*
			 * allow vmalloc area to occupy up to 1/2 of
			 * the rest virtual space left.
			 */
			vmalloc_size = min(vmalloc_size, VMALLOC_END / 2);
		}
		VMALLOC_START = VMALLOC_END - vmalloc_size;
		vspace_left = VMALLOC_START;
	}

	/*
	 * Split the remaining virtual space between the identity map and
	 * vmemmap: each mapped page costs PAGE_SIZE plus one struct page.
	 */
	pages = vspace_left / (PAGE_SIZE + sizeof(struct page));
	pages = SECTION_ALIGN_UP(pages);
	vmemmap_off = round_up(vspace_left - pages * sizeof(struct page), rte_size);
	/* keep vmemmap left most starting from a fresh region table entry */
	vmemmap_off = min(vmemmap_off, round_up(ident_map_size, rte_size));
	/* take care that identity map is lower than vmemmap */
	ident_map_size = min(ident_map_size, vmemmap_off);
	vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page);
	VMALLOC_START = max(vmemmap_off + vmemmap_size, VMALLOC_START);
	vmemmap = (struct page *)vmemmap_off;
}

/*
 * This function clears the BSS section of the decompressed Linux kernel and NOT the decompressor's.
 */
@@ -211,6 +298,7 @@ void startup_kernel(void)
	parse_boot_command_line();
	setup_ident_map_size(detect_memory());
	setup_vmalloc_size();
	setup_kernel_memory_layout();

	random_lma = __kaslr_offset = 0;
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_enabled) {
+25 −0
Original line number Diff line number Diff line
@@ -44,3 +44,28 @@ void uv_query_info(void)
		prot_virt_guest = 1;
#endif
}

#if IS_ENABLED(CONFIG_KVM)
/*
 * Tell whether the ultravisor imposes a secure storage address limit.
 *
 * keep these conditions in line with setup_uv()
 */
static bool has_uv_sec_stor_limit(void)
{
	/* a limit only applies on a protected virtualization host */
	if (!is_prot_virt_host() || is_prot_virt_guest() || !test_facility(158))
		return false;

	return uv_info.max_sec_stor_addr != 0;
}

void adjust_to_uv_max(unsigned long *vmax)
{
	if (has_uv_sec_stor_limit())
		*vmax = min_t(unsigned long, *vmax, uv_info.max_sec_stor_addr);
}
#endif
+0 −1
Original line number Diff line number Diff line
@@ -16,7 +16,6 @@
extern void kasan_early_init(void);
extern void kasan_copy_shadow_mapping(void);
extern void kasan_free_early_identity(void);
extern unsigned long kasan_vmax;

/*
 * Estimate kasan memory requirements, which it will reserve
Loading