Commit 8c37cb7d authored by Vasily Gorbik's avatar Vasily Gorbik Committed by Heiko Carstens
Browse files

s390/boot: rename mem_detect to physmem_info



In preparation for extending mem_detect with additional information such as
reserved ranges, rename it to the more generic physmem_info. This new naming
also helps to avoid confusion by using more exact terms like "physmem
online ranges", etc.

Acked-by: Heiko Carstens <hca@linux.ibm.com>
Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
parent 53fcc7db
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -35,7 +35,7 @@ endif

CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char

obj-y	:= head.o als.o startup.o mem_detect.o ipl_parm.o ipl_report.o vmem.o
obj-y	:= head.o als.o startup.o physmem_info.o ipl_parm.o ipl_report.o vmem.o
obj-y	+= string.o ebcdic.o sclp_early_core.o mem.o ipl_vmparm.o cmdline.o
obj-y	+= version.o pgm_check_info.o ctype.o ipl_data.o machine_kexec_reloc.o
obj-$(findstring y, $(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) $(CONFIG_PGSTE))	+= uv.o
+1 −1
Original line number Diff line number Diff line
@@ -34,7 +34,7 @@ struct vmlinux_info {

void startup_kernel(void);
unsigned long detect_memory(unsigned long *safe_addr);
void mem_detect_set_usable_limit(unsigned long limit);
void physmem_set_usable_limit(unsigned long limit);
bool is_ipl_block_dump(void);
void store_ipl_parmblock(void);
unsigned long read_ipl_report(unsigned long safe_addr);
+7 −7
Original line number Diff line number Diff line
@@ -3,7 +3,7 @@
 * Copyright IBM Corp. 2019
 */
#include <linux/pgtable.h>
#include <asm/mem_detect.h>
#include <asm/physmem_info.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/sclp.h>
@@ -93,7 +93,7 @@ static int get_random(unsigned long limit, unsigned long *value)

/*
 * To randomize kernel base address we have to consider several facts:
 * 1. physical online memory might not be continuous and have holes. mem_detect
 * 1. physical online memory might not be continuous and have holes. physmem
 *    info contains list of online memory ranges we should consider.
 * 2. we have several memory regions which are occupied and we should not
 *    overlap and destroy them. Currently safe_addr tells us the border below
@@ -108,7 +108,7 @@ static int get_random(unsigned long limit, unsigned long *value)
 *    (16 pages when the kernel is built with kasan enabled)
 * Assumptions:
 * 1. kernel size (including .bss size) and upper memory limit are page aligned.
 * 2. mem_detect memory region start is THREAD_SIZE aligned / end is PAGE_SIZE
 * 2. physmem online region start is THREAD_SIZE aligned / end is PAGE_SIZE
 *    aligned (in practice memory configurations granularity on z/VM and LPAR
 *    is 1mb).
 *
@@ -132,7 +132,7 @@ static unsigned long count_valid_kernel_positions(unsigned long kernel_size,
	unsigned long start, end, pos = 0;
	int i;

	for_each_mem_detect_usable_block(i, &start, &end) {
	for_each_physmem_usable_range(i, &start, &end) {
		if (_min >= end)
			continue;
		if (start >= _max)
@@ -153,7 +153,7 @@ static unsigned long position_to_address(unsigned long pos, unsigned long kernel
	unsigned long start, end;
	int i;

	for_each_mem_detect_usable_block(i, &start, &end) {
	for_each_physmem_usable_range(i, &start, &end) {
		if (_min >= end)
			continue;
		if (start >= _max)
@@ -172,8 +172,8 @@ static unsigned long position_to_address(unsigned long pos, unsigned long kernel

unsigned long get_random_base(unsigned long safe_addr)
{
	unsigned long usable_total = get_mem_detect_usable_total();
	unsigned long memory_limit = get_mem_detect_end();
	unsigned long usable_total = get_physmem_usable_total();
	unsigned long memory_limit = get_physmem_usable_end();
	unsigned long base_pos, max_pos, kernel_size;
	int i;

+38 −38
Original line number Diff line number Diff line
@@ -5,44 +5,44 @@
#include <asm/processor.h>
#include <asm/sclp.h>
#include <asm/sections.h>
#include <asm/mem_detect.h>
#include <asm/physmem_info.h>
#include <asm/sparsemem.h>
#include "decompressor.h"
#include "boot.h"

struct mem_detect_info __bootdata(mem_detect);
struct physmem_info __bootdata(physmem_info);

/* up to 256 storage elements, 1020 subincrements each */
#define ENTRIES_EXTENDED_MAX						       \
	(256 * (1020 / 2) * sizeof(struct mem_detect_block))
	(256 * (1020 / 2) * sizeof(struct physmem_range))

static struct mem_detect_block *__get_mem_detect_block_ptr(u32 n)
static struct physmem_range *__get_physmem_range_ptr(u32 n)
{
	if (n < MEM_INLINED_ENTRIES)
		return &mem_detect.entries[n];
	return &mem_detect.entries_extended[n - MEM_INLINED_ENTRIES];
		return &physmem_info.online[n];
	return &physmem_info.online_extended[n - MEM_INLINED_ENTRIES];
}

/*
 * sequential calls to add_mem_detect_block with adjacent memory areas
 * are merged together into single memory block.
 * sequential calls to add_physmem_online_range with adjacent memory ranges
 * are merged together into single memory range.
 */
void add_mem_detect_block(u64 start, u64 end)
void add_physmem_online_range(u64 start, u64 end)
{
	struct mem_detect_block *block;
	struct physmem_range *range;

	if (mem_detect.count) {
		block = __get_mem_detect_block_ptr(mem_detect.count - 1);
		if (block->end == start) {
			block->end = end;
	if (physmem_info.range_count) {
		range = __get_physmem_range_ptr(physmem_info.range_count - 1);
		if (range->end == start) {
			range->end = end;
			return;
		}
	}

	block = __get_mem_detect_block_ptr(mem_detect.count);
	block->start = start;
	block->end = end;
	mem_detect.count++;
	range = __get_physmem_range_ptr(physmem_info.range_count);
	range->start = start;
	range->end = end;
	physmem_info.range_count++;
}

static int __diag260(unsigned long rx1, unsigned long rx2)
@@ -95,7 +95,7 @@ static int diag260(void)
		return -1;

	for (i = 0; i < min_t(int, rc, ARRAY_SIZE(storage_extents)); i++)
		add_mem_detect_block(storage_extents[i].start, storage_extents[i].end + 1);
		add_physmem_online_range(storage_extents[i].start, storage_extents[i].end + 1);
	return 0;
}

@@ -148,44 +148,44 @@ unsigned long detect_memory(unsigned long *safe_addr)
	unsigned long max_physmem_end = 0;

	sclp_early_get_memsize(&max_physmem_end);
	mem_detect.entries_extended = (struct mem_detect_block *)ALIGN(*safe_addr, sizeof(u64));
	physmem_info.online_extended = (struct physmem_range *)ALIGN(*safe_addr, sizeof(u64));

	if (!sclp_early_read_storage_info()) {
		mem_detect.info_source = MEM_DETECT_SCLP_STOR_INFO;
		physmem_info.info_source = MEM_DETECT_SCLP_STOR_INFO;
	} else if (!diag260()) {
		mem_detect.info_source = MEM_DETECT_DIAG260;
		max_physmem_end = max_physmem_end ?: get_mem_detect_end();
		physmem_info.info_source = MEM_DETECT_DIAG260;
		max_physmem_end = max_physmem_end ?: get_physmem_usable_end();
	} else if (max_physmem_end) {
		add_mem_detect_block(0, max_physmem_end);
		mem_detect.info_source = MEM_DETECT_SCLP_READ_INFO;
		add_physmem_online_range(0, max_physmem_end);
		physmem_info.info_source = MEM_DETECT_SCLP_READ_INFO;
	} else {
		max_physmem_end = search_mem_end();
		add_mem_detect_block(0, max_physmem_end);
		mem_detect.info_source = MEM_DETECT_BIN_SEARCH;
		add_physmem_online_range(0, max_physmem_end);
		physmem_info.info_source = MEM_DETECT_BIN_SEARCH;
	}

	if (mem_detect.count > MEM_INLINED_ENTRIES) {
		*safe_addr += (mem_detect.count - MEM_INLINED_ENTRIES) *
			     sizeof(struct mem_detect_block);
	if (physmem_info.range_count > MEM_INLINED_ENTRIES) {
		*safe_addr += (physmem_info.range_count - MEM_INLINED_ENTRIES) *
			      sizeof(struct physmem_range);
	}

	return max_physmem_end;
}

void mem_detect_set_usable_limit(unsigned long limit)
void physmem_set_usable_limit(unsigned long limit)
{
	struct mem_detect_block *block;
	struct physmem_range *range;
	int i;

	/* make sure mem_detect.usable ends up within online memory block */
	for (i = 0; i < mem_detect.count; i++) {
		block = __get_mem_detect_block_ptr(i);
		if (block->start >= limit)
	for (i = 0; i < physmem_info.range_count; i++) {
		range = __get_physmem_range_ptr(i);
		if (range->start >= limit)
			break;
		if (block->end >= limit) {
			mem_detect.usable = limit;
		if (range->end >= limit) {
			physmem_info.usable = limit;
			break;
		}
		mem_detect.usable = block->end;
		physmem_info.usable = range->end;
	}
}
+3 −3
Original line number Diff line number Diff line
@@ -12,7 +12,7 @@
#include <asm/diag.h>
#include <asm/uv.h>
#include <asm/abs_lowcore.h>
#include <asm/mem_detect.h>
#include <asm/physmem_info.h>
#include "decompressor.h"
#include "boot.h"
#include "uv.h"
@@ -139,7 +139,7 @@ static void handle_relocs(unsigned long offset)
 *
 * Consider the following factors:
 * 1. max_physmem_end - end of physical memory online or standby.
 *    Always <= end of the last online memory block (get_mem_detect_end()).
 *    Always >= end of the last online memory range (get_physmem_online_end()).
 * 2. CONFIG_MAX_PHYSMEM_BITS - the maximum size of physical memory the
 *    kernel is able to support.
 * 3. "mem=" kernel command line option which limits physical memory usage.
@@ -303,7 +303,7 @@ void startup_kernel(void)
	setup_ident_map_size(max_physmem_end);
	setup_vmalloc_size();
	asce_limit = setup_kernel_memory_layout();
	mem_detect_set_usable_limit(ident_map_size);
	physmem_set_usable_limit(ident_map_size);

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_enabled) {
		random_lma = get_random_base(safe_addr);
Loading