Commit f8863bc8 authored by Will Deacon

Merge branch 'for-next/kdump' into for-next/core

* for-next/kdump:
  arm64: kdump: defer the crashkernel reservation for platforms with no DMA memory zones
  arm64: kdump: do not map crashkernel region specifically
  arm64: kdump : take off the protection on crashkernel memory region
parents ea88dc92 504cae45
arch/arm64/include/asm/kexec.h  +0 −6
@@ -102,12 +102,6 @@ void cpu_soft_restart(unsigned long el2_switch, unsigned long entry,

int machine_kexec_post_load(struct kimage *image);
#define machine_kexec_post_load machine_kexec_post_load

void arch_kexec_protect_crashkres(void);
#define arch_kexec_protect_crashkres arch_kexec_protect_crashkres

void arch_kexec_unprotect_crashkres(void);
#define arch_kexec_unprotect_crashkres arch_kexec_unprotect_crashkres
#endif

#define ARCH_HAS_KIMAGE_ARCH
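
Note: with the arm64 declarations above removed, the architecture falls back to the generic stubs in include/linux/kexec.h, which follow the same "#define the symbol to itself" override convention visible above for machine_kexec_post_load. A paraphrased sketch of that fallback (not a verbatim copy of the upstream header):

/*
 * include/linux/kexec.h provides empty defaults when an architecture does
 * not define the override macro, so deleting the arm64 definitions simply
 * turns the crashkernel protect/unprotect hooks into no-ops.
 */
#ifndef arch_kexec_protect_crashkres
static inline void arch_kexec_protect_crashkres(void) { }
#endif

#ifndef arch_kexec_unprotect_crashkres
static inline void arch_kexec_unprotect_crashkres(void) { }
#endif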
arch/arm64/include/asm/memory.h  +0 −5
@@ -374,11 +374,6 @@ static inline void *phys_to_virt(phys_addr_t x)
})

void dump_mem_limit(void);

static inline bool defer_reserve_crashkernel(void)
{
	return IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32);
}
#endif /* !ASSEMBLY */

/*
arch/arm64/kernel/machine_kexec.c  +0 −20
@@ -268,26 +268,6 @@ void machine_crash_shutdown(struct pt_regs *regs)
	pr_info("Starting crashdump kernel...\n");
}

void arch_kexec_protect_crashkres(void)
{
	int i;

	for (i = 0; i < kexec_crash_image->nr_segments; i++)
		set_memory_valid(
			__phys_to_virt(kexec_crash_image->segment[i].mem),
			kexec_crash_image->segment[i].memsz >> PAGE_SHIFT, 0);
}

void arch_kexec_unprotect_crashkres(void)
{
	int i;

	for (i = 0; i < kexec_crash_image->nr_segments; i++)
		set_memory_valid(
			__phys_to_virt(kexec_crash_image->segment[i].mem),
			kexec_crash_image->segment[i].memsz >> PAGE_SHIFT, 1);
}

#ifdef CONFIG_HIBERNATION
/*
 * To preserve the crash dump kernel image, the relevant memory segments
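
For context, set_memory_valid() (arch/arm64/mm/pageattr.c) is what the two removed hooks used to make the loaded crash image inaccessible: it clears or sets PTE_VALID on the linear-map entries, which only works if the region is mapped at page granularity. A minimal sketch of that behaviour (illustration only, not upstream code; protect_crashkres_sketch is a made-up name):

/* Illustration of what the removed hooks did, boiled down to one call. */
static void protect_crashkres_sketch(unsigned long vaddr, int nr_pages, bool protect)
{
	/*
	 * set_memory_valid() flips PTE_VALID on the linear map, so any stray
	 * access to a protected page faults.  It requires page-granular
	 * mappings, which is why map_mem() previously had to avoid block
	 * mappings over the crashkernel region (see the mm/mmu.c hunks below).
	 */
	set_memory_valid(vaddr, nr_pages, protect ? 0 : 1);
}

Dropping the hooks removes that page-granularity requirement, which is what allows the rest of the series to keep block mappings for the crashkernel region.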
arch/arm64/mm/init.c  +3 −31
@@ -61,34 +61,8 @@ EXPORT_SYMBOL(memstart_addr);
 * unless restricted on specific platforms (e.g. 30-bit on Raspberry Pi 4).
 * In such case, ZONE_DMA32 covers the rest of the 32-bit addressable memory,
 * otherwise it is empty.
 *
 * Memory reservation for crash kernel either done early or deferred
 * depending on DMA memory zones configs (ZONE_DMA) --
 *
 * In absence of ZONE_DMA configs arm64_dma_phys_limit initialized
 * here instead of max_zone_phys().  This lets early reservation of
 * crash kernel memory which has a dependency on arm64_dma_phys_limit.
 * Reserving memory early for crash kernel allows linear creation of block
 * mappings (greater than page-granularity) for all the memory bank rangs.
 * In this scheme a comparatively quicker boot is observed.
 *
 * If ZONE_DMA configs are defined, crash kernel memory reservation
 * is delayed until DMA zone memory range size initialization performed in
 * zone_sizes_init().  The defer is necessary to steer clear of DMA zone
 * memory range to avoid overlap allocation.  So crash kernel memory boundaries
 * are not known when mapping all bank memory ranges, which otherwise means
 * not possible to exclude crash kernel range from creating block mappings
 * so page-granularity mappings are created for the entire memory range.
 * Hence a slightly slower boot is observed.
 *
 * Note: Page-granularity mappings are necessary for crash kernel memory
 * range for shrinking its size via /sys/kernel/kexec_crash_size interface.
 */
#if IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32)
phys_addr_t __ro_after_init arm64_dma_phys_limit;
#else
phys_addr_t __ro_after_init arm64_dma_phys_limit = PHYS_MASK + 1;
#endif

/* Current arm64 boot protocol requires 2MB alignment */
#define CRASH_ALIGN			SZ_2M
@@ -248,6 +222,8 @@ static void __init zone_sizes_init(void)
	if (!arm64_dma_phys_limit)
		arm64_dma_phys_limit = dma32_phys_limit;
#endif
	if (!arm64_dma_phys_limit)
		arm64_dma_phys_limit = PHYS_MASK + 1;
	max_zone_pfns[ZONE_NORMAL] = max_pfn;

	free_area_init(max_zone_pfns);
@@ -408,9 +384,6 @@ void __init arm64_memblock_init(void)

	early_init_fdt_scan_reserved_mem();

	if (!defer_reserve_crashkernel())
		reserve_crashkernel();

	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
}

@@ -457,7 +430,6 @@ void __init bootmem_init(void)
	 * request_standard_resources() depends on crashkernel's memory being
	 * reserved, so do it here.
	 */
	if (defer_reserve_crashkernel())
	reserve_crashkernel();

	memblock_dump_all();
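
Taken together, the mm/init.c hunks make the crashkernel reservation unconditionally deferred: the early call in arm64_memblock_init() is gone, and zone_sizes_init() now guarantees arm64_dma_phys_limit is set (falling back to PHYS_MASK + 1) before the deferred call runs. A sketch of the resulting ordering, reconstructed from the hunks above rather than copied verbatim from mm/init.c:

void __init bootmem_init(void)
{
	/* ... pfn limits, NUMA and sparsemem setup ... */

	zone_sizes_init();	/* arm64_dma_phys_limit is always non-zero after this */

	/*
	 * request_standard_resources() depends on crashkernel's memory being
	 * reserved, so do it here; every platform now takes this deferred path.
	 */
	reserve_crashkernel();

	memblock_dump_all();
}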
arch/arm64/mm/mmu.c  +0 −43
@@ -510,21 +510,6 @@ void __init mark_linear_text_alias_ro(void)
			    PAGE_KERNEL_RO);
}

static bool crash_mem_map __initdata;

static int __init enable_crash_mem_map(char *arg)
{
	/*
	 * Proper parameter parsing is done by reserve_crashkernel(). We only
	 * need to know if the linear map has to avoid block mappings so that
	 * the crashkernel reservations can be unmapped later.
	 */
	crash_mem_map = true;

	return 0;
}
early_param("crashkernel", enable_crash_mem_map);

static void __init map_mem(pgd_t *pgdp)
{
	static const u64 direct_map_end = _PAGE_END(VA_BITS_MIN);
@@ -554,16 +539,6 @@ static void __init map_mem(pgd_t *pgdp)
	 */
	memblock_mark_nomap(kernel_start, kernel_end - kernel_start);

#ifdef CONFIG_KEXEC_CORE
	if (crash_mem_map) {
		if (defer_reserve_crashkernel())
			flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
		else if (crashk_res.end)
			memblock_mark_nomap(crashk_res.start,
			    resource_size(&crashk_res));
	}
#endif

	/* map all the memory banks */
	for_each_mem_range(i, &start, &end) {
		if (start >= end)
@@ -590,24 +565,6 @@ static void __init map_mem(pgd_t *pgdp)
	__map_memblock(pgdp, kernel_start, kernel_end,
		       PAGE_KERNEL, NO_CONT_MAPPINGS);
	memblock_clear_nomap(kernel_start, kernel_end - kernel_start);

	/*
	 * Use page-level mappings here so that we can shrink the region
	 * in page granularity and put back unused memory to buddy system
	 * through /sys/kernel/kexec_crash_size interface.
	 */
#ifdef CONFIG_KEXEC_CORE
	if (crash_mem_map && !defer_reserve_crashkernel()) {
		if (crashk_res.end) {
			__map_memblock(pgdp, crashk_res.start,
				       crashk_res.end + 1,
				       PAGE_KERNEL,
				       NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
			memblock_clear_nomap(crashk_res.start,
					     resource_size(&crashk_res));
		}
	}
#endif
}

void mark_rodata_ro(void)
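
With the crashkernel special-casing gone from map_mem() (and the crash_mem_map early_param with it), the linear map is built the same way for every memory bank; only the kernel image alias is still excluded and remapped separately. A rough sketch of the simplified function, assembled from the surviving context lines above (not a verbatim copy of arch/arm64/mm/mmu.c; the NO_BLOCK_MAPPINGS/NO_CONT_MAPPINGS flag handling is elided):

static void __init map_mem(pgd_t *pgdp)
{
	phys_addr_t kernel_start = __pa_symbol(_stext);
	phys_addr_t kernel_end = __pa_symbol(__init_begin);
	phys_addr_t start, end;
	int flags = 0;			/* block/contiguous-mapping flags elided */
	u64 i;

	/* Keep the kernel image out of the generic loop below. */
	memblock_mark_nomap(kernel_start, kernel_end - kernel_start);

	/*
	 * Map all the memory banks, crashkernel range included, so block
	 * mappings can be used wherever possible.
	 */
	for_each_mem_range(i, &start, &end) {
		if (start >= end)
			break;
		__map_memblock(pgdp, start, end, PAGE_KERNEL, flags);
	}

	/*
	 * Map the kernel's linear alias last; it is made read-only later by
	 * mark_linear_text_alias_ro().
	 */
	__map_memblock(pgdp, kernel_start, kernel_end, PAGE_KERNEL, NO_CONT_MAPPINGS);
	memblock_clear_nomap(kernel_start, kernel_end - kernel_start);
}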