Commit d8a2c0fb authored by Will Deacon

Merge branch 'for-next/kexec' into for-next/core

* for-next/kexec:
  arm64: trans_pgd: remove trans_pgd_map_page()
  arm64: kexec: remove cpu-reset.h
  arm64: kexec: remove the pre-kexec PoC maintenance
  arm64: kexec: keep MMU enabled during kexec relocation
  arm64: kexec: install a copy of the linear-map
  arm64: kexec: use ld script for relocation function
  arm64: kexec: relocate in EL1 mode
  arm64: kexec: configure EL2 vectors for kexec
  arm64: kexec: pass kimage as the only argument to relocation function
  arm64: kexec: Use dcache ops macros instead of open-coding
  arm64: kexec: skip relocation code for inplace kexec
  arm64: kexec: flush image and lists during kexec load time
  arm64: hibernate: abstract ttbr0 setup function
  arm64: trans_pgd: hibernate: Add trans_pgd_copy_el2_vectors
  arm64: kernel: add helper for booted at EL2 and not VHE
parents 99fe09c8 6091dd9e
arch/arm64/Kconfig  +1 −1
@@ -1135,7 +1135,7 @@ config CRASH_DUMP
 
 config TRANS_TABLE
 	def_bool y
-	depends on HIBERNATION
+	depends on HIBERNATION || KEXEC_CORE
 
 config XEN_DOM0
 	def_bool y
arch/arm64/include/asm/assembler.h  +42 −7
@@ -380,19 +380,19 @@ alternative_endif
 
 /*
  * Macro to perform a data cache maintenance for the interval
- * [start, end)
+ * [start, end) with dcache line size explicitly provided.
  *
  * 	op:		operation passed to dc instruction
  * 	domain:		domain used in dsb instruction
  * 	start:          starting virtual address of the region
  * 	end:            end virtual address of the region
+ *	linesz:		dcache line size
  * 	fixup:		optional label to branch to on user fault
- * 	Corrupts:       start, end, tmp1, tmp2
+ * 	Corrupts:       start, end, tmp
  */
-	.macro dcache_by_line_op op, domain, start, end, tmp1, tmp2, fixup
-	dcache_line_size \tmp1, \tmp2
-	sub	\tmp2, \tmp1, #1
-	bic	\start, \start, \tmp2
+	.macro dcache_by_myline_op op, domain, start, end, linesz, tmp, fixup
+	sub	\tmp, \linesz, #1
+	bic	\start, \start, \tmp
 .Ldcache_op\@:
 	.ifc	\op, cvau
 	__dcache_op_workaround_clean_cache \op, \start
@@ -411,7 +411,7 @@ alternative_endif
 	.endif
 	.endif
 	.endif
-	add	\start, \start, \tmp1
+	add	\start, \start, \linesz
 	cmp	\start, \end
 	b.lo	.Ldcache_op\@
 	dsb	\domain
@@ -419,6 +419,22 @@ alternative_endif
 	_cond_extable .Ldcache_op\@, \fixup
 	.endm
 
+/*
+ * Macro to perform a data cache maintenance for the interval
+ * [start, end)
+ *
+ * 	op:		operation passed to dc instruction
+ * 	domain:		domain used in dsb instruction
+ * 	start:          starting virtual address of the region
+ * 	end:            end virtual address of the region
+ * 	fixup:		optional label to branch to on user fault
+ * 	Corrupts:       start, end, tmp1, tmp2
+ */
+	.macro dcache_by_line_op op, domain, start, end, tmp1, tmp2, fixup
+	dcache_line_size \tmp1, \tmp2
+	dcache_by_myline_op \op, \domain, \start, \end, \tmp1, \tmp2, \fixup
+	.endm
+
 /*
  * Macro to perform an instruction cache maintenance for the interval
  * [start, end)
@@ -442,6 +458,25 @@ alternative_endif
 	_cond_extable .Licache_op\@, \fixup
 	.endm
 
+/*
+ * To prevent the possibility of old and new partial table walks being visible
+ * in the tlb, switch the ttbr to a zero page when we invalidate the old
+ * records. D4.7.1 'General TLB maintenance requirements' in ARM DDI 0487A.i
+ * Even switching to our copied tables will cause a changed output address at
+ * each stage of the walk.
+ */
+	.macro break_before_make_ttbr_switch zero_page, page_table, tmp, tmp2
+	phys_to_ttbr \tmp, \zero_page
+	msr	ttbr1_el1, \tmp
+	isb
+	tlbi	vmalle1
+	dsb	nsh
+	phys_to_ttbr \tmp, \page_table
+	offset_ttbr1 \tmp, \tmp2
+	msr	ttbr1_el1, \tmp
+	isb
+	.endm
+
 /*
  * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
 */
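Note: the alignment-and-loop structure of dcache_by_myline_op corresponds to the C sketch below. The helper name and the specific dc/dsb variants are illustrative, not part of this diff.

/*
 * C-level sketch of dcache_by_myline_op: align start down to a
 * cache-line boundary (bic), issue one maintenance operation per
 * line over [start, end) (add/cmp/b.lo), then complete with a DSB.
 */
static void dcache_clean_range(unsigned long start, unsigned long end,
			       unsigned long linesz)
{
	unsigned long addr = start & ~(linesz - 1);

	for (; addr < end; addr += linesz)
		asm volatile("dc cvac, %0" : : "r" (addr) : "memory");
	asm volatile("dsb sy" : : : "memory");
}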
arch/arm64/include/asm/kexec.h  +12 −0
@@ -90,12 +90,24 @@ static inline void crash_prepare_suspend(void) {}
 static inline void crash_post_resume(void) {}
 #endif
 
+#if defined(CONFIG_KEXEC_CORE)
+void cpu_soft_restart(unsigned long el2_switch, unsigned long entry,
+		      unsigned long arg0, unsigned long arg1,
+		      unsigned long arg2);
+#endif
+
 #define ARCH_HAS_KIMAGE_ARCH
 
 struct kimage_arch {
 	void *dtb;
 	phys_addr_t dtb_mem;
 	phys_addr_t kern_reloc;
+	phys_addr_t el2_vectors;
+	phys_addr_t ttbr0;
+	phys_addr_t ttbr1;
+	phys_addr_t zero_page;
+	unsigned long phys_offset;
+	unsigned long t0sz;
 };
 
 #ifdef CONFIG_KEXEC_FILE
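Note: the new fields carry everything the MMU-enabled relocation path needs: a copy of the linear map (ttbr1), an idmapped ttbr0 with its t0sz, a zero page for break-before-make, and the EL2 vectors. A hypothetical load-time sketch; only the kimage_arch fields come from this diff, the function and variable names are illustrative.

#include <linux/kexec.h>

/* Illustrative only: record the page tables and zero page at kexec load
 * time so the relocation routine can install them before copying. */
static void kexec_record_tables(struct kimage *kimage, pgd_t *trans_pgd,
				unsigned long t0sz)
{
	kimage->arch.zero_page = __pa_symbol(empty_zero_page);
	kimage->arch.ttbr1 = __pa(trans_pgd);	/* copy of the linear map */
	kimage->arch.t0sz = t0sz;
}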
arch/arm64/include/asm/mmu_context.h  +24 −0
@@ -115,6 +115,30 @@ static inline void cpu_install_idmap(void)
 	cpu_switch_mm(lm_alias(idmap_pg_dir), &init_mm);
 }
 
+/*
+ * Load our new page tables. A strict BBM approach requires that we ensure that
+ * TLBs are free of any entries that may overlap with the global mappings we are
+ * about to install.
+ *
+ * For a real hibernate/resume/kexec cycle TTBR0 currently points to a zero
+ * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI runtime
+ * services), while for a userspace-driven test_resume cycle it points to
+ * userspace page tables (and we must point it at a zero page ourselves).
+ *
+ * We change T0SZ as part of installing the idmap. This is undone by
+ * cpu_uninstall_idmap() in __cpu_suspend_exit().
+ */
+static inline void cpu_install_ttbr0(phys_addr_t ttbr0, unsigned long t0sz)
+{
+	cpu_set_reserved_ttbr0();
+	local_flush_tlb_all();
+	__cpu_set_tcr_t0sz(t0sz);
+
+	/* avoid cpu_switch_mm() and its SW-PAN and CNP interactions */
+	write_sysreg(ttbr0, ttbr0_el1);
+	isb();
+}
+
 /*
  * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
  * avoiding the possibility of conflicting TLB entries being allocated.
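Note: a hedged usage sketch for the new helper follows; the caller shown is illustrative, only cpu_install_ttbr0() itself is defined in this diff.

/* Illustrative caller: install the TTBR0 tables stashed in kimage_arch at
 * load time; the T0SZ change is later undone via cpu_uninstall_idmap(). */
static void machine_kexec_install_tables(struct kimage *kimage)
{
	cpu_install_ttbr0(kimage->arch.ttbr0, kimage->arch.t0sz);
}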
arch/arm64/include/asm/sections.h  +1 −0
@@ -21,5 +21,6 @@ extern char __exittext_begin[], __exittext_end[];
 extern char __irqentry_text_start[], __irqentry_text_end[];
 extern char __mmuoff_data_start[], __mmuoff_data_end[];
 extern char __entry_tramp_text_start[], __entry_tramp_text_end[];
+extern char __relocate_new_kernel_start[], __relocate_new_kernel_end[];
 
 #endif /* __ASM_SECTIONS_H */
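Note: with the relocation function delimited by an ld script, its size falls out of these markers and the code can be copied around like data. A minimal sketch; the destination page and any follow-up cache maintenance are assumptions.

#include <asm/sections.h>
#include <linux/string.h>

/* Illustrative only: measure the ld-script-delimited relocation code and
 * copy it to the page it will actually run from; a real caller would also
 * clean the dcache for the copied range afterwards. */
static void copy_relocation_code(void *dst)
{
	unsigned long size = __relocate_new_kernel_end -
			     __relocate_new_kernel_start;

	memcpy(dst, __relocate_new_kernel_start, size);
}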