Commit e7cf636c authored by Will Deacon's avatar Will Deacon
Browse files

Merge branch 'for-next/boot' into for-next/core

Boot path cleanups to enable early initialisation of per-cpu operations
needed by KCSAN.

* for-next/boot:
  arm64: scs: Drop unused 'tmp' argument to scs_{load, save} asm macros
  arm64: smp: initialize cpu offset earlier
  arm64: smp: unify task and sp setup
  arm64: smp: remove stack from secondary_data
  arm64: smp: remove pointless secondary_data maintenance
  arm64: assembler: add set_this_cpu_offset
parents 0b573a02 16c230b3
Loading
Loading
Loading
Loading
+13 −5
Original line number Diff line number Diff line
@@ -232,15 +232,23 @@ lr .req x30 // link register
 	 * @dst: destination register
 	 */
 #if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
-	.macro	this_cpu_offset, dst
+	.macro	get_this_cpu_offset, dst
 	mrs	\dst, tpidr_el2
 	.endm
 #else
-	.macro	this_cpu_offset, dst
+	.macro	get_this_cpu_offset, dst
 alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
 	mrs	\dst, tpidr_el1
 alternative_else
 	mrs	\dst, tpidr_el2
 alternative_endif
 	.endm
+
+	.macro	set_this_cpu_offset, src
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
+	msr	tpidr_el1, \src
+alternative_else
+	msr	tpidr_el2, \src
+alternative_endif
+	.endm
 #endif
@@ -253,7 +261,7 @@ alternative_endif
 	.macro adr_this_cpu, dst, sym, tmp
 	adrp	\tmp, \sym
 	add	\dst, \tmp, #:lo12:\sym
-	this_cpu_offset \tmp
+	get_this_cpu_offset \tmp
 	add	\dst, \dst, \tmp
 	.endm
 
@@ -264,7 +272,7 @@ alternative_endif
 	 */
 	.macro ldr_this_cpu dst, sym, tmp
 	adr_l	\dst, \sym
-	this_cpu_offset \tmp
+	get_this_cpu_offset \tmp
 	ldr	\dst, [\dst, \tmp]
 	.endm
 
@@ -745,7 +753,7 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
 	cbz		\tmp, \lbl
 #endif
 	adr_l		\tmp, irq_stat + IRQ_CPUSTAT_SOFTIRQ_PENDING
-	this_cpu_offset	\tmp2
+	get_this_cpu_offset	\tmp2
 	ldr		w\tmp, [\tmp, \tmp2]
 	cbnz		w\tmp, \lbl	// yield on pending softirq in task context
 .Lnoyield_\@:
+4 −4
Original line number Diff line number Diff line
@@ -9,18 +9,18 @@
 #ifdef CONFIG_SHADOW_CALL_STACK
 	scs_sp	.req	x18
 
-	.macro scs_load tsk, tmp
+	.macro scs_load tsk
 	ldr	scs_sp, [\tsk, #TSK_TI_SCS_SP]
 	.endm
 
-	.macro scs_save tsk, tmp
+	.macro scs_save tsk
 	str	scs_sp, [\tsk, #TSK_TI_SCS_SP]
 	.endm
 #else
-	.macro scs_load tsk, tmp
+	.macro scs_load tsk
 	.endm
 
-	.macro scs_save tsk, tmp
+	.macro scs_save tsk
 	.endm
 #endif /* CONFIG_SHADOW_CALL_STACK */
 
+0 −2
Original line number Diff line number Diff line
@@ -73,12 +73,10 @@ asmlinkage void secondary_start_kernel(void);
 
 /*
  * Initial data for bringing up a secondary CPU.
- * @stack  - sp for the secondary CPU
  * @status - Result passed back from the secondary CPU to
  *           indicate failure.
  */
 struct secondary_data {
-	void *stack;
 	struct task_struct *task;
 	long status;
 };
+1 −1
Original line number Diff line number Diff line
@@ -27,6 +27,7 @@
 int main(void)
 {
   DEFINE(TSK_ACTIVE_MM,		offsetof(struct task_struct, active_mm));
+  DEFINE(TSK_CPU,		offsetof(struct task_struct, cpu));
   BLANK();
   DEFINE(TSK_TI_FLAGS,		offsetof(struct task_struct, thread_info.flags));
   DEFINE(TSK_TI_PREEMPT,	offsetof(struct task_struct, thread_info.preempt_count));
@@ -99,7 +100,6 @@ int main(void)
   DEFINE(SOFTIRQ_SHIFT, SOFTIRQ_SHIFT);
   DEFINE(IRQ_CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending));
   BLANK();
-  DEFINE(CPU_BOOT_STACK,	offsetof(struct secondary_data, stack));
   DEFINE(CPU_BOOT_TASK,		offsetof(struct secondary_data, task));
   BLANK();
   DEFINE(FTR_OVR_VAL_OFFSET,	offsetof(struct arm64_ftr_override, val));
+4 −4
Original line number Diff line number Diff line
@@ -275,7 +275,7 @@ alternative_else_nop_endif
 
 	mte_set_kernel_gcr x22, x23
 
-	scs_load tsk, x20
+	scs_load tsk
 	.else
 	add	x21, sp, #PT_REGS_SIZE
 	get_current_task tsk
@@ -375,7 +375,7 @@ alternative_if ARM64_WORKAROUND_845719
 alternative_else_nop_endif
 #endif
 3:
-	scs_save tsk, x0
+	scs_save tsk
 
 #ifdef CONFIG_ARM64_PTR_AUTH
 alternative_if ARM64_HAS_ADDRESS_AUTH
@@ -979,8 +979,8 @@ SYM_FUNC_START(cpu_switch_to)
 	mov	sp, x9
 	msr	sp_el0, x1
 	ptrauth_keys_install_kernel x1, x8, x9, x10
-	scs_save x0, x8
-	scs_load x1, x8
+	scs_save x0
+	scs_load x1
 	ret
 SYM_FUNC_END(cpu_switch_to)
 NOKPROBE(cpu_switch_to)
Loading