Commit 94f9c00f authored by Will Deacon, committed by Peter Zijlstra

arm64: Remove logic to kill 32-bit tasks on 64-bit-only cores



The scheduler now knows enough about these braindead systems to place
32-bit tasks accordingly, so throw out the safety checks and allow the
ret-to-user path to avoid do_notify_resume() if there is nothing to do.

Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lore.kernel.org/r/20210730112443.23245-16-will@kernel.org
parent ead7de46
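For context on the reasoning above: the safety net being removed worked by tagging compat tasks with TIF_NOTIFY_RESUME on every context switch so that the return-to-user path would re-check, and if necessary kill, a 32-bit task that had ended up on a 64-bit-only CPU. With the scheduler-side support in place, arch_setup_new_exec() clamps a 32-bit task's allowed-CPU mask via force_compatible_cpus_allowed_ptr() at execve() (and relaxes it again for 64-bit tasks), so such a task simply never lands on an incapable CPU. The sketch below is illustrative userspace code, not part of this commit; it shows the same idea with the standard affinity API: once the allowed mask is restricted up front, a later "am I on a permitted CPU?" check is redundant.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	cpu_set_t allowed;

	/* Restrict this process to CPU 0 only, loosely analogous to
	 * force_compatible_cpus_allowed_ptr() clamping a 32-bit task to
	 * the 32-bit-capable CPUs at execve(). */
	CPU_ZERO(&allowed);
	CPU_SET(0, &allowed);
	if (sched_setaffinity(0, sizeof(allowed), &allowed)) {
		perror("sched_setaffinity");
		return EXIT_FAILURE;
	}

	/* From here on the scheduler only places us on CPUs in 'allowed',
	 * so this membership test cannot fail; there is no need to repeat
	 * it on every return to userspace. */
	int cpu = sched_getcpu();
	printf("running on CPU %d, permitted: %s\n",
	       cpu, CPU_ISSET(cpu, &allowed) ? "yes" : "no");
	return EXIT_SUCCESS;
}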
arch/arm64/kernel/process.c +1 −13
@@ -469,15 +469,6 @@ static void erratum_1418040_thread_switch(struct task_struct *prev,
 	write_sysreg(val, cntkctl_el1);
 }
 
-static void compat_thread_switch(struct task_struct *next)
-{
-	if (!is_compat_thread(task_thread_info(next)))
-		return;
-
-	if (static_branch_unlikely(&arm64_mismatched_32bit_el0))
-		set_tsk_thread_flag(next, TIF_NOTIFY_RESUME);
-}
-
 static void update_sctlr_el1(u64 sctlr)
 {
 	/*
@@ -519,7 +510,6 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
 	ssbs_thread_switch(next);
 	erratum_1418040_thread_switch(prev, next);
 	ptrauth_thread_switch_user(next);
-	compat_thread_switch(next);
 
 	/*
 	 * Complete any pending TLB or cache maintenance on this CPU in case
@@ -621,10 +611,8 @@ void arch_setup_new_exec(void)
 		 * at the point of execve(), although we try a bit harder to
 		 * honour the cpuset hierarchy.
 		 */
-		if (static_branch_unlikely(&arm64_mismatched_32bit_el0)) {
+		if (static_branch_unlikely(&arm64_mismatched_32bit_el0))
 			force_compatible_cpus_allowed_ptr(current);
-			set_tsk_thread_flag(current, TIF_NOTIFY_RESUME);
-		}
 	} else if (static_branch_unlikely(&arm64_mismatched_32bit_el0)) {
 		relax_compatible_cpus_allowed_ptr(current);
 	}
arch/arm64/kernel/signal.c +0 −26
@@ -912,19 +912,6 @@ static void do_signal(struct pt_regs *regs)
 	restore_saved_sigmask();
 }
 
-static bool cpu_affinity_invalid(struct pt_regs *regs)
-{
-	if (!compat_user_mode(regs))
-		return false;
-
-	/*
-	 * We're preemptible, but a reschedule will cause us to check the
-	 * affinity again.
-	 */
-	return !cpumask_test_cpu(raw_smp_processor_id(),
-				 system_32bit_el0_cpumask());
-}
-
 asmlinkage void do_notify_resume(struct pt_regs *regs,
 				 unsigned long thread_flags)
 {
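The removed cpu_affinity_invalid() helper asked a simple question: is the CPU this compat task is currently running on a member of the system-wide 32-bit-capable mask, system_32bit_el0_cpumask()? The sketch below is illustrative only; it uses userspace APIs and the process's own affinity mask as a stand-in for the 32-bit-capable mask, which has no userspace equivalent, to express the same shape of check.

#define _GNU_SOURCE
#include <sched.h>
#include <stdbool.h>
#include <stdio.h>

/* "Am I currently executing on a CPU that belongs to the permitted set?"
 * The permitted set here is the process's own affinity mask, purely for
 * illustration. */
static bool cpu_affinity_invalid(void)
{
	cpu_set_t allowed;

	if (sched_getaffinity(0, sizeof(allowed), &allowed))
		return false;	/* cannot tell, assume it is fine */

	/* We can be preempted here, but a reschedule would simply lead to
	 * the check being run again, the same reasoning as the comment in
	 * the removed kernel helper. */
	return !CPU_ISSET(sched_getcpu(), &allowed);
}

int main(void)
{
	printf("affinity invalid: %s\n",
	       cpu_affinity_invalid() ? "yes" : "no");
	return 0;
}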
@@ -952,19 +939,6 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
 			if (thread_flags & _TIF_NOTIFY_RESUME) {
 				tracehook_notify_resume(regs);
 				rseq_handle_notify_resume(NULL, regs);
-
-				/*
-				 * If we reschedule after checking the affinity
-				 * then we must ensure that TIF_NOTIFY_RESUME
-				 * is set so that we check the affinity again.
-				 * Since tracehook_notify_resume() clears the
-				 * flag, ensure that the compiler doesn't move
-				 * it after the affinity check.
-				 */
-				barrier();
-
-				if (cpu_affinity_invalid(regs))
-					force_sig(SIGKILL);
 			}
 
 			if (thread_flags & _TIF_FOREIGN_FPSTATE)
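The barrier() dropped in the hunk above is a compiler barrier: it only constrains the compiler, keeping the TIF_NOTIFY_RESUME clear performed by tracehook_notify_resume() ordered before the affinity check, and emits no CPU barrier instruction. A minimal standalone illustration of such a barrier follows; it assumes a GCC/Clang build, and the compiler_barrier() name is made up for the example.

#include <stdio.h>

/* An empty asm statement with a "memory" clobber: the compiler may not
 * cache memory values across it or reorder memory accesses around it.
 * The kernel's barrier() is typically defined this way for GCC/Clang. */
#define compiler_barrier()	__asm__ __volatile__("" : : : "memory")

static int flag = 1;

int main(void)
{
	flag = 0;		/* clear the flag ...                       */
	compiler_barrier();	/* ... and forbid the compiler from moving
				 * that store past the check below          */

	if (flag)
		printf("flag still set\n");
	else
		printf("flag cleared before this check\n");
	return 0;
}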