Commit ec97a729 authored by Linus Torvalds
Browse files
Pull arm64 fixes from Will Deacon:

 - Fix incorrect asm constraint for load_unaligned_zeropad() fixup

 - Fix thread flag update when setting TIF_MTE_ASYNC_FAULT

 - Fix restored irq state when handling fault on kprobe

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: kprobes: Restore local irqflag if kprobes is cancelled
  arm64: mte: Ensure TIF_MTE_ASYNC_FAULT is set atomically
  arm64: fix inline asm in load_unaligned_zeropad()
parents c17a3066 738fa58e
Loading
Loading
Loading
Loading
+5 −1
Original line number Diff line number Diff line
@@ -1406,10 +1406,13 @@ config ARM64_PAN
config AS_HAS_LDAPR
	def_bool $(as-instr,.arch_extension rcpc)

config AS_HAS_LSE_ATOMICS
	def_bool $(as-instr,.arch_extension lse)

config ARM64_LSE_ATOMICS
	bool
	default ARM64_USE_LSE_ATOMICS
	depends on $(as-instr,.arch_extension lse)
	depends on AS_HAS_LSE_ATOMICS

config ARM64_USE_LSE_ATOMICS
	bool "Atomic instructions"
@@ -1666,6 +1669,7 @@ config ARM64_MTE
	default y
	depends on ARM64_AS_HAS_MTE && ARM64_TAGGED_ADDR_ABI
	depends on AS_HAS_ARMV8_5
	depends on AS_HAS_LSE_ATOMICS
	# Required for tag checking in the uaccess routines
	depends on ARM64_PAN
	select ARCH_USES_HIGH_VMA_FLAGS
+5 −5
Original line number Diff line number Diff line
@@ -53,7 +53,7 @@ static inline unsigned long find_zero(unsigned long mask)
 */
static inline unsigned long load_unaligned_zeropad(const void *addr)
{
	unsigned long ret, offset;
	unsigned long ret, tmp;

	/* Load word from unaligned pointer addr */
	asm(
@@ -61,9 +61,9 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
	"2:\n"
	"	.pushsection .fixup,\"ax\"\n"
	"	.align 2\n"
	"3:	and	%1, %2, #0x7\n"
	"	bic	%2, %2, #0x7\n"
	"	ldr	%0, [%2]\n"
	"3:	bic	%1, %2, #0x7\n"
	"	ldr	%0, [%1]\n"
	"	and	%1, %2, #0x7\n"
	"	lsl	%1, %1, #0x3\n"
#ifndef __AARCH64EB__
	"	lsr	%0, %0, %1\n"
@@ -73,7 +73,7 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
	"	b	2b\n"
	"	.popsection\n"
	_ASM_EXTABLE(1b, 3b)
	: "=&r" (ret), "=&r" (offset)
	: "=&r" (ret), "=&r" (tmp)
	: "r" (addr), "Q" (*(unsigned long *)addr));

	return ret;
+6 −4
Original line number Diff line number Diff line
@@ -148,16 +148,18 @@ alternative_cb_end
	.endm

	/* Check for MTE asynchronous tag check faults */
	.macro check_mte_async_tcf, flgs, tmp
	.macro check_mte_async_tcf, tmp, ti_flags
#ifdef CONFIG_ARM64_MTE
	.arch_extension lse
alternative_if_not ARM64_MTE
	b	1f
alternative_else_nop_endif
	mrs_s	\tmp, SYS_TFSRE0_EL1
	tbz	\tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f
	/* Asynchronous TCF occurred for TTBR0 access, set the TI flag */
	orr	\flgs, \flgs, #_TIF_MTE_ASYNC_FAULT
	str	\flgs, [tsk, #TSK_TI_FLAGS]
	mov	\tmp, #_TIF_MTE_ASYNC_FAULT
	add	\ti_flags, tsk, #TSK_TI_FLAGS
	stset	\tmp, [\ti_flags]
	msr_s	SYS_TFSRE0_EL1, xzr
1:
#endif
@@ -244,7 +246,7 @@ alternative_else_nop_endif
	disable_step_tsk x19, x20

	/* Check for asynchronous tag check faults in user space */
	check_mte_async_tcf x19, x22
	check_mte_async_tcf x22, x23
	apply_ssbd 1, x22, x23

	ptrauth_keys_install_kernel tsk, x20, x22, x23
+4 −2
Original line number Diff line number Diff line
@@ -267,10 +267,12 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
		if (!instruction_pointer(regs))
			BUG();

		if (kcb->kprobe_status == KPROBE_REENTER)
		if (kcb->kprobe_status == KPROBE_REENTER) {
			restore_previous_kprobe(kcb);
		else
		} else {
			kprobes_restore_local_irqflag(kcb, regs);
			reset_current_kprobe();
		}

		break;
	case KPROBE_HIT_ACTIVE: