Commit dda5f312 authored by Mark Rutland, committed by Peter Zijlstra

locking/atomic: arm: fix sync ops



The sync_*() ops on arch/arm are defined in terms of the regular bitops
with no special handling. This is not correct, as UP kernels elide
barriers for the fully-ordered operations, and so the required ordering
is lost when such UP kernels are run under a hypervisor on an SMP
system.

Fix this by defining sync ops with the required barriers.

Note: On 32-bit arm, the sync_*() ops are currently only used by Xen,
which requires ARMv7, but the semantics can be implemented for ARMv6+.

Fixes: e54d2f61 ("xen/arm: sync_bitops")
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20230605070124.3741859-2-mark.rutland@arm.com
parent 497cc42b
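For context on the ordering loss described above: on a UP (!CONFIG_SMP) build the SMP barrier macros collapse to a compiler-only barrier, so a "fully ordered" operation built from them emits no DMB instruction, even though a guest kernel may share memory with other CPUs through the hypervisor. A simplified sketch of the idea (illustrative only, not the kernel's actual definitions):

#define barrier()	__asm__ __volatile__("" ::: "memory")

#ifdef CONFIG_SMP
/* SMP build: a real hardware barrier (DMB ISH on ARMv7+). */
#define smp_mb()	__asm__ __volatile__("dmb ish" ::: "memory")
#else
/* UP build: compiler barrier only -- no DMB is emitted. */
#define smp_mb()	barrier()
#endif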
arch/arm/include/asm/assembler.h  +17 −0
@@ -394,6 +394,23 @@ ALT_UP_B(.L0_\@)
#endif
	.endm

/*
 * Raw SMP data memory barrier
 */
	.macro	__smp_dmb mode
#if __LINUX_ARM_ARCH__ >= 7
	.ifeqs "\mode","arm"
	dmb	ish
	.else
	W(dmb)	ish
	.endif
#elif __LINUX_ARM_ARCH__ == 6
	mcr	p15, 0, r0, c7, c10, 5	@ dmb
#else
	.error "Incompatible SMP platform"
#endif
	.endm

#if defined(CONFIG_CPU_V7M)
	/*
	 * setmode is used to assert to be in svc mode during boot. For v7-M
arch/arm/include/asm/sync_bitops.h  +25 −4
@@ -14,14 +14,35 @@
 * ops which are SMP safe even on a UP kernel.
 */

/*
 * Unordered
 */

#define sync_set_bit(nr, p)		_set_bit(nr, p)
#define sync_clear_bit(nr, p)		_clear_bit(nr, p)
#define sync_change_bit(nr, p)		_change_bit(nr, p)
#define sync_test_and_set_bit(nr, p)	_test_and_set_bit(nr, p)
#define sync_test_and_clear_bit(nr, p)	_test_and_clear_bit(nr, p)
#define sync_test_and_change_bit(nr, p)	_test_and_change_bit(nr, p)
#define sync_test_bit(nr, addr)		test_bit(nr, addr)
#define arch_sync_cmpxchg		arch_cmpxchg

/*
 * Fully ordered
 */

int _sync_test_and_set_bit(int nr, volatile unsigned long * p);
#define sync_test_and_set_bit(nr, p)	_sync_test_and_set_bit(nr, p)

int _sync_test_and_clear_bit(int nr, volatile unsigned long * p);
#define sync_test_and_clear_bit(nr, p)	_sync_test_and_clear_bit(nr, p)

int _sync_test_and_change_bit(int nr, volatile unsigned long * p);
#define sync_test_and_change_bit(nr, p)	_sync_test_and_change_bit(nr, p)

#define arch_sync_cmpxchg(ptr, old, new)				\
({									\
	__typeof__(*(ptr)) __ret;					\
	__smp_mb__before_atomic();					\
	__ret = arch_cmpxchg_relaxed((ptr), (old), (new));		\
	__smp_mb__after_atomic();					\
	__ret;								\
})

#endif
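
The arch_sync_cmpxchg() wrapper above follows the standard construction for a fully ordered operation: full barrier, relaxed compare-and-exchange, full barrier. A minimal userspace C11 analogue of the same shape (hypothetical name sync_cmpxchg_int; a sketch, not the kernel implementation):

#include <stdatomic.h>
#include <stdio.h>

/* Fully ordered cmpxchg built from a relaxed one bracketed by fences. */
static int sync_cmpxchg_int(_Atomic int *ptr, int old, int new)
{
	atomic_thread_fence(memory_order_seq_cst);		/* before-atomic */
	atomic_compare_exchange_strong_explicit(ptr, &old, new,
						memory_order_relaxed,
						memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);		/* after-atomic */
	return old;	/* previous value, like cmpxchg() */
}

int main(void)
{
	_Atomic int v = 1;
	printf("prev=%d now=%d\n", sync_cmpxchg_int(&v, 1, 2), (int)v);
	return 0;
}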
arch/arm/lib/bitops.h  +11 −3
@@ -28,7 +28,7 @@ UNWIND( .fnend )
ENDPROC(\name		)
	.endm

	.macro	testop, name, instr, store
	.macro	__testop, name, instr, store, barrier
ENTRY(	\name		)
UNWIND(	.fnstart	)
	ands	ip, r1, #3
@@ -38,7 +38,7 @@ UNWIND( .fnstart )
	mov	r0, r0, lsr #5
	add	r1, r1, r0, lsl #2	@ Get word offset
	mov	r3, r2, lsl r3		@ create mask
	smp_dmb
	\barrier
#if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
	.arch_extension	mp
	ALT_SMP(W(pldw)	[r1])
@@ -50,13 +50,21 @@ UNWIND( .fnstart )
	strex	ip, r2, [r1]
	cmp	ip, #0
	bne	1b
	smp_dmb
	\barrier
	cmp	r0, #0
	movne	r0, #1
2:	bx	lr
UNWIND(	.fnend		)
ENDPROC(\name		)
	.endm

	.macro	testop, name, instr, store
	__testop \name, \instr, \store, smp_dmb
	.endm

	.macro	sync_testop, name, instr, store
	__testop \name, \instr, \store, __smp_dmb
	.endm
#else
	.macro	bitop, name, instr
ENTRY(	\name		)
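
The sync_testop macro above generates the same LDREX/STREX loop as testop but substitutes __smp_dmb, which always emits the barrier, for smp_dmb, which is omitted on UP kernels. A rough userspace C11 analogue of what such a fully ordered test-and-set bitop does (hypothetical name, illustration only):

#include <stdatomic.h>
#include <limits.h>

#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)

int sync_test_and_set_bit_c11(int nr, _Atomic unsigned long *p)
{
	unsigned long mask = 1UL << (nr % BITS_PER_LONG);
	unsigned long old;

	p += nr / BITS_PER_LONG;			/* word offset, as in the asm */

	atomic_thread_fence(memory_order_seq_cst);	/* leading barrier */
	old = atomic_fetch_or_explicit(p, mask, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* trailing barrier */

	return (old & mask) != 0;			/* old value of the bit */
}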
arch/arm/lib/testchangebit.S  +4 −0
@@ -10,3 +10,7 @@
                .text

testop	_test_and_change_bit, eor, str

#if __LINUX_ARM_ARCH__ >= 6
sync_testop	_sync_test_and_change_bit, eor, str
#endif
arch/arm/lib/testclearbit.S  +4 −0
@@ -10,3 +10,7 @@
                .text

testop	_test_and_clear_bit, bicne, strne

#if __LINUX_ARM_ARCH__ >= 6
sync_testop	_sync_test_and_clear_bit, bicne, strne
#endif