Commit 78f6f5c9 authored by Mark Rutland, committed by Catalin Marinas

arm64: atomic: always inline the assembly



The __lse_*() and __ll_sc_*() atomic implementations are marked as
inline rather than __always_inline, permitting a compiler to generate
out-of-line versions, which may be instrumented.

We marked the atomic wrappers as __always_inline in commit:

  c35a824c ("arm64: make atomic helpers __always_inline")

... but did not think to do the same for the underlying implementations.
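
For context, those wrappers dispatch to one implementation or the other at
runtime. A simplified sketch of that dispatch (paraphrased from
arch/arm64/include/asm/lse.h and atomic.h; the exact definitions vary across
kernel versions) is:

  /*
   * system_uses_lse_atomics() is resolved to a constant branch at boot.
   * Both arms must end up inlined into the __always_inline wrapper,
   * otherwise a noinstr caller can reach an out-of-line, instrumentable copy.
   */
  #define __lse_ll_sc_body(op, ...)					\
  ({									\
  	system_uses_lse_atomics() ?					\
  		__lse_##op(__VA_ARGS__) :				\
  		__ll_sc_##op(__VA_ARGS__);				\
  })

  static __always_inline void arch_atomic_add(int i, atomic_t *v)
  {
  	__lse_ll_sc_body(atomic_add, i, v);
  }

With all three layers __always_inline, a noinstr function that calls
arch_atomic_add() ends up containing only the inline asm in its own body.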

If the compiler were to out-of-line an LSE or LL/SC atomic, this could
break noinstr code. Ensure this doesn't happen by marking the underlying
implementations as __always_inline.

There should be no functional change as a result of this patch.
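
To illustrate the underlying distinction, here is a minimal userspace sketch
(not kernel code; the kernel's real __always_inline in
include/linux/compiler_types.h carries additional attributes, and
my_always_inline below is a hypothetical stand-in for it):

  /*
   * Plain "inline" is only a hint: the compiler may still emit an
   * out-of-line copy (e.g. at -O0, or when it judges inlining
   * unprofitable), and that copy is a real symbol which instrumentation
   * can hook. The always_inline attribute forces the body into every
   * caller, so no such copy exists.
   */
  #define my_always_inline	inline __attribute__((__always_inline__))

  static inline int hinted_add(int i, int *p)		/* may be out-of-lined */
  {
  	return *p += i;
  }

  static my_always_inline int forced_add(int i, int *p)	/* always expanded */
  {
  	return *p += i;
  }

  int caller(int *p)
  {
  	return hinted_add(1, p) + forced_add(1, p);
  }

Built with instrumentation, only an out-of-line copy of hinted_add() can pick
up hooks; forced_add() never exists as a standalone symbol.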

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20220817155914.3975112-3-mark.rutland@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent b2c3ccbd
arch/arm64/include/asm/atomic_ll_sc.h  +9 −9
@@ -23,7 +23,7 @@
 */

#define ATOMIC_OP(op, asm_op, constraint)				\
-static inline void							\
+static __always_inline void						\
__ll_sc_atomic_##op(int i, atomic_t *v)					\
{									\
	unsigned long tmp;						\
@@ -40,7 +40,7 @@ __ll_sc_atomic_##op(int i, atomic_t *v) \
}

#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
-static inline int							\
+static __always_inline int						\
__ll_sc_atomic_##op##_return##name(int i, atomic_t *v)			\
{									\
	unsigned long tmp;						\
@@ -61,7 +61,7 @@ __ll_sc_atomic_##op##_return##name(int i, atomic_t *v) \
}

#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint) \
-static inline int							\
+static __always_inline int						\
__ll_sc_atomic_fetch_##op##name(int i, atomic_t *v)			\
{									\
	unsigned long tmp;						\
@@ -119,7 +119,7 @@ ATOMIC_OPS(andnot, bic, )
#undef ATOMIC_OP

#define ATOMIC64_OP(op, asm_op, constraint)				\
-static inline void							\
+static __always_inline void						\
__ll_sc_atomic64_##op(s64 i, atomic64_t *v)				\
{									\
	s64 result;							\
@@ -136,7 +136,7 @@ __ll_sc_atomic64_##op(s64 i, atomic64_t *v) \
}

#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
-static inline long							\
+static __always_inline long						\
__ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v)		\
{									\
	s64 result;							\
@@ -157,7 +157,7 @@ __ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v) \
}

#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint)\
-static inline long							\
+static __always_inline long						\
__ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v)			\
{									\
	s64 result, val;						\
@@ -214,7 +214,7 @@ ATOMIC64_OPS(andnot, bic, )
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

-static inline s64
+static __always_inline s64
__ll_sc_atomic64_dec_if_positive(atomic64_t *v)
{
	s64 result;
@@ -237,7 +237,7 @@ __ll_sc_atomic64_dec_if_positive(atomic64_t *v)
}

#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl, constraint)	\
-static inline u##sz							\
+static __always_inline u##sz						\
__ll_sc__cmpxchg_case_##name##sz(volatile void *ptr,			\
					 unsigned long old,		\
					 u##sz new)			\
@@ -295,7 +295,7 @@ __CMPXCHG_CASE( , , mb_, 64, dmb ish, , l, "memory", L)
#undef __CMPXCHG_CASE

#define __CMPXCHG_DBL(name, mb, rel, cl)				\
-static inline long							\
+static __always_inline long						\
__ll_sc__cmpxchg_double##name(unsigned long old1,			\
				      unsigned long old2,		\
				      unsigned long new1,		\
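
As an illustration of what the first hunk's ATOMIC_OP() macro now generates,
the add case expands to roughly the following (the asm body and the "result"
temporary are a simplified paraphrase; the prefetch and exact constraint
strings remain as defined in atomic_ll_sc.h):

  typedef struct { int counter; } atomic_t;	/* the kernel's atomic_t layout */

  static __always_inline void
  __ll_sc_atomic_add(int i, atomic_t *v)
  {
  	unsigned long tmp;
  	int result;

  	/* LL/SC loop: load-exclusive, add, store-exclusive, retry until it sticks. */
  	asm volatile(
  	"1:	ldxr	%w0, %2\n"
  	"	add	%w0, %w0, %w3\n"
  	"	stxr	%w1, %w0, %2\n"
  	"	cbnz	%w1, 1b"
  	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
  	: "Ir" (i));
  }
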
arch/arm64/include/asm/atomic_lse.h  +29 −17
@@ -11,7 +11,8 @@
#define __ASM_ATOMIC_LSE_H

#define ATOMIC_OP(op, asm_op)						\
-static inline void __lse_atomic_##op(int i, atomic_t *v)		\
+static __always_inline void						\
+__lse_atomic_##op(int i, atomic_t *v)					\
{									\
	asm volatile(							\
	__LSE_PREAMBLE							\
@@ -25,7 +26,7 @@ ATOMIC_OP(or, stset)
ATOMIC_OP(xor, steor)
ATOMIC_OP(add, stadd)

-static inline void __lse_atomic_sub(int i, atomic_t *v)
+static __always_inline void __lse_atomic_sub(int i, atomic_t *v)
{
	__lse_atomic_add(-i, v);
}
@@ -33,7 +34,8 @@ static inline void __lse_atomic_sub(int i, atomic_t *v)
#undef ATOMIC_OP

#define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...)			\
-static inline int __lse_atomic_fetch_##op##name(int i, atomic_t *v)	\
+static __always_inline int						\
+__lse_atomic_fetch_##op##name(int i, atomic_t *v)			\
{									\
	int old;							\
									\
@@ -63,7 +65,8 @@ ATOMIC_FETCH_OPS(add, ldadd)
#undef ATOMIC_FETCH_OPS

#define ATOMIC_FETCH_OP_SUB(name)					\
-static inline int __lse_atomic_fetch_sub##name(int i, atomic_t *v)	\
+static __always_inline int						\
+__lse_atomic_fetch_sub##name(int i, atomic_t *v)			\
{									\
	return __lse_atomic_fetch_add##name(-i, v);			\
}
@@ -76,12 +79,14 @@ ATOMIC_FETCH_OP_SUB( )
#undef ATOMIC_FETCH_OP_SUB

#define ATOMIC_OP_ADD_SUB_RETURN(name)					\
-static inline int __lse_atomic_add_return##name(int i, atomic_t *v)	\
+static __always_inline int						\
+__lse_atomic_add_return##name(int i, atomic_t *v)			\
{									\
	return __lse_atomic_fetch_add##name(i, v) + i;			\
}									\
									\
-static inline int __lse_atomic_sub_return##name(int i, atomic_t *v)	\
+static __always_inline int						\
+__lse_atomic_sub_return##name(int i, atomic_t *v)			\
{									\
	return __lse_atomic_fetch_sub(i, v) - i;			\
}
@@ -93,13 +98,14 @@ ATOMIC_OP_ADD_SUB_RETURN( )

#undef ATOMIC_OP_ADD_SUB_RETURN

-static inline void __lse_atomic_and(int i, atomic_t *v)
+static __always_inline void __lse_atomic_and(int i, atomic_t *v)
{
	return __lse_atomic_andnot(~i, v);
}

#define ATOMIC_FETCH_OP_AND(name, mb, cl...)				\
-static inline int __lse_atomic_fetch_and##name(int i, atomic_t *v)	\
+static __always_inline int						\
+__lse_atomic_fetch_and##name(int i, atomic_t *v)			\
{									\
	return __lse_atomic_fetch_andnot##name(~i, v);			\
}
@@ -112,7 +118,8 @@ ATOMIC_FETCH_OP_AND( , al, "memory")
#undef ATOMIC_FETCH_OP_AND

#define ATOMIC64_OP(op, asm_op)						\
-static inline void __lse_atomic64_##op(s64 i, atomic64_t *v)		\
+static __always_inline void						\
+__lse_atomic64_##op(s64 i, atomic64_t *v)				\
{									\
	asm volatile(							\
	__LSE_PREAMBLE							\
@@ -126,7 +133,7 @@ ATOMIC64_OP(or, stset)
ATOMIC64_OP(xor, steor)
ATOMIC64_OP(add, stadd)

-static inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
+static __always_inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
{
	__lse_atomic64_add(-i, v);
}
@@ -134,7 +141,8 @@ static inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
#undef ATOMIC64_OP

#define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...)			\
-static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\
+static __always_inline long						\
+__lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)			\
{									\
	s64 old;							\
									\
@@ -164,7 +172,8 @@ ATOMIC64_FETCH_OPS(add, ldadd)
#undef ATOMIC64_FETCH_OPS

#define ATOMIC64_FETCH_OP_SUB(name)					\
-static inline long __lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v)	\
+static __always_inline long						\
+__lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v)			\
{									\
	return __lse_atomic64_fetch_add##name(-i, v);			\
}
@@ -177,12 +186,14 @@ ATOMIC64_FETCH_OP_SUB( )
#undef ATOMIC64_FETCH_OP_SUB

#define ATOMIC64_OP_ADD_SUB_RETURN(name)				\
-static inline long __lse_atomic64_add_return##name(s64 i, atomic64_t *v)\
+static __always_inline long						\
+__lse_atomic64_add_return##name(s64 i, atomic64_t *v)			\
{									\
	return __lse_atomic64_fetch_add##name(i, v) + i;		\
}									\
									\
-static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v)\
+static __always_inline long						\
+__lse_atomic64_sub_return##name(s64 i, atomic64_t *v)			\
{									\
	return __lse_atomic64_fetch_sub##name(i, v) - i;		\
}
@@ -194,13 +205,14 @@ ATOMIC64_OP_ADD_SUB_RETURN( )

#undef ATOMIC64_OP_ADD_SUB_RETURN

-static inline void __lse_atomic64_and(s64 i, atomic64_t *v)
+static __always_inline void __lse_atomic64_and(s64 i, atomic64_t *v)
{
	return __lse_atomic64_andnot(~i, v);
}

#define ATOMIC64_FETCH_OP_AND(name, mb, cl...)				\
-static inline long __lse_atomic64_fetch_and##name(s64 i, atomic64_t *v)	\
+static __always_inline long						\
+__lse_atomic64_fetch_and##name(s64 i, atomic64_t *v)			\
{									\
	return __lse_atomic64_fetch_andnot##name(~i, v);		\
}
@@ -212,7 +224,7 @@ ATOMIC64_FETCH_OP_AND( , al, "memory")

#undef ATOMIC64_FETCH_OP_AND

-static inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
+static __always_inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long tmp;