Commit c35a824c authored by Arnd Bergmann, committed by Catalin Marinas

arm64: make atomic helpers __always_inline



With UBSAN enabled and building with clang, there are occasional
warnings like:

WARNING: modpost: vmlinux.o(.text+0xc533ec): Section mismatch in reference from the function arch_atomic64_or() to the variable .init.data:numa_nodes_parsed
The function arch_atomic64_or() references
the variable __initdata numa_nodes_parsed.
This is often because arch_atomic64_or lacks a __initdata
annotation or the annotation of numa_nodes_parsed is wrong.

for functions that end up not being inlined as intended but still
operate on __initdata variables. Mark these helpers as __always_inline,
along with the corresponding asm-generic wrappers, so the compiler no
longer has the option of emitting them out of line.
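As a standalone illustration of the failure mode (a minimal sketch with
made-up names, not code from this commit): when a helper is only marked
"inline", UBSAN instrumentation can push clang to emit it out of line,
and the emitted .text copy carries the reference to the .init.data
variable that modpost then complains about.

/*
 * Minimal sketch of the section mismatch; identifiers echo the warning
 * above, but the code is illustrative, not taken from the kernel tree.
 */
#define __initdata __attribute__((__section__(".init.data")))
#define __init     __attribute__((__section__(".init.text")))

static unsigned long numa_nodes_parsed __initdata;

/*
 * Plain "inline" is only a hint: with UBSAN instrumentation, clang may
 * emit this helper as a standalone .text function.  Because the helper
 * references numa_nodes_parsed directly, that standalone copy carries a
 * .text -> .init.data reference, which modpost flags.  Marking it
 * __always_inline keeps the reference inside the __init caller, where a
 * reference to .init.data is legitimate.
 */
static inline void or_into_parsed(unsigned long mask)
{
	__atomic_fetch_or(&numa_nodes_parsed, mask, __ATOMIC_RELAXED);
}

void __init numa_parse_one(int nid)
{
	or_into_parsed(1UL << nid);
}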

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20210108092024.4034860-1-arnd@kernel.org


Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 71e70184
arch/arm64/include/asm/atomic.h (+5 −5)

@@ -17,7 +17,7 @@
 #include <asm/lse.h>
 
 #define ATOMIC_OP(op)							\
-static inline void arch_##op(int i, atomic_t *v)			\
+static __always_inline void arch_##op(int i, atomic_t *v)		\
 {									\
 	__lse_ll_sc_body(op, i, v);					\
 }
@@ -32,7 +32,7 @@ ATOMIC_OP(atomic_sub)
 #undef ATOMIC_OP
 
 #define ATOMIC_FETCH_OP(name, op)					\
-static inline int arch_##op##name(int i, atomic_t *v)			\
+static __always_inline int arch_##op##name(int i, atomic_t *v)		\
 {									\
 	return __lse_ll_sc_body(op##name, i, v);			\
 }
@@ -56,7 +56,7 @@ ATOMIC_FETCH_OPS(atomic_sub_return)
 #undef ATOMIC_FETCH_OPS
 
 #define ATOMIC64_OP(op)							\
-static inline void arch_##op(long i, atomic64_t *v)			\
+static __always_inline void arch_##op(long i, atomic64_t *v)		\
 {									\
 	__lse_ll_sc_body(op, i, v);					\
 }
@@ -71,7 +71,7 @@ ATOMIC64_OP(atomic64_sub)
 #undef ATOMIC64_OP
 
 #define ATOMIC64_FETCH_OP(name, op)					\
-static inline long arch_##op##name(long i, atomic64_t *v)		\
+static __always_inline long arch_##op##name(long i, atomic64_t *v)	\
 {									\
 	return __lse_ll_sc_body(op##name, i, v);			\
 }
@@ -94,7 +94,7 @@ ATOMIC64_FETCH_OPS(atomic64_sub_return)
 #undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_FETCH_OPS
 
-static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
+static __always_inline long arch_atomic64_dec_if_positive(atomic64_t *v)
 {
 	return __lse_ll_sc_body(atomic64_dec_if_positive, v);
 }
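For context on why inlining matters here: these wrappers all expand
__lse_ll_sc_body, which in <asm/lse.h> selects between the LSE and
LL/SC implementations at runtime. Its shape is roughly the following
(paraphrased, so treat it as an approximation rather than a verbatim
copy of the header):

/* Approximate shape of the dispatch these wrappers expand to. */
#define __lse_ll_sc_body(op, ...)					\
({									\
	system_uses_lse_atomics() ?					\
		__lse_##op(__VA_ARGS__) :				\
		__ll_sc_##op(__VA_ARGS__);				\
})

Because this expands to a branch plus a call to one of two real
implementations, a wrapper that clang declines to inline (for instance,
a specialized copy with a constant pointer argument folded in) becomes
an ordinary .text function, and a reference to an __initdata variable
passed from an __init caller then escapes into .text.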
include/asm-generic/bitops/atomic.h (+3 −3)

@@ -11,19 +11,19 @@
  * See Documentation/atomic_bitops.txt for details.
  */
 
-static inline void set_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline void set_bit(unsigned int nr, volatile unsigned long *p)
 {
 	p += BIT_WORD(nr);
 	atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p);
 }
 
-static inline void clear_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline void clear_bit(unsigned int nr, volatile unsigned long *p)
 {
 	p += BIT_WORD(nr);
 	atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p);
 }
 
-static inline void change_bit(unsigned int nr, volatile unsigned long *p)
+static __always_inline void change_bit(unsigned int nr, volatile unsigned long *p)
 {
 	p += BIT_WORD(nr);
 	atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p);
 }
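For reference, one plausible path from __init code to the warning
quoted above, reconstructed from the identifiers in the warning (treat
the exact chain as an assumption):

/* Hypothetical call chain on a 64-bit kernel: */
node_set(nid, numa_nodes_parsed);       /* include/linux/nodemask.h */
  -> set_bit(nid, numa_nodes_parsed.bits);
     -> atomic_long_or(BIT_MASK(nid), ...);
        -> arch_atomic64_or(...);       /* the function modpost names */

Once the atomic wrappers are __always_inline, the remaining helpers
inline as usual, the whole chain collapses into the __init caller, and
the .init.data reference never escapes into .text.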