Commit 00017423 authored by Heiko Carstens

s390/atomic,cmpxchg: switch to use atomic-instrumented.h



Add arch_ prefix to all atomic operations, and define ARCH_ATOMIC.
This enables KASAN instrumentation for all atomic operations on s390.

This is the s390 variant of commit 8bf705d1 ("locking/atomic/x86:
Switch atomic.h to use atomic-instrumented.h").
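For context: with ARCH_ATOMIC defined, the generic header include/asm-generic/atomic-instrumented.h supplies the un-prefixed API on top of the arch_ ops. A simplified sketch of that wrapper pattern (the exact instrumentation helper varies across kernel versions; instrument_atomic_read() is the one used around the time of this commit):

/*
 * Simplified sketch of the atomic-instrumented.h pattern: instrument
 * the access so KASAN/KCSAN can check it, then delegate to the
 * arch_-prefixed op that the architecture (now including s390) provides.
 */
static __always_inline int
atomic_read(const atomic_t *v)
{
	instrument_atomic_read(v, sizeof(*v));
	return arch_atomic_read(v);
}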

Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
parent d2b1f6d2
arch/s390/include/asm/atomic.h  +52 −24
@@ -15,41 +15,46 @@
 #include <asm/barrier.h>
 #include <asm/cmpxchg.h>

-static inline int atomic_read(const atomic_t *v)
+static inline int arch_atomic_read(const atomic_t *v)
 {
 	return __atomic_read(v);
 }
+#define arch_atomic_read arch_atomic_read

-static inline void atomic_set(atomic_t *v, int i)
+static inline void arch_atomic_set(atomic_t *v, int i)
 {
 	__atomic_set(v, i);
 }
+#define arch_atomic_set arch_atomic_set

-static inline int atomic_add_return(int i, atomic_t *v)
+static inline int arch_atomic_add_return(int i, atomic_t *v)
 {
 	return __atomic_add_barrier(i, &v->counter) + i;
 }
+#define arch_atomic_add_return arch_atomic_add_return

-static inline int atomic_fetch_add(int i, atomic_t *v)
+static inline int arch_atomic_fetch_add(int i, atomic_t *v)
 {
 	return __atomic_add_barrier(i, &v->counter);
 }
+#define arch_atomic_fetch_add arch_atomic_fetch_add

-static inline void atomic_add(int i, atomic_t *v)
+static inline void arch_atomic_add(int i, atomic_t *v)
 {
 	__atomic_add(i, &v->counter);
 }
+#define arch_atomic_add arch_atomic_add

-#define atomic_sub(_i, _v)		atomic_add(-(int)(_i), _v)
-#define atomic_sub_return(_i, _v)	atomic_add_return(-(int)(_i), _v)
-#define atomic_fetch_sub(_i, _v)	atomic_fetch_add(-(int)(_i), _v)
+#define arch_atomic_sub(_i, _v)		arch_atomic_add(-(int)(_i), _v)
+#define arch_atomic_sub_return(_i, _v)	arch_atomic_add_return(-(int)(_i), _v)
+#define arch_atomic_fetch_sub(_i, _v)	arch_atomic_fetch_add(-(int)(_i), _v)

 #define ATOMIC_OPS(op)							\
-static inline void atomic_##op(int i, atomic_t *v)			\
+static inline void arch_atomic_##op(int i, atomic_t *v)			\
 {									\
 	__atomic_##op(i, &v->counter);					\
 }									\
-static inline int atomic_fetch_##op(int i, atomic_t *v)			\
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
 {									\
 	return __atomic_##op##_barrier(i, &v->counter);			\
 }
@@ -60,53 +65,67 @@ ATOMIC_OPS(xor)

 #undef ATOMIC_OPS

-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic_and			arch_atomic_and
+#define arch_atomic_or			arch_atomic_or
+#define arch_atomic_xor			arch_atomic_xor
+#define arch_atomic_fetch_and		arch_atomic_fetch_and
+#define arch_atomic_fetch_or		arch_atomic_fetch_or
+#define arch_atomic_fetch_xor		arch_atomic_fetch_xor

-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+#define arch_atomic_xchg(v, new)	(arch_xchg(&((v)->counter), new))
+
+static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 	return __atomic_cmpxchg(&v->counter, old, new);
 }
+#define arch_atomic_cmpxchg arch_atomic_cmpxchg

 #define ATOMIC64_INIT(i)  { (i) }

-static inline s64 atomic64_read(const atomic64_t *v)
+static inline s64 arch_atomic64_read(const atomic64_t *v)
 {
 	return __atomic64_read(v);
 }
+#define arch_atomic64_read arch_atomic64_read

-static inline void atomic64_set(atomic64_t *v, s64 i)
+static inline void arch_atomic64_set(atomic64_t *v, s64 i)
 {
 	__atomic64_set(v, i);
 }
+#define arch_atomic64_set arch_atomic64_set

-static inline s64 atomic64_add_return(s64 i, atomic64_t *v)
+static inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
 {
 	return __atomic64_add_barrier(i, (long *)&v->counter) + i;
 }
+#define arch_atomic64_add_return arch_atomic64_add_return

-static inline s64 atomic64_fetch_add(s64 i, atomic64_t *v)
+static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
 {
 	return __atomic64_add_barrier(i, (long *)&v->counter);
 }
+#define arch_atomic64_fetch_add arch_atomic64_fetch_add

-static inline void atomic64_add(s64 i, atomic64_t *v)
+static inline void arch_atomic64_add(s64 i, atomic64_t *v)
 {
 	__atomic64_add(i, (long *)&v->counter);
 }
+#define arch_atomic64_add arch_atomic64_add

-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic64_xchg(v, new)	(arch_xchg(&((v)->counter), new))

-static inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
+static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
 {
 	return __atomic64_cmpxchg((long *)&v->counter, old, new);
 }
+#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg

 #define ATOMIC64_OPS(op)						\
-static inline void atomic64_##op(s64 i, atomic64_t *v)			\
+static inline void arch_atomic64_##op(s64 i, atomic64_t *v)		\
 {									\
 	__atomic64_##op(i, (long *)&v->counter);			\
 }									\
-static inline long atomic64_fetch_##op(s64 i, atomic64_t *v)		\
+static inline long arch_atomic64_fetch_##op(s64 i, atomic64_t *v)	\
 {									\
 	return __atomic64_##op##_barrier(i, (long *)&v->counter);	\
 }
@@ -117,8 +136,17 @@ ATOMIC64_OPS(xor)

 #undef ATOMIC64_OPS

-#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(s64)(_i), _v)
-#define atomic64_fetch_sub(_i, _v)	atomic64_fetch_add(-(s64)(_i), _v)
-#define atomic64_sub(_i, _v)		atomic64_add(-(s64)(_i), _v)
+#define arch_atomic64_and		arch_atomic64_and
+#define arch_atomic64_or		arch_atomic64_or
+#define arch_atomic64_xor		arch_atomic64_xor
+#define arch_atomic64_fetch_and		arch_atomic64_fetch_and
+#define arch_atomic64_fetch_or		arch_atomic64_fetch_or
+#define arch_atomic64_fetch_xor		arch_atomic64_fetch_xor
+
+#define arch_atomic64_sub_return(_i, _v) arch_atomic64_add_return(-(s64)(_i), _v)
+#define arch_atomic64_fetch_sub(_i, _v)  arch_atomic64_fetch_add(-(s64)(_i), _v)
+#define arch_atomic64_sub(_i, _v)	 arch_atomic64_add(-(s64)(_i), _v)
+
+#define ARCH_ATOMIC

 #endif /* __ARCH_S390_ATOMIC__  */
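Note: the self-referencing defines above (e.g. "#define arch_atomic_read arch_atomic_read") are how the generic fallback layer detects which operations the architecture supplies; anything not advertised this way is synthesized from the ops that are present. A sketch of that #ifndef idiom as used by include/linux/atomic-arch-fallback.h, with arch_atomic_inc chosen as an illustrative example:

/*
 * Sketch of the fallback idiom: an operation the architecture did not
 * advertise via a same-named #define is built from one it did provide.
 */
#ifndef arch_atomic_inc
static __always_inline void
arch_atomic_inc(atomic_t *v)
{
	arch_atomic_add(1, v);
}
#define arch_atomic_inc arch_atomic_inc
#endif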
arch/s390/include/asm/cmpxchg.h  +6 −6
@@ -73,7 +73,7 @@ static inline unsigned long __xchg(unsigned long x, void *ptr, int size)
 	return x;
 }

-#define xchg(ptr, x)							\
+#define arch_xchg(ptr, x)						\
 ({									\
 	__typeof__(*(ptr)) __ret;					\
 									\
@@ -154,7 +154,7 @@ static inline unsigned long __cmpxchg(void *ptr, unsigned long old,
 	return old;
 }

-#define cmpxchg(ptr, o, n)						\
+#define arch_cmpxchg(ptr, o, n)						\
 ({									\
 	__typeof__(*(ptr)) __ret;					\
 									\
@@ -164,9 +164,9 @@ static inline unsigned long __cmpxchg(void *ptr, unsigned long old,
 	__ret;								\
 })

-#define cmpxchg64	cmpxchg
-#define cmpxchg_local	cmpxchg
-#define cmpxchg64_local cmpxchg
+#define arch_cmpxchg64		arch_cmpxchg
+#define arch_cmpxchg_local	arch_cmpxchg
+#define arch_cmpxchg64_local	arch_cmpxchg

 #define system_has_cmpxchg_double()	1

@@ -188,7 +188,7 @@ static inline unsigned long __cmpxchg(void *ptr, unsigned long old,
 	!cc;								\
 })

-#define cmpxchg_double(p1, p2, o1, o2, n1, n2)				\
+#define arch_cmpxchg_double(p1, p2, o1, o2, n1, n2)			\
 ({									\
 	__typeof__(p1) __p1 = (p1);					\
 	__typeof__(p2) __p2 = (p2);					\
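With the cmpxchg family carrying the arch_ prefix as well, the generic instrumented layer can wrap those macros too. A simplified sketch of the pattern from include/asm-generic/atomic-instrumented.h (the exact instrumentation hook is an assumption; it differs between kernel versions):

/*
 * Sketch: the un-prefixed macro instruments the target address, then
 * expands to the arch_cmpxchg() implementation defined above.
 */
#define cmpxchg(ptr, ...)						\
({									\
	typeof(ptr) __ai_ptr = (ptr);					\
	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr));		\
	arch_cmpxchg(__ai_ptr, __VA_ARGS__);				\
})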