Commit 301014cf authored by Vineet Gupta's avatar Vineet Gupta
Browse files

ARC: atomic_cmpxchg/atomic_xchg: implement relaxed variants



And move them out of cmpxchg.h to canonical atomic.h

Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Vineet Gupta <vgupta@kernel.org>
parent ddc348c4
Loading
Loading
Loading
Loading
+27 −0
Original line number Diff line number Diff line
@@ -22,6 +22,33 @@
#include <asm/atomic-spinlock.h>
#endif

/*
 * atomic_t compare-and-exchange: forwards to arch_cmpxchg() on the
 * counter member, so ordering/locking semantics are whatever the
 * platform's arch_cmpxchg() provides.
 */
#define arch_atomic_cmpxchg(v, o, n)					\
({									\
	arch_cmpxchg(&((v)->counter), (o), (n));			\
})

/*
 * Relaxed (no ordering guarantees) variant — only defined when the
 * platform supplies arch_cmpxchg_relaxed(); otherwise the generic
 * atomic code falls back to the fully-ordered version above.
 */
#ifdef arch_cmpxchg_relaxed
#define arch_atomic_cmpxchg_relaxed(v, o, n)				\
({									\
	arch_cmpxchg_relaxed(&((v)->counter), (o), (n));		\
})
#endif

/*
 * atomic_t exchange: forwards to arch_xchg() on the counter member.
 */
#define arch_atomic_xchg(v, n)						\
({									\
	arch_xchg(&((v)->counter), (n));				\
})

/*
 * Relaxed variant, available only when the platform provides
 * arch_xchg_relaxed().
 */
#ifdef arch_xchg_relaxed
#define arch_atomic_xchg_relaxed(v, n)					\
({									\
	arch_xchg_relaxed(&((v)->counter), (n));			\
})
#endif

/*
 * 64-bit atomics
 */
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#else
+0 −23
Original line number Diff line number Diff line
@@ -80,14 +80,6 @@

#endif

/*
 * atomic_cmpxchg is same as cmpxchg
 *   LLSC: only different in data-type, semantics are exactly same
 *  !LLSC: cmpxchg() has to use an external lock atomic_ops_lock to guarantee
 *         semantics, and this lock also happens to be used by atomic_*()
 */
/* NOTE(review): the (int) cast presumably narrows arch_cmpxchg()'s result to
 * the atomic_t counter type — confirm against the atomic_t definition */
#define arch_atomic_cmpxchg(v, o, n) ((int)arch_cmpxchg(&((v)->counter), (o), (n)))

/*
 * xchg
 */
@@ -148,19 +140,4 @@

#endif

/*
 * "atomic" variant of xchg()
 * REQ: It needs to follow the same serialization rules as other atomic_xxx()
 * Since xchg() doesn't always do that, it would seem that following definition
 * is incorrect. But here's the rationale:
 *   SMP : Even xchg() takes the atomic_ops_lock, so OK.
 *   LLSC: atomic_ops_lock are not relevant at all (even if SMP, since LLSC
 *         is natively "SMP safe", no serialization required).
 *   UP  : other atomics disable IRQ, so no way a different-context
 *         atomic_xchg() could clobber them. atomic_xchg() itself would be a
 *         single instruction, so it can't be clobbered by others. Thus no
 *         serialization required when atomic_xchg is involved.
 */
#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))

#endif