Commit c7b5fd6f authored by Mark Rutland, committed by Peter Zijlstra

locking/atomic: mips: move to ARCH_ATOMIC

We'd like all architectures to convert to ARCH_ATOMIC, as once all
architectures are converted it will be possible to make significant
cleanups to the atomics headers, and this will make it much easier to
generically enable atomic functionality (e.g. debug logic in the
instrumented wrappers).

As a step towards that, this patch migrates mips to ARCH_ATOMIC. The
arch code provides arch_{atomic,atomic64,xchg,cmpxchg}*(), and common
code wraps these with optional instrumentation to provide the regular
functions.
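
For illustration, the common wrappers have roughly this shape (a minimal
sketch of the code generated into <linux/atomic-instrumented.h>, not the
verbatim header):

	static __always_inline void
	atomic_add(int i, atomic_t *v)
	{
		instrument_atomic_read_write(v, sizeof(*v));
		arch_atomic_add(i, v);
	}

so the architecture only has to supply the arch_*() implementations.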

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210525140232.53872-23-mark.rutland@arm.com
parent f5b1c0f9
arch/mips/Kconfig  +1 −0
@@ -3,6 +3,7 @@ config MIPS
 	bool
 	default y
 	select ARCH_32BIT_OFF_T if !64BIT
+	select ARCH_ATOMIC
 	select ARCH_BINFMT_ELF_STATE if MIPS_FP_SUPPORT
 	select ARCH_HAS_DEBUG_VIRTUAL if !64BIT
 	select ARCH_HAS_FORTIFY_SOURCE
arch/mips/include/asm/atomic.h  +29 −26
@@ -25,24 +25,25 @@
 #include <asm/war.h>
 
 #define ATOMIC_OPS(pfx, type)						\
-static __always_inline type pfx##_read(const pfx##_t *v)		\
+static __always_inline type arch_##pfx##_read(const pfx##_t *v)		\
 {									\
 	return READ_ONCE(v->counter);					\
 }									\
 									\
-static __always_inline void pfx##_set(pfx##_t *v, type i)		\
+static __always_inline void arch_##pfx##_set(pfx##_t *v, type i)	\
 {									\
 	WRITE_ONCE(v->counter, i);					\
 }									\
 									\
-static __always_inline type pfx##_cmpxchg(pfx##_t *v, type o, type n)	\
+static __always_inline type						\
+arch_##pfx##_cmpxchg(pfx##_t *v, type o, type n)			\
 {									\
-	return cmpxchg(&v->counter, o, n);				\
+	return arch_cmpxchg(&v->counter, o, n);				\
 }									\
 									\
-static __always_inline type pfx##_xchg(pfx##_t *v, type n)		\
+static __always_inline type arch_##pfx##_xchg(pfx##_t *v, type n)	\
 {									\
-	return xchg(&v->counter, n);					\
+	return arch_xchg(&v->counter, n);				\
 }
 
 ATOMIC_OPS(atomic, int)
@@ -53,7 +54,7 @@ ATOMIC_OPS(atomic64, s64)
 #endif
 
 #define ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc)			\
-static __inline__ void pfx##_##op(type i, pfx##_t * v)			\
+static __inline__ void arch_##pfx##_##op(type i, pfx##_t * v)		\
 {									\
 	type temp;							\
 									\
@@ -80,7 +81,8 @@ static __inline__ void pfx##_##op(type i, pfx##_t * v) \
 }
 
 #define ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc)		\
-static __inline__ type pfx##_##op##_return_relaxed(type i, pfx##_t * v)	\
+static __inline__ type							\
+arch_##pfx##_##op##_return_relaxed(type i, pfx##_t * v)			\
 {									\
 	type temp, result;						\
 									\
@@ -113,7 +115,8 @@ static __inline__ type pfx##_##op##_return_relaxed(type i, pfx##_t * v) \
 }
 
 #define ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)		\
-static __inline__ type pfx##_fetch_##op##_relaxed(type i, pfx##_t * v)	\
+static __inline__ type							\
+arch_##pfx##_fetch_##op##_relaxed(type i, pfx##_t * v)			\
 {									\
 	int temp, result;						\
 									\
@@ -153,18 +156,18 @@ static __inline__ type pfx##_fetch_##op##_relaxed(type i, pfx##_t * v) \
 ATOMIC_OPS(atomic, add, int, +=, addu, ll, sc)
 ATOMIC_OPS(atomic, sub, int, -=, subu, ll, sc)
 
-#define atomic_add_return_relaxed	atomic_add_return_relaxed
-#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
-#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
-#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed
+#define arch_atomic_add_return_relaxed	arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed	arch_atomic_sub_return_relaxed
+#define arch_atomic_fetch_add_relaxed	arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed	arch_atomic_fetch_sub_relaxed
 
 #ifdef CONFIG_64BIT
 ATOMIC_OPS(atomic64, add, s64, +=, daddu, lld, scd)
 ATOMIC_OPS(atomic64, sub, s64, -=, dsubu, lld, scd)
-# define atomic64_add_return_relaxed	atomic64_add_return_relaxed
-# define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
-# define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
-# define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed
+# define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
+# define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
+# define arch_atomic64_fetch_add_relaxed	arch_atomic64_fetch_add_relaxed
+# define arch_atomic64_fetch_sub_relaxed	arch_atomic64_fetch_sub_relaxed
 #endif /* CONFIG_64BIT */
 
 #undef ATOMIC_OPS
@@ -176,17 +179,17 @@ ATOMIC_OPS(atomic, and, int, &=, and, ll, sc)
 ATOMIC_OPS(atomic, or, int, |=, or, ll, sc)
 ATOMIC_OPS(atomic, xor, int, ^=, xor, ll, sc)
 
-#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
-#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
-#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_and_relaxed	arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_or_relaxed	arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed	arch_atomic_fetch_xor_relaxed
 
 #ifdef CONFIG_64BIT
 ATOMIC_OPS(atomic64, and, s64, &=, and, lld, scd)
 ATOMIC_OPS(atomic64, or, s64, |=, or, lld, scd)
 ATOMIC_OPS(atomic64, xor, s64, ^=, xor, lld, scd)
-# define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
-# define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
-# define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed
+# define arch_atomic64_fetch_and_relaxed	arch_atomic64_fetch_and_relaxed
+# define arch_atomic64_fetch_or_relaxed		arch_atomic64_fetch_or_relaxed
+# define arch_atomic64_fetch_xor_relaxed	arch_atomic64_fetch_xor_relaxed
 #endif
 
 #undef ATOMIC_OPS
@@ -203,7 +206,7 @@ ATOMIC_OPS(atomic64, xor, s64, ^=, xor, lld, scd)
  * The function returns the old value of @v minus @i.
  */
 #define ATOMIC_SIP_OP(pfx, type, op, ll, sc)				\
-static __inline__ int pfx##_sub_if_positive(type i, pfx##_t * v)	\
+static __inline__ int arch_##pfx##_sub_if_positive(type i, pfx##_t * v)	\
 {									\
 	type temp, result;						\
 									\
@@ -255,11 +258,11 @@ static __inline__ int pfx##_sub_if_positive(type i, pfx##_t * v) \
 }
 
 ATOMIC_SIP_OP(atomic, int, subu, ll, sc)
-#define atomic_dec_if_positive(v)	atomic_sub_if_positive(1, v)
+#define arch_atomic_dec_if_positive(v)	arch_atomic_sub_if_positive(1, v)
 
 #ifdef CONFIG_64BIT
ATOMIC_SIP_OP(atomic64, s64, dsubu, lld, scd)
-#define atomic64_dec_if_positive(v)	atomic64_sub_if_positive(1, v)
+#define arch_atomic64_dec_if_positive(v)	arch_atomic64_sub_if_positive(1, v)
 #endif
 
 #undef ATOMIC_SIP_OP
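
As an illustration (hand-expanded sketch, not part of the patch), ATOMIC_OPS(atomic, int) above now generates accessors of this shape:

	static __always_inline int arch_atomic_read(const atomic_t *v)
	{
		return READ_ONCE(v->counter);
	}

	static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int o, int n)
	{
		return arch_cmpxchg(&v->counter, o, n);
	}
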
arch/mips/include/asm/cmpxchg.h  +11 −11
@@ -90,7 +90,7 @@ unsigned long __xchg(volatile void *ptr, unsigned long x, int size)
 	}
 }
 
-#define xchg(ptr, x)							\
+#define arch_xchg(ptr, x)						\
 ({									\
 	__typeof__(*(ptr)) __res;					\
 									\
@@ -175,14 +175,14 @@ unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 	}
 }
 
-#define cmpxchg_local(ptr, old, new)					\
+#define arch_cmpxchg_local(ptr, old, new)				\
 	((__typeof__(*(ptr)))						\
 		__cmpxchg((ptr),					\
 			  (unsigned long)(__typeof__(*(ptr)))(old),	\
 			  (unsigned long)(__typeof__(*(ptr)))(new),	\
 			  sizeof(*(ptr))))
 
-#define cmpxchg(ptr, old, new)						\
+#define arch_cmpxchg(ptr, old, new)					\
 ({									\
 	__typeof__(*(ptr)) __res;					\
 									\
@@ -194,7 +194,7 @@ unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 	if (__SYNC_loongson3_war == 0)					\
 		smp_mb__before_llsc();					\
 									\
-	__res = cmpxchg_local((ptr), (old), (new));			\
+	__res = arch_cmpxchg_local((ptr), (old), (new));		\
 									\
 	/*								\
 	 * In the Loongson3 workaround case __cmpxchg_asm() already	\
@@ -208,21 +208,21 @@ unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 })
 
 #ifdef CONFIG_64BIT
-#define cmpxchg64_local(ptr, o, n)					\
+#define arch_cmpxchg64_local(ptr, o, n)					\
   ({									\
 	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
-	cmpxchg_local((ptr), (o), (n));					\
+	arch_cmpxchg_local((ptr), (o), (n));				\
   })
 
-#define cmpxchg64(ptr, o, n)						\
+#define arch_cmpxchg64(ptr, o, n)					\
   ({									\
 	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
-	cmpxchg((ptr), (o), (n));					\
+	arch_cmpxchg((ptr), (o), (n));					\
   })
 #else
 
 # include <asm-generic/cmpxchg-local.h>
-# define cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
+# define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
 
 # ifdef CONFIG_SMP
 
@@ -294,7 +294,7 @@ static inline unsigned long __cmpxchg64(volatile void *ptr,
 	return ret;
 }
 
-#  define cmpxchg64(ptr, o, n) ({					\
+#  define arch_cmpxchg64(ptr, o, n) ({					\
 	unsigned long long __old = (__typeof__(*(ptr)))(o);		\
 	unsigned long long __new = (__typeof__(*(ptr)))(n);		\
 	__typeof__(*(ptr)) __res;					\
@@ -317,7 +317,7 @@ static inline unsigned long __cmpxchg64(volatile void *ptr,
 })
 
 # else /* !CONFIG_SMP */
-#  define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
+#  define arch_cmpxchg64(ptr, o, n) arch_cmpxchg64_local((ptr), (o), (n))
 # endif /* !CONFIG_SMP */
 #endif /* !CONFIG_64BIT */
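
For reference, callers keep the usual xchg/cmpxchg semantics through the renamed macros; e.g. (illustrative only):

	unsigned int x = 1;
	unsigned int old = arch_cmpxchg(&x, 1, 2);	/* x becomes 2; old == 1 */
	unsigned int prev = arch_xchg(&x, 5);		/* x becomes 5; prev == 2 */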

arch/mips/kernel/cmpxchg.c  +2 −2
@@ -41,7 +41,7 @@ unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int s
 	do {
 		old32 = load32;
 		new32 = (load32 & ~mask) | (val << shift);
-		load32 = cmpxchg(ptr32, old32, new32);
+		load32 = arch_cmpxchg(ptr32, old32, new32);
 	} while (load32 != old32);
 
 	return (load32 & mask) >> shift;
@@ -97,7 +97,7 @@ unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
 		 */
 		old32 = (load32 & ~mask) | (old << shift);
 		new32 = (load32 & ~mask) | (new << shift);
-		load32 = cmpxchg(ptr32, old32, new32);
+		load32 = arch_cmpxchg(ptr32, old32, new32);
 		if (load32 == old32)
 			return old;
 	}
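
The splice step above, (load32 & ~mask) | (val << shift), is the core of the sub-word emulation; a standalone sketch (plain C, hypothetical helper name, little-endian byte offsets assumed):

	#include <stdint.h>

	/* Build the 32-bit word that has the 8-bit val spliced into byte <offset>. */
	static uint32_t splice_byte(uint32_t word, uint8_t val, unsigned int offset)
	{
		unsigned int shift = offset * 8;	/* bit position of the target byte */
		uint32_t mask = 0xffu << shift;		/* bits owned by that byte */

		return (word & ~mask) | ((uint32_t)val << shift);
	}

	/* e.g. splice_byte(0x11223344, 0xaa, 1) == 0x1122aa44 */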