Commit f3e615b4 authored by Mark Rutland, committed by Peter Zijlstra

locking/atomic: remove ARCH_ATOMIC remnants



Now that gen-atomic-fallback.sh is only used to generate the arch_*
fallbacks, we don't need to also generate the non-arch_* forms, and can
remove the infrastructure this required.

There is no change to any of the generated headers as a result of this
patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210713105253.7615-3-mark.rutland@arm.com
parent 47401d94
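
For context: the files touched below are shell here-document templates from scripts/atomic/fallbacks/ in the kernel tree, and ${arch} was the prefix variable that let the same template be expanded both with "arch_" and with an empty prefix to generate the two families of fallbacks. A sketch of the two outputs the dec template could previously produce (assuming ${atomic}=atomic, ${ret}=void, and empty ${pfx}/${sfx}/${order}/${retstmt}):

/* ${arch}="" -- the non-arch_ form, no longer generated: */
static __always_inline void
atomic_dec(atomic_t *v)
{
	atomic_sub(1, v);
}

/* ${arch}="arch_" -- the form this patch hardcodes: */
static __always_inline void
arch_atomic_dec(atomic_t *v)
{
	arch_atomic_sub(1, v);
}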
+2 −2
 cat <<EOF
 static __always_inline ${ret}
-${arch}${atomic}_${pfx}${name}${sfx}_acquire(${params})
+arch_${atomic}_${pfx}${name}${sfx}_acquire(${params})
 {
-	${ret} ret = ${arch}${atomic}_${pfx}${name}${sfx}_relaxed(${args});
+	${ret} ret = arch_${atomic}_${pfx}${name}${sfx}_relaxed(${args});
 	__atomic_acquire_fence();
 	return ret;
 }
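
As an illustration (not part of the diff), with ${atomic}=atomic, ${name}=add_return, ${pfx} and ${sfx} empty, ${ret}=int, ${params}="int i, atomic_t *v", and ${args}="i, v", the acquire template above now emits:

static __always_inline int
arch_atomic_add_return_acquire(int i, atomic_t *v)
{
	int ret = arch_atomic_add_return_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
}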
+3 −3
 cat <<EOF
 /**
- * ${arch}${atomic}_add_negative - add and test if negative
+ * arch_${atomic}_add_negative - add and test if negative
  * @i: integer value to add
  * @v: pointer of type ${atomic}_t
  *
@@ -9,8 +9,8 @@ cat <<EOF
  * result is greater than or equal to zero.
  */
 static __always_inline bool
-${arch}${atomic}_add_negative(${int} i, ${atomic}_t *v)
+arch_${atomic}_add_negative(${int} i, ${atomic}_t *v)
 {
-	return ${arch}${atomic}_add_return(i, v) < 0;
+	return arch_${atomic}_add_return(i, v) < 0;
 }
 EOF
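
For the same illustrative bindings (${atomic}=atomic, ${int}=int), the add_negative template expands to:

static __always_inline bool
arch_atomic_add_negative(int i, atomic_t *v)
{
	return arch_atomic_add_return(i, v) < 0;
}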
+3 −3
 cat << EOF
 /**
- * ${arch}${atomic}_add_unless - add unless the number is already a given value
+ * arch_${atomic}_add_unless - add unless the number is already a given value
  * @v: pointer of type ${atomic}_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
@@ -9,8 +9,8 @@ cat << EOF
  * Returns true if the addition was done.
  */
 static __always_inline bool
-${arch}${atomic}_add_unless(${atomic}_t *v, ${int} a, ${int} u)
+arch_${atomic}_add_unless(${atomic}_t *v, ${int} a, ${int} u)
 {
-	return ${arch}${atomic}_fetch_add_unless(v, a, u) != u;
+	return arch_${atomic}_fetch_add_unless(v, a, u) != u;
 }
 EOF
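
Likewise, assuming ${atomic}=atomic and ${int}=int, the add_unless template expands to:

static __always_inline bool
arch_atomic_add_unless(atomic_t *v, int a, int u)
{
	return arch_atomic_fetch_add_unless(v, a, u) != u;
}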
+2 −2
 cat <<EOF
 static __always_inline ${ret}
-${arch}${atomic}_${pfx}andnot${sfx}${order}(${int} i, ${atomic}_t *v)
+arch_${atomic}_${pfx}andnot${sfx}${order}(${int} i, ${atomic}_t *v)
 {
-	${retstmt}${arch}${atomic}_${pfx}and${sfx}${order}(~i, v);
+	${retstmt}arch_${atomic}_${pfx}and${sfx}${order}(~i, v);
 }
 EOF
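
For a value-returning variant (assuming ${pfx}=fetch_, empty ${sfx}/${order}, ${ret}=int, ${retstmt}="return "), the andnot template expands to:

static __always_inline int
arch_atomic_fetch_andnot(int i, atomic_t *v)
{
	return arch_atomic_fetch_and(~i, v);
}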
+2 −2
 cat <<EOF
 static __always_inline ${ret}
-${arch}${atomic}_${pfx}dec${sfx}${order}(${atomic}_t *v)
+arch_${atomic}_${pfx}dec${sfx}${order}(${atomic}_t *v)
 {
-	${retstmt}${arch}${atomic}_${pfx}sub${sfx}${order}(1, v);
+	${retstmt}arch_${atomic}_${pfx}sub${sfx}${order}(1, v);
 }
 EOF
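
And for the 64-bit type (assuming ${atomic}=atomic64, ${ret}=void, empty ${pfx}/${sfx}/${order}/${retstmt}), the dec template expands to:

static __always_inline void
arch_atomic64_dec(atomic64_t *v)
{
	arch_atomic64_sub(1, v);
}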