Commit 9257959a authored by Mark Rutland, committed by Peter Zijlstra

locking/atomic: scripts: restructure fallback ifdeffery

Currently the various ordering variants of an atomic operation are
defined in groups of full/acquire/release/relaxed ordering variants
which share common ifdeffery, with several potential definitions of
each ordering variant down different branches of that shared ifdeffery.

As an ordering variant can have several potential definitions down
different branches of the shared ifdeffery, it can be painful for a
human to find a relevant definition, and we don't have a good location
to place anything common to all definitions of an ordering variant (e.g.
kerneldoc).
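
For comparison, the old grouped structure for one operation looked
roughly like the following (a condensed sketch rather than the verbatim
generated header, eliding the _release and fully-ordered branches):

| #ifndef arch_atomic_fetch_andnot_relaxed
| #define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot
| #define arch_atomic_fetch_andnot_release arch_atomic_fetch_andnot
| #define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot
| #else /* arch_atomic_fetch_andnot_relaxed */
|
| #ifndef arch_atomic_fetch_andnot_acquire
| static __always_inline int
| arch_atomic_fetch_andnot_acquire(int i, atomic_t *v)
| {
| 	int ret = arch_atomic_fetch_andnot_relaxed(i, v);
| 	__atomic_acquire_fence();
| 	return ret;
| }
| #define arch_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
| #endif
|
| /* ... similar blocks for _release and the fully-ordered variant ... */
|
| #endif /* arch_atomic_fetch_andnot_relaxed */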

Historically the grouping of full/acquire/release/relaxed ordering
variants was necessary as we filled in the missing atomics in the same
namespace as the architecture used. It would be easy to accidentally
define one ordering fallback in terms of another ordering fallback with
redundant barriers, and avoiding that would otherwise require a lot of
baroque ifdeffery.
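
For instance, consider a hypothetical architecture that only provides a
fully-ordered arch_atomic_add_return(): if _relaxed is filled in as an
alias of it, and _acquire is then naively built on top of _relaxed, the
result carries a redundant barrier:

| /* _relaxed falls back to the fully-ordered op... */
| #define arch_atomic_add_return_relaxed arch_atomic_add_return
|
| static __always_inline int
| arch_atomic_add_return_acquire(int i, atomic_t *v)
| {
| 	int ret = arch_atomic_add_return_relaxed(i, v);
| 	/* ...so this fence is redundant. */
| 	__atomic_acquire_fence();
| 	return ret;
| }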

With recent changes we no longer need to fill in the missing atomics in
the arch_atomic*_<op>() namespace, and only need to fill in the
raw_atomic*_<op>() namespace. Due to this, there's no risk of a
namespace collision, and we can define each raw_atomic*_<op> ordering
variant with its own ifdeffery checking for the arch_atomic*_<op>
ordering variants.

Restructure the fallbacks in this way, with each ordering variant having
its own ifdeffery of the form:

| #if defined(arch_atomic_fetch_andnot_acquire)
| #define raw_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
| #elif defined(arch_atomic_fetch_andnot_relaxed)
| static __always_inline int
| raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
| {
| 	int ret = arch_atomic_fetch_andnot_relaxed(i, v);
| 	__atomic_acquire_fence();
| 	return ret;
| }
| #elif defined(arch_atomic_fetch_andnot)
| #define raw_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot
| #else
| static __always_inline int
| raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
| {
| 	return raw_atomic_fetch_and_acquire(~i, v);
| }
| #endif

Note that where there's no relevant arch_atomic*_<op>() ordering
variant, we'll define the operation in terms of a distinct
raw_atomic*_<otherop>(), as this itself might have been filled in with
a fallback; the #else branch above does exactly this via
raw_atomic_fetch_and_acquire().

As we now generate the raw_atomic*_<op>() implementations directly, we
no longer need the trivial wrappers in <linux/atomic/atomic-raw.h>, so
that header is removed.

This makes the ifdeffery easier to follow, and will allow for further
improvements in subsequent patches.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20230605070124.3741859-21-mark.rutland@arm.com
parent 1815da17
+0 −1
@@ -78,7 +78,6 @@
 })
 
 #include <linux/atomic/atomic-arch-fallback.h>
-#include <linux/atomic/atomic-raw.h>
 #include <linux/atomic/atomic-long.h>
 #include <linux/atomic/atomic-instrumented.h>
 
+1670 −1508

File changed (preview size limit exceeded, changes collapsed).

include/linux/atomic/atomic-raw.h

deleted 100644 → 0
+0 −1135
// SPDX-License-Identifier: GPL-2.0

// Generated by scripts/atomic/gen-atomic-raw.sh
// DO NOT MODIFY THIS FILE DIRECTLY

#ifndef _LINUX_ATOMIC_RAW_H
#define _LINUX_ATOMIC_RAW_H

static __always_inline int
raw_atomic_read(const atomic_t *v)
{
	return arch_atomic_read(v);
}

static __always_inline int
raw_atomic_read_acquire(const atomic_t *v)
{
	return arch_atomic_read_acquire(v);
}

static __always_inline void
raw_atomic_set(atomic_t *v, int i)
{
	arch_atomic_set(v, i);
}

static __always_inline void
raw_atomic_set_release(atomic_t *v, int i)
{
	arch_atomic_set_release(v, i);
}

static __always_inline void
raw_atomic_add(int i, atomic_t *v)
{
	arch_atomic_add(i, v);
}

static __always_inline int
raw_atomic_add_return(int i, atomic_t *v)
{
	return arch_atomic_add_return(i, v);
}

static __always_inline int
raw_atomic_add_return_acquire(int i, atomic_t *v)
{
	return arch_atomic_add_return_acquire(i, v);
}

static __always_inline int
raw_atomic_add_return_release(int i, atomic_t *v)
{
	return arch_atomic_add_return_release(i, v);
}

static __always_inline int
raw_atomic_add_return_relaxed(int i, atomic_t *v)
{
	return arch_atomic_add_return_relaxed(i, v);
}

static __always_inline int
raw_atomic_fetch_add(int i, atomic_t *v)
{
	return arch_atomic_fetch_add(i, v);
}

static __always_inline int
raw_atomic_fetch_add_acquire(int i, atomic_t *v)
{
	return arch_atomic_fetch_add_acquire(i, v);
}

static __always_inline int
raw_atomic_fetch_add_release(int i, atomic_t *v)
{
	return arch_atomic_fetch_add_release(i, v);
}

static __always_inline int
raw_atomic_fetch_add_relaxed(int i, atomic_t *v)
{
	return arch_atomic_fetch_add_relaxed(i, v);
}

static __always_inline void
raw_atomic_sub(int i, atomic_t *v)
{
	arch_atomic_sub(i, v);
}

static __always_inline int
raw_atomic_sub_return(int i, atomic_t *v)
{
	return arch_atomic_sub_return(i, v);
}

static __always_inline int
raw_atomic_sub_return_acquire(int i, atomic_t *v)
{
	return arch_atomic_sub_return_acquire(i, v);
}

static __always_inline int
raw_atomic_sub_return_release(int i, atomic_t *v)
{
	return arch_atomic_sub_return_release(i, v);
}

static __always_inline int
raw_atomic_sub_return_relaxed(int i, atomic_t *v)
{
	return arch_atomic_sub_return_relaxed(i, v);
}

static __always_inline int
raw_atomic_fetch_sub(int i, atomic_t *v)
{
	return arch_atomic_fetch_sub(i, v);
}

static __always_inline int
raw_atomic_fetch_sub_acquire(int i, atomic_t *v)
{
	return arch_atomic_fetch_sub_acquire(i, v);
}

static __always_inline int
raw_atomic_fetch_sub_release(int i, atomic_t *v)
{
	return arch_atomic_fetch_sub_release(i, v);
}

static __always_inline int
raw_atomic_fetch_sub_relaxed(int i, atomic_t *v)
{
	return arch_atomic_fetch_sub_relaxed(i, v);
}

static __always_inline void
raw_atomic_inc(atomic_t *v)
{
	arch_atomic_inc(v);
}

static __always_inline int
raw_atomic_inc_return(atomic_t *v)
{
	return arch_atomic_inc_return(v);
}

static __always_inline int
raw_atomic_inc_return_acquire(atomic_t *v)
{
	return arch_atomic_inc_return_acquire(v);
}

static __always_inline int
raw_atomic_inc_return_release(atomic_t *v)
{
	return arch_atomic_inc_return_release(v);
}

static __always_inline int
raw_atomic_inc_return_relaxed(atomic_t *v)
{
	return arch_atomic_inc_return_relaxed(v);
}

static __always_inline int
raw_atomic_fetch_inc(atomic_t *v)
{
	return arch_atomic_fetch_inc(v);
}

static __always_inline int
raw_atomic_fetch_inc_acquire(atomic_t *v)
{
	return arch_atomic_fetch_inc_acquire(v);
}

static __always_inline int
raw_atomic_fetch_inc_release(atomic_t *v)
{
	return arch_atomic_fetch_inc_release(v);
}

static __always_inline int
raw_atomic_fetch_inc_relaxed(atomic_t *v)
{
	return arch_atomic_fetch_inc_relaxed(v);
}

static __always_inline void
raw_atomic_dec(atomic_t *v)
{
	arch_atomic_dec(v);
}

static __always_inline int
raw_atomic_dec_return(atomic_t *v)
{
	return arch_atomic_dec_return(v);
}

static __always_inline int
raw_atomic_dec_return_acquire(atomic_t *v)
{
	return arch_atomic_dec_return_acquire(v);
}

static __always_inline int
raw_atomic_dec_return_release(atomic_t *v)
{
	return arch_atomic_dec_return_release(v);
}

static __always_inline int
raw_atomic_dec_return_relaxed(atomic_t *v)
{
	return arch_atomic_dec_return_relaxed(v);
}

static __always_inline int
raw_atomic_fetch_dec(atomic_t *v)
{
	return arch_atomic_fetch_dec(v);
}

static __always_inline int
raw_atomic_fetch_dec_acquire(atomic_t *v)
{
	return arch_atomic_fetch_dec_acquire(v);
}

static __always_inline int
raw_atomic_fetch_dec_release(atomic_t *v)
{
	return arch_atomic_fetch_dec_release(v);
}

static __always_inline int
raw_atomic_fetch_dec_relaxed(atomic_t *v)
{
	return arch_atomic_fetch_dec_relaxed(v);
}

static __always_inline void
raw_atomic_and(int i, atomic_t *v)
{
	arch_atomic_and(i, v);
}

static __always_inline int
raw_atomic_fetch_and(int i, atomic_t *v)
{
	return arch_atomic_fetch_and(i, v);
}

static __always_inline int
raw_atomic_fetch_and_acquire(int i, atomic_t *v)
{
	return arch_atomic_fetch_and_acquire(i, v);
}

static __always_inline int
raw_atomic_fetch_and_release(int i, atomic_t *v)
{
	return arch_atomic_fetch_and_release(i, v);
}

static __always_inline int
raw_atomic_fetch_and_relaxed(int i, atomic_t *v)
{
	return arch_atomic_fetch_and_relaxed(i, v);
}

static __always_inline void
raw_atomic_andnot(int i, atomic_t *v)
{
	arch_atomic_andnot(i, v);
}

static __always_inline int
raw_atomic_fetch_andnot(int i, atomic_t *v)
{
	return arch_atomic_fetch_andnot(i, v);
}

static __always_inline int
raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
{
	return arch_atomic_fetch_andnot_acquire(i, v);
}

static __always_inline int
raw_atomic_fetch_andnot_release(int i, atomic_t *v)
{
	return arch_atomic_fetch_andnot_release(i, v);
}

static __always_inline int
raw_atomic_fetch_andnot_relaxed(int i, atomic_t *v)
{
	return arch_atomic_fetch_andnot_relaxed(i, v);
}

static __always_inline void
raw_atomic_or(int i, atomic_t *v)
{
	arch_atomic_or(i, v);
}

static __always_inline int
raw_atomic_fetch_or(int i, atomic_t *v)
{
	return arch_atomic_fetch_or(i, v);
}

static __always_inline int
raw_atomic_fetch_or_acquire(int i, atomic_t *v)
{
	return arch_atomic_fetch_or_acquire(i, v);
}

static __always_inline int
raw_atomic_fetch_or_release(int i, atomic_t *v)
{
	return arch_atomic_fetch_or_release(i, v);
}

static __always_inline int
raw_atomic_fetch_or_relaxed(int i, atomic_t *v)
{
	return arch_atomic_fetch_or_relaxed(i, v);
}

static __always_inline void
raw_atomic_xor(int i, atomic_t *v)
{
	arch_atomic_xor(i, v);
}

static __always_inline int
raw_atomic_fetch_xor(int i, atomic_t *v)
{
	return arch_atomic_fetch_xor(i, v);
}

static __always_inline int
raw_atomic_fetch_xor_acquire(int i, atomic_t *v)
{
	return arch_atomic_fetch_xor_acquire(i, v);
}

static __always_inline int
raw_atomic_fetch_xor_release(int i, atomic_t *v)
{
	return arch_atomic_fetch_xor_release(i, v);
}

static __always_inline int
raw_atomic_fetch_xor_relaxed(int i, atomic_t *v)
{
	return arch_atomic_fetch_xor_relaxed(i, v);
}

static __always_inline int
raw_atomic_xchg(atomic_t *v, int i)
{
	return arch_atomic_xchg(v, i);
}

static __always_inline int
raw_atomic_xchg_acquire(atomic_t *v, int i)
{
	return arch_atomic_xchg_acquire(v, i);
}

static __always_inline int
raw_atomic_xchg_release(atomic_t *v, int i)
{
	return arch_atomic_xchg_release(v, i);
}

static __always_inline int
raw_atomic_xchg_relaxed(atomic_t *v, int i)
{
	return arch_atomic_xchg_relaxed(v, i);
}

static __always_inline int
raw_atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return arch_atomic_cmpxchg(v, old, new);
}

static __always_inline int
raw_atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
{
	return arch_atomic_cmpxchg_acquire(v, old, new);
}

static __always_inline int
raw_atomic_cmpxchg_release(atomic_t *v, int old, int new)
{
	return arch_atomic_cmpxchg_release(v, old, new);
}

static __always_inline int
raw_atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
{
	return arch_atomic_cmpxchg_relaxed(v, old, new);
}

static __always_inline bool
raw_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
	return arch_atomic_try_cmpxchg(v, old, new);
}

static __always_inline bool
raw_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
{
	return arch_atomic_try_cmpxchg_acquire(v, old, new);
}

static __always_inline bool
raw_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
{
	return arch_atomic_try_cmpxchg_release(v, old, new);
}

static __always_inline bool
raw_atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
{
	return arch_atomic_try_cmpxchg_relaxed(v, old, new);
}

static __always_inline bool
raw_atomic_sub_and_test(int i, atomic_t *v)
{
	return arch_atomic_sub_and_test(i, v);
}

static __always_inline bool
raw_atomic_dec_and_test(atomic_t *v)
{
	return arch_atomic_dec_and_test(v);
}

static __always_inline bool
raw_atomic_inc_and_test(atomic_t *v)
{
	return arch_atomic_inc_and_test(v);
}

static __always_inline bool
raw_atomic_add_negative(int i, atomic_t *v)
{
	return arch_atomic_add_negative(i, v);
}

static __always_inline bool
raw_atomic_add_negative_acquire(int i, atomic_t *v)
{
	return arch_atomic_add_negative_acquire(i, v);
}

static __always_inline bool
raw_atomic_add_negative_release(int i, atomic_t *v)
{
	return arch_atomic_add_negative_release(i, v);
}

static __always_inline bool
raw_atomic_add_negative_relaxed(int i, atomic_t *v)
{
	return arch_atomic_add_negative_relaxed(i, v);
}

static __always_inline int
raw_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	return arch_atomic_fetch_add_unless(v, a, u);
}

static __always_inline bool
raw_atomic_add_unless(atomic_t *v, int a, int u)
{
	return arch_atomic_add_unless(v, a, u);
}

static __always_inline bool
raw_atomic_inc_not_zero(atomic_t *v)
{
	return arch_atomic_inc_not_zero(v);
}

static __always_inline bool
raw_atomic_inc_unless_negative(atomic_t *v)
{
	return arch_atomic_inc_unless_negative(v);
}

static __always_inline bool
raw_atomic_dec_unless_positive(atomic_t *v)
{
	return arch_atomic_dec_unless_positive(v);
}

static __always_inline int
raw_atomic_dec_if_positive(atomic_t *v)
{
	return arch_atomic_dec_if_positive(v);
}

static __always_inline s64
raw_atomic64_read(const atomic64_t *v)
{
	return arch_atomic64_read(v);
}

static __always_inline s64
raw_atomic64_read_acquire(const atomic64_t *v)
{
	return arch_atomic64_read_acquire(v);
}

static __always_inline void
raw_atomic64_set(atomic64_t *v, s64 i)
{
	arch_atomic64_set(v, i);
}

static __always_inline void
raw_atomic64_set_release(atomic64_t *v, s64 i)
{
	arch_atomic64_set_release(v, i);
}

static __always_inline void
raw_atomic64_add(s64 i, atomic64_t *v)
{
	arch_atomic64_add(i, v);
}

static __always_inline s64
raw_atomic64_add_return(s64 i, atomic64_t *v)
{
	return arch_atomic64_add_return(i, v);
}

static __always_inline s64
raw_atomic64_add_return_acquire(s64 i, atomic64_t *v)
{
	return arch_atomic64_add_return_acquire(i, v);
}

static __always_inline s64
raw_atomic64_add_return_release(s64 i, atomic64_t *v)
{
	return arch_atomic64_add_return_release(i, v);
}

static __always_inline s64
raw_atomic64_add_return_relaxed(s64 i, atomic64_t *v)
{
	return arch_atomic64_add_return_relaxed(i, v);
}

static __always_inline s64
raw_atomic64_fetch_add(s64 i, atomic64_t *v)
{
	return arch_atomic64_fetch_add(i, v);
}

static __always_inline s64
raw_atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
{
	return arch_atomic64_fetch_add_acquire(i, v);
}

static __always_inline s64
raw_atomic64_fetch_add_release(s64 i, atomic64_t *v)
{
	return arch_atomic64_fetch_add_release(i, v);
}

static __always_inline s64
raw_atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
{
	return arch_atomic64_fetch_add_relaxed(i, v);
}

static __always_inline void
raw_atomic64_sub(s64 i, atomic64_t *v)
{
	arch_atomic64_sub(i, v);
}

static __always_inline s64
raw_atomic64_sub_return(s64 i, atomic64_t *v)
{
	return arch_atomic64_sub_return(i, v);
}

static __always_inline s64
raw_atomic64_sub_return_acquire(s64 i, atomic64_t *v)
{
	return arch_atomic64_sub_return_acquire(i, v);
}

static __always_inline s64
raw_atomic64_sub_return_release(s64 i, atomic64_t *v)
{
	return arch_atomic64_sub_return_release(i, v);
}

static __always_inline s64
raw_atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
{
	return arch_atomic64_sub_return_relaxed(i, v);
}

static __always_inline s64
raw_atomic64_fetch_sub(s64 i, atomic64_t *v)
{
	return arch_atomic64_fetch_sub(i, v);
}

static __always_inline s64
raw_atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
{
	return arch_atomic64_fetch_sub_acquire(i, v);
}

static __always_inline s64
raw_atomic64_fetch_sub_release(s64 i, atomic64_t *v)
{
	return arch_atomic64_fetch_sub_release(i, v);
}

static __always_inline s64
raw_atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
{
	return arch_atomic64_fetch_sub_relaxed(i, v);
}

static __always_inline void
raw_atomic64_inc(atomic64_t *v)
{
	arch_atomic64_inc(v);
}

static __always_inline s64
raw_atomic64_inc_return(atomic64_t *v)
{
	return arch_atomic64_inc_return(v);
}

static __always_inline s64
raw_atomic64_inc_return_acquire(atomic64_t *v)
{
	return arch_atomic64_inc_return_acquire(v);
}

static __always_inline s64
raw_atomic64_inc_return_release(atomic64_t *v)
{
	return arch_atomic64_inc_return_release(v);
}

static __always_inline s64
raw_atomic64_inc_return_relaxed(atomic64_t *v)
{
	return arch_atomic64_inc_return_relaxed(v);
}

static __always_inline s64
raw_atomic64_fetch_inc(atomic64_t *v)
{
	return arch_atomic64_fetch_inc(v);
}

static __always_inline s64
raw_atomic64_fetch_inc_acquire(atomic64_t *v)
{
	return arch_atomic64_fetch_inc_acquire(v);
}

static __always_inline s64
raw_atomic64_fetch_inc_release(atomic64_t *v)
{
	return arch_atomic64_fetch_inc_release(v);
}

static __always_inline s64
raw_atomic64_fetch_inc_relaxed(atomic64_t *v)
{
	return arch_atomic64_fetch_inc_relaxed(v);
}

static __always_inline void
raw_atomic64_dec(atomic64_t *v)
{
	arch_atomic64_dec(v);
}

static __always_inline s64
raw_atomic64_dec_return(atomic64_t *v)
{
	return arch_atomic64_dec_return(v);
}

static __always_inline s64
raw_atomic64_dec_return_acquire(atomic64_t *v)
{
	return arch_atomic64_dec_return_acquire(v);
}

static __always_inline s64
raw_atomic64_dec_return_release(atomic64_t *v)
{
	return arch_atomic64_dec_return_release(v);
}

static __always_inline s64
raw_atomic64_dec_return_relaxed(atomic64_t *v)
{
	return arch_atomic64_dec_return_relaxed(v);
}

static __always_inline s64
raw_atomic64_fetch_dec(atomic64_t *v)
{
	return arch_atomic64_fetch_dec(v);
}

static __always_inline s64
raw_atomic64_fetch_dec_acquire(atomic64_t *v)
{
	return arch_atomic64_fetch_dec_acquire(v);
}

static __always_inline s64
raw_atomic64_fetch_dec_release(atomic64_t *v)
{
	return arch_atomic64_fetch_dec_release(v);
}

static __always_inline s64
raw_atomic64_fetch_dec_relaxed(atomic64_t *v)
{
	return arch_atomic64_fetch_dec_relaxed(v);
}

static __always_inline void
raw_atomic64_and(s64 i, atomic64_t *v)
{
	arch_atomic64_and(i, v);
}

static __always_inline s64
raw_atomic64_fetch_and(s64 i, atomic64_t *v)
{
	return arch_atomic64_fetch_and(i, v);
}

static __always_inline s64
raw_atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
{
	return arch_atomic64_fetch_and_acquire(i, v);
}

static __always_inline s64
raw_atomic64_fetch_and_release(s64 i, atomic64_t *v)
{
	return arch_atomic64_fetch_and_release(i, v);
}

static __always_inline s64
raw_atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
{
	return arch_atomic64_fetch_and_relaxed(i, v);
}

static __always_inline void
raw_atomic64_andnot(s64 i, atomic64_t *v)
{
	arch_atomic64_andnot(i, v);
}

static __always_inline s64
raw_atomic64_fetch_andnot(s64 i, atomic64_t *v)
{
	return arch_atomic64_fetch_andnot(i, v);
}

static __always_inline s64
raw_atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
{
	return arch_atomic64_fetch_andnot_acquire(i, v);
}

static __always_inline s64
raw_atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
{
	return arch_atomic64_fetch_andnot_release(i, v);
}

static __always_inline s64
raw_atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
{
	return arch_atomic64_fetch_andnot_relaxed(i, v);
}

static __always_inline void
raw_atomic64_or(s64 i, atomic64_t *v)
{
	arch_atomic64_or(i, v);
}

static __always_inline s64
raw_atomic64_fetch_or(s64 i, atomic64_t *v)
{
	return arch_atomic64_fetch_or(i, v);
}

static __always_inline s64
raw_atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
{
	return arch_atomic64_fetch_or_acquire(i, v);
}

static __always_inline s64
raw_atomic64_fetch_or_release(s64 i, atomic64_t *v)
{
	return arch_atomic64_fetch_or_release(i, v);
}

static __always_inline s64
raw_atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
{
	return arch_atomic64_fetch_or_relaxed(i, v);
}

static __always_inline void
raw_atomic64_xor(s64 i, atomic64_t *v)
{
	arch_atomic64_xor(i, v);
}

static __always_inline s64
raw_atomic64_fetch_xor(s64 i, atomic64_t *v)
{
	return arch_atomic64_fetch_xor(i, v);
}

static __always_inline s64
raw_atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
{
	return arch_atomic64_fetch_xor_acquire(i, v);
}

static __always_inline s64
raw_atomic64_fetch_xor_release(s64 i, atomic64_t *v)
{
	return arch_atomic64_fetch_xor_release(i, v);
}

static __always_inline s64
raw_atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
{
	return arch_atomic64_fetch_xor_relaxed(i, v);
}

static __always_inline s64
raw_atomic64_xchg(atomic64_t *v, s64 i)
{
	return arch_atomic64_xchg(v, i);
}

static __always_inline s64
raw_atomic64_xchg_acquire(atomic64_t *v, s64 i)
{
	return arch_atomic64_xchg_acquire(v, i);
}

static __always_inline s64
raw_atomic64_xchg_release(atomic64_t *v, s64 i)
{
	return arch_atomic64_xchg_release(v, i);
}

static __always_inline s64
raw_atomic64_xchg_relaxed(atomic64_t *v, s64 i)
{
	return arch_atomic64_xchg_relaxed(v, i);
}

static __always_inline s64
raw_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
	return arch_atomic64_cmpxchg(v, old, new);
}

static __always_inline s64
raw_atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
{
	return arch_atomic64_cmpxchg_acquire(v, old, new);
}

static __always_inline s64
raw_atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
{
	return arch_atomic64_cmpxchg_release(v, old, new);
}

static __always_inline s64
raw_atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
{
	return arch_atomic64_cmpxchg_relaxed(v, old, new);
}

static __always_inline bool
raw_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
	return arch_atomic64_try_cmpxchg(v, old, new);
}

static __always_inline bool
raw_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
{
	return arch_atomic64_try_cmpxchg_acquire(v, old, new);
}

static __always_inline bool
raw_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
{
	return arch_atomic64_try_cmpxchg_release(v, old, new);
}

static __always_inline bool
raw_atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
{
	return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
}

static __always_inline bool
raw_atomic64_sub_and_test(s64 i, atomic64_t *v)
{
	return arch_atomic64_sub_and_test(i, v);
}

static __always_inline bool
raw_atomic64_dec_and_test(atomic64_t *v)
{
	return arch_atomic64_dec_and_test(v);
}

static __always_inline bool
raw_atomic64_inc_and_test(atomic64_t *v)
{
	return arch_atomic64_inc_and_test(v);
}

static __always_inline bool
raw_atomic64_add_negative(s64 i, atomic64_t *v)
{
	return arch_atomic64_add_negative(i, v);
}

static __always_inline bool
raw_atomic64_add_negative_acquire(s64 i, atomic64_t *v)
{
	return arch_atomic64_add_negative_acquire(i, v);
}

static __always_inline bool
raw_atomic64_add_negative_release(s64 i, atomic64_t *v)
{
	return arch_atomic64_add_negative_release(i, v);
}

static __always_inline bool
raw_atomic64_add_negative_relaxed(s64 i, atomic64_t *v)
{
	return arch_atomic64_add_negative_relaxed(i, v);
}

static __always_inline s64
raw_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	return arch_atomic64_fetch_add_unless(v, a, u);
}

static __always_inline bool
raw_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
{
	return arch_atomic64_add_unless(v, a, u);
}

static __always_inline bool
raw_atomic64_inc_not_zero(atomic64_t *v)
{
	return arch_atomic64_inc_not_zero(v);
}

static __always_inline bool
raw_atomic64_inc_unless_negative(atomic64_t *v)
{
	return arch_atomic64_inc_unless_negative(v);
}

static __always_inline bool
raw_atomic64_dec_unless_positive(atomic64_t *v)
{
	return arch_atomic64_dec_unless_positive(v);
}

static __always_inline s64
raw_atomic64_dec_if_positive(atomic64_t *v)
{
	return arch_atomic64_dec_if_positive(v);
}

#define raw_xchg(...) \
	arch_xchg(__VA_ARGS__)

#define raw_xchg_acquire(...) \
	arch_xchg_acquire(__VA_ARGS__)

#define raw_xchg_release(...) \
	arch_xchg_release(__VA_ARGS__)

#define raw_xchg_relaxed(...) \
	arch_xchg_relaxed(__VA_ARGS__)

#define raw_cmpxchg(...) \
	arch_cmpxchg(__VA_ARGS__)

#define raw_cmpxchg_acquire(...) \
	arch_cmpxchg_acquire(__VA_ARGS__)

#define raw_cmpxchg_release(...) \
	arch_cmpxchg_release(__VA_ARGS__)

#define raw_cmpxchg_relaxed(...) \
	arch_cmpxchg_relaxed(__VA_ARGS__)

#define raw_cmpxchg64(...) \
	arch_cmpxchg64(__VA_ARGS__)

#define raw_cmpxchg64_acquire(...) \
	arch_cmpxchg64_acquire(__VA_ARGS__)

#define raw_cmpxchg64_release(...) \
	arch_cmpxchg64_release(__VA_ARGS__)

#define raw_cmpxchg64_relaxed(...) \
	arch_cmpxchg64_relaxed(__VA_ARGS__)

#define raw_cmpxchg128(...) \
	arch_cmpxchg128(__VA_ARGS__)

#define raw_cmpxchg128_acquire(...) \
	arch_cmpxchg128_acquire(__VA_ARGS__)

#define raw_cmpxchg128_release(...) \
	arch_cmpxchg128_release(__VA_ARGS__)

#define raw_cmpxchg128_relaxed(...) \
	arch_cmpxchg128_relaxed(__VA_ARGS__)

#define raw_try_cmpxchg(...) \
	arch_try_cmpxchg(__VA_ARGS__)

#define raw_try_cmpxchg_acquire(...) \
	arch_try_cmpxchg_acquire(__VA_ARGS__)

#define raw_try_cmpxchg_release(...) \
	arch_try_cmpxchg_release(__VA_ARGS__)

#define raw_try_cmpxchg_relaxed(...) \
	arch_try_cmpxchg_relaxed(__VA_ARGS__)

#define raw_try_cmpxchg64(...) \
	arch_try_cmpxchg64(__VA_ARGS__)

#define raw_try_cmpxchg64_acquire(...) \
	arch_try_cmpxchg64_acquire(__VA_ARGS__)

#define raw_try_cmpxchg64_release(...) \
	arch_try_cmpxchg64_release(__VA_ARGS__)

#define raw_try_cmpxchg64_relaxed(...) \
	arch_try_cmpxchg64_relaxed(__VA_ARGS__)

#define raw_try_cmpxchg128(...) \
	arch_try_cmpxchg128(__VA_ARGS__)

#define raw_try_cmpxchg128_acquire(...) \
	arch_try_cmpxchg128_acquire(__VA_ARGS__)

#define raw_try_cmpxchg128_release(...) \
	arch_try_cmpxchg128_release(__VA_ARGS__)

#define raw_try_cmpxchg128_relaxed(...) \
	arch_try_cmpxchg128_relaxed(__VA_ARGS__)

#define raw_cmpxchg_local(...) \
	arch_cmpxchg_local(__VA_ARGS__)

#define raw_cmpxchg64_local(...) \
	arch_cmpxchg64_local(__VA_ARGS__)

#define raw_cmpxchg128_local(...) \
	arch_cmpxchg128_local(__VA_ARGS__)

#define raw_sync_cmpxchg(...) \
	arch_sync_cmpxchg(__VA_ARGS__)

#define raw_try_cmpxchg_local(...) \
	arch_try_cmpxchg_local(__VA_ARGS__)

#define raw_try_cmpxchg64_local(...) \
	arch_try_cmpxchg64_local(__VA_ARGS__)

#define raw_try_cmpxchg128_local(...) \
	arch_try_cmpxchg128_local(__VA_ARGS__)

#endif /* _LINUX_ATOMIC_RAW_H */
// b23ed4424e85200e200ded094522e1d743b3a5b1
+1 −1
 cat <<EOF
 static __always_inline ${ret}
-arch_${atomic}_${pfx}${name}${sfx}_acquire(${params})
+raw_${atomic}_${pfx}${name}${sfx}_acquire(${params})
 {
 	${ret} ret = arch_${atomic}_${pfx}${name}${sfx}_relaxed(${args});
 	__atomic_acquire_fence();
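
As an illustration, substituting ${ret}=int, ${atomic}=atomic,
${pfx}=fetch_, ${name}=add and an empty ${sfx} (one possible
instantiation), the updated template now emits roughly:

| static __always_inline int
| raw_atomic_fetch_add_acquire(int i, atomic_t *v)
| {
| 	int ret = arch_atomic_fetch_add_relaxed(i, v);
| 	__atomic_acquire_fence();
| 	return ret;
| }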
+2 −2
 cat <<EOF
 static __always_inline bool
-arch_${atomic}_add_negative${order}(${int} i, ${atomic}_t *v)
+raw_${atomic}_add_negative${order}(${int} i, ${atomic}_t *v)
 {
-	return arch_${atomic}_add_return${order}(i, v) < 0;
+	return raw_${atomic}_add_return${order}(i, v) < 0;
 }
 EOF
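
Substituting ${atomic}=atomic, ${int}=int and an empty ${order} (again
just one instantiation), this template now generates:

| static __always_inline bool
| raw_atomic_add_negative(int i, atomic_t *v)
| {
| 	return raw_atomic_add_return(i, v) < 0;
| }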