Commit b133fffe authored by Jakub Kicinski
parents 163c2c70 ee1ee6db
include/linux/atomic/atomic-arch-fallback.h  +199 −9
@@ -1208,15 +1208,21 @@ arch_atomic_inc_and_test(atomic_t *v)
#define arch_atomic_inc_and_test arch_atomic_inc_and_test
#endif

#ifndef arch_atomic_add_negative_relaxed
#ifdef arch_atomic_add_negative
#define arch_atomic_add_negative_acquire arch_atomic_add_negative
#define arch_atomic_add_negative_release arch_atomic_add_negative
#define arch_atomic_add_negative_relaxed arch_atomic_add_negative
#endif /* arch_atomic_add_negative */

#ifndef arch_atomic_add_negative
/**
 * arch_atomic_add_negative - add and test if negative
 * arch_atomic_add_negative - Add and test if negative
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 * Atomically adds @i to @v and returns true if the result is negative,
 * or false when the result is greater than or equal to zero.
 */
static __always_inline bool
arch_atomic_add_negative(int i, atomic_t *v)
@@ -1226,6 +1232,95 @@ arch_atomic_add_negative(int i, atomic_t *v)
#define arch_atomic_add_negative arch_atomic_add_negative
#endif

#ifndef arch_atomic_add_negative_acquire
/**
 * arch_atomic_add_negative_acquire - Add and test if negative
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns true if the result is negative,
 * or false when the result is greater than or equal to zero.
 */
static __always_inline bool
arch_atomic_add_negative_acquire(int i, atomic_t *v)
{
	return arch_atomic_add_return_acquire(i, v) < 0;
}
#define arch_atomic_add_negative_acquire arch_atomic_add_negative_acquire
#endif

#ifndef arch_atomic_add_negative_release
/**
 * arch_atomic_add_negative_release - Add and test if negative
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns true if the result is negative,
 * or false when the result is greater than or equal to zero.
 */
static __always_inline bool
arch_atomic_add_negative_release(int i, atomic_t *v)
{
	return arch_atomic_add_return_release(i, v) < 0;
}
#define arch_atomic_add_negative_release arch_atomic_add_negative_release
#endif

#ifndef arch_atomic_add_negative_relaxed
/**
 * arch_atomic_add_negative_relaxed - Add and test if negative
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns true if the result is negative,
 * or false when the result is greater than or equal to zero.
 */
static __always_inline bool
arch_atomic_add_negative_relaxed(int i, atomic_t *v)
{
	return arch_atomic_add_return_relaxed(i, v) < 0;
}
#define arch_atomic_add_negative_relaxed arch_atomic_add_negative_relaxed
#endif

#else /* arch_atomic_add_negative_relaxed */

#ifndef arch_atomic_add_negative_acquire
static __always_inline bool
arch_atomic_add_negative_acquire(int i, atomic_t *v)
{
	bool ret = arch_atomic_add_negative_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
}
#define arch_atomic_add_negative_acquire arch_atomic_add_negative_acquire
#endif

#ifndef arch_atomic_add_negative_release
static __always_inline bool
arch_atomic_add_negative_release(int i, atomic_t *v)
{
	__atomic_release_fence();
	return arch_atomic_add_negative_relaxed(i, v);
}
#define arch_atomic_add_negative_release arch_atomic_add_negative_release
#endif

#ifndef arch_atomic_add_negative
static __always_inline bool
arch_atomic_add_negative(int i, atomic_t *v)
{
	bool ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_add_negative_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
}
#define arch_atomic_add_negative arch_atomic_add_negative
#endif

#endif /* arch_atomic_add_negative_relaxed */

#ifndef arch_atomic_fetch_add_unless
/**
 * arch_atomic_fetch_add_unless - add unless the number is already a given value
@@ -2329,15 +2424,21 @@ arch_atomic64_inc_and_test(atomic64_t *v)
#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
#endif

#ifndef arch_atomic64_add_negative_relaxed
#ifdef arch_atomic64_add_negative
#define arch_atomic64_add_negative_acquire arch_atomic64_add_negative
#define arch_atomic64_add_negative_release arch_atomic64_add_negative
#define arch_atomic64_add_negative_relaxed arch_atomic64_add_negative
#endif /* arch_atomic64_add_negative */

#ifndef arch_atomic64_add_negative
/**
 * arch_atomic64_add_negative - add and test if negative
 * arch_atomic64_add_negative - Add and test if negative
 * @i: integer value to add
 * @v: pointer of type atomic64_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 * Atomically adds @i to @v and returns true if the result is negative,
 * or false when the result is greater than or equal to zero.
 */
static __always_inline bool
arch_atomic64_add_negative(s64 i, atomic64_t *v)
@@ -2347,6 +2448,95 @@ arch_atomic64_add_negative(s64 i, atomic64_t *v)
#define arch_atomic64_add_negative arch_atomic64_add_negative
#endif

#ifndef arch_atomic64_add_negative_acquire
/**
 * arch_atomic64_add_negative_acquire - Add and test if negative
 * @i: integer value to add
 * @v: pointer of type atomic64_t
 *
 * Atomically adds @i to @v and returns true if the result is negative,
 * or false when the result is greater than or equal to zero.
 */
static __always_inline bool
arch_atomic64_add_negative_acquire(s64 i, atomic64_t *v)
{
	return arch_atomic64_add_return_acquire(i, v) < 0;
}
#define arch_atomic64_add_negative_acquire arch_atomic64_add_negative_acquire
#endif

#ifndef arch_atomic64_add_negative_release
/**
 * arch_atomic64_add_negative_release - Add and test if negative
 * @i: integer value to add
 * @v: pointer of type atomic64_t
 *
 * Atomically adds @i to @v and returns true if the result is negative,
 * or false when the result is greater than or equal to zero.
 */
static __always_inline bool
arch_atomic64_add_negative_release(s64 i, atomic64_t *v)
{
	return arch_atomic64_add_return_release(i, v) < 0;
}
#define arch_atomic64_add_negative_release arch_atomic64_add_negative_release
#endif

#ifndef arch_atomic64_add_negative_relaxed
/**
 * arch_atomic64_add_negative_relaxed - Add and test if negative
 * @i: integer value to add
 * @v: pointer of type atomic64_t
 *
 * Atomically adds @i to @v and returns true if the result is negative,
 * or false when the result is greater than or equal to zero.
 */
static __always_inline bool
arch_atomic64_add_negative_relaxed(s64 i, atomic64_t *v)
{
	return arch_atomic64_add_return_relaxed(i, v) < 0;
}
#define arch_atomic64_add_negative_relaxed arch_atomic64_add_negative_relaxed
#endif

#else /* arch_atomic64_add_negative_relaxed */

#ifndef arch_atomic64_add_negative_acquire
static __always_inline bool
arch_atomic64_add_negative_acquire(s64 i, atomic64_t *v)
{
	bool ret = arch_atomic64_add_negative_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
}
#define arch_atomic64_add_negative_acquire arch_atomic64_add_negative_acquire
#endif

#ifndef arch_atomic64_add_negative_release
static __always_inline bool
arch_atomic64_add_negative_release(s64 i, atomic64_t *v)
{
	__atomic_release_fence();
	return arch_atomic64_add_negative_relaxed(i, v);
}
#define arch_atomic64_add_negative_release arch_atomic64_add_negative_release
#endif

#ifndef arch_atomic64_add_negative
static __always_inline bool
arch_atomic64_add_negative(s64 i, atomic64_t *v)
{
	bool ret;
	__atomic_pre_full_fence();
	ret = arch_atomic64_add_negative_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
}
#define arch_atomic64_add_negative arch_atomic64_add_negative
#endif

#endif /* arch_atomic64_add_negative_relaxed */

#ifndef arch_atomic64_fetch_add_unless
/**
 * arch_atomic64_fetch_add_unless - add unless the number is already a given value
@@ -2456,4 +2646,4 @@ arch_atomic64_dec_if_positive(atomic64_t *v)
#endif

#endif /* _LINUX_ATOMIC_FALLBACK_H */
// b5e87bdd5ede61470c29f7a7e4de781af3770f09
// 00071fffa021cec66f6290d706d69c91df87bade
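
The point of the new _acquire/_release/_relaxed fallbacks above is that a caller which knows the ordering it needs no longer has to take the fully ordered arch_atomic_add_negative(). As a rough sketch of the intended use (hypothetical struct and helpers, omitting the saturation/dead-zone and last-reference acquire handling that lib/rcuref.c provides), a biased reference count can pair the relaxed form on get with the release form on put:

struct obj {
	atomic_t refs;		/* biased: 0 means "one reference held" */
	/* payload ... */
};

static bool obj_get(struct obj *o)
{
	/* The object is already stable here, so no ordering is required. */
	return !atomic_add_negative_relaxed(1, &o->refs);
}

static void obj_put(struct obj *o)
{
	/*
	 * Release ordering: all prior stores to *o are visible before the
	 * count goes negative, i.e. before the last reference is dropped.
	 */
	if (atomic_add_negative_release(-1, &o->refs))
		kfree(o);	/* a real user also needs acquire ordering before freeing */
}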
include/linux/atomic/atomic-instrumented.h  +67 −1
@@ -592,6 +592,28 @@ atomic_add_negative(int i, atomic_t *v)
	return arch_atomic_add_negative(i, v);
}

static __always_inline bool
atomic_add_negative_acquire(int i, atomic_t *v)
{
	instrument_atomic_read_write(v, sizeof(*v));
	return arch_atomic_add_negative_acquire(i, v);
}

static __always_inline bool
atomic_add_negative_release(int i, atomic_t *v)
{
	kcsan_release();
	instrument_atomic_read_write(v, sizeof(*v));
	return arch_atomic_add_negative_release(i, v);
}

static __always_inline bool
atomic_add_negative_relaxed(int i, atomic_t *v)
{
	instrument_atomic_read_write(v, sizeof(*v));
	return arch_atomic_add_negative_relaxed(i, v);
}

static __always_inline int
atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
@@ -1211,6 +1233,28 @@ atomic64_add_negative(s64 i, atomic64_t *v)
	return arch_atomic64_add_negative(i, v);
}

static __always_inline bool
atomic64_add_negative_acquire(s64 i, atomic64_t *v)
{
	instrument_atomic_read_write(v, sizeof(*v));
	return arch_atomic64_add_negative_acquire(i, v);
}

static __always_inline bool
atomic64_add_negative_release(s64 i, atomic64_t *v)
{
	kcsan_release();
	instrument_atomic_read_write(v, sizeof(*v));
	return arch_atomic64_add_negative_release(i, v);
}

static __always_inline bool
atomic64_add_negative_relaxed(s64 i, atomic64_t *v)
{
	instrument_atomic_read_write(v, sizeof(*v));
	return arch_atomic64_add_negative_relaxed(i, v);
}

static __always_inline s64
atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
@@ -1830,6 +1874,28 @@ atomic_long_add_negative(long i, atomic_long_t *v)
	return arch_atomic_long_add_negative(i, v);
}

static __always_inline bool
atomic_long_add_negative_acquire(long i, atomic_long_t *v)
{
	instrument_atomic_read_write(v, sizeof(*v));
	return arch_atomic_long_add_negative_acquire(i, v);
}

static __always_inline bool
atomic_long_add_negative_release(long i, atomic_long_t *v)
{
	kcsan_release();
	instrument_atomic_read_write(v, sizeof(*v));
	return arch_atomic_long_add_negative_release(i, v);
}

static __always_inline bool
atomic_long_add_negative_relaxed(long i, atomic_long_t *v)
{
	instrument_atomic_read_write(v, sizeof(*v));
	return arch_atomic_long_add_negative_relaxed(i, v);
}

static __always_inline long
atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
{
@@ -2083,4 +2149,4 @@ atomic_long_dec_if_positive(atomic_long_t *v)
})

#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
// 764f741eb77a7ad565dc8d99ce2837d5542e8aee
// 1b485de9cbaa4900de59e14ee2084357eaeb1c3a
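
The instrumented wrappers follow the file's existing pattern: kcsan_release() marks the release ordering point and instrument_atomic_read_write() tells the sanitizers that the word is read and written atomically. A rough illustration of the kind of mixed access this lets KCSAN report, depending on configuration (the variable and functions are hypothetical):

static atomic_t state = ATOMIC_INIT(0);

static void marked_writer(void)
{
	/* Instrumented as an atomic read-write access of 'state'. */
	(void)atomic_add_negative_acquire(1, &state);
}

static void plain_reader(void)
{
	/* Unmarked plain access racing with marked_writer(). */
	int snapshot = state.counter;

	(void)snapshot;
}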
include/linux/atomic/atomic-long.h  +37 −1
@@ -479,6 +479,24 @@ arch_atomic_long_add_negative(long i, atomic_long_t *v)
	return arch_atomic64_add_negative(i, v);
}

static __always_inline bool
arch_atomic_long_add_negative_acquire(long i, atomic_long_t *v)
{
	return arch_atomic64_add_negative_acquire(i, v);
}

static __always_inline bool
arch_atomic_long_add_negative_release(long i, atomic_long_t *v)
{
	return arch_atomic64_add_negative_release(i, v);
}

static __always_inline bool
arch_atomic_long_add_negative_relaxed(long i, atomic_long_t *v)
{
	return arch_atomic64_add_negative_relaxed(i, v);
}

static __always_inline long
arch_atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
{
@@ -973,6 +991,24 @@ arch_atomic_long_add_negative(long i, atomic_long_t *v)
	return arch_atomic_add_negative(i, v);
}

static __always_inline bool
arch_atomic_long_add_negative_acquire(long i, atomic_long_t *v)
{
	return arch_atomic_add_negative_acquire(i, v);
}

static __always_inline bool
arch_atomic_long_add_negative_release(long i, atomic_long_t *v)
{
	return arch_atomic_add_negative_release(i, v);
}

static __always_inline bool
arch_atomic_long_add_negative_relaxed(long i, atomic_long_t *v)
{
	return arch_atomic_add_negative_relaxed(i, v);
}

static __always_inline long
arch_atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
{
@@ -1011,4 +1047,4 @@ arch_atomic_long_dec_if_positive(atomic_long_t *v)

#endif /* CONFIG_64BIT */
#endif /* _LINUX_ATOMIC_LONG_H */
// e8f0e08ff072b74d180eabe2ad001282b38c2c88
// a194c07d7d2f4b0e178d3c118c919775d5d65f50
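
As elsewhere in this header, the atomic_long_t variants are plain forwarders: with CONFIG_64BIT the backing type is atomic64_t, otherwise atomic_t. On a 64-bit kernel a release-ordered call therefore resolves roughly as follows (illustrative chain, not a literal expansion of the generated code):

atomic_long_add_negative_release(i, v)
  -> arch_atomic_long_add_negative_release(i, v)
  -> arch_atomic64_add_negative_release(i, v)
  -> arch_atomic64_add_return_release(i, v) < 0	/* generic fallback, unless the arch overrides it */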

include/linux/rcuref.h  0 → 100644  +155 −0
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _LINUX_RCUREF_H
#define _LINUX_RCUREF_H

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/limits.h>
#include <linux/lockdep.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>

#define RCUREF_ONEREF		0x00000000U
#define RCUREF_MAXREF		0x7FFFFFFFU
#define RCUREF_SATURATED	0xA0000000U
#define RCUREF_RELEASED		0xC0000000U
#define RCUREF_DEAD		0xE0000000U
#define RCUREF_NOREF		0xFFFFFFFFU

/**
 * rcuref_init - Initialize a rcuref reference count with the given reference count
 * @ref:	Pointer to the reference count
 * @cnt:	The initial reference count typically '1'
 */
static inline void rcuref_init(rcuref_t *ref, unsigned int cnt)
{
	atomic_set(&ref->refcnt, cnt - 1);
}

/**
 * rcuref_read - Read the number of held reference counts of a rcuref
 * @ref:	Pointer to the reference count
 *
 * Return: The number of held references (0 ... N)
 */
static inline unsigned int rcuref_read(rcuref_t *ref)
{
	unsigned int c = atomic_read(&ref->refcnt);

	/* Return 0 if within the DEAD zone. */
	return c >= RCUREF_RELEASED ? 0 : c + 1;
}

extern __must_check bool rcuref_get_slowpath(rcuref_t *ref);

/**
 * rcuref_get - Acquire one reference on a rcuref reference count
 * @ref:	Pointer to the reference count
 *
 * Similar to atomic_inc_not_zero() but saturates at RCUREF_MAXREF.
 *
 * Provides no memory ordering, it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See documentation in lib/rcuref.c
 *
 * Return:
 *	False if the attempt to acquire a reference failed. This happens
 *	when the last reference has been put already
 *
 *	True if a reference was successfully acquired
 */
static inline __must_check bool rcuref_get(rcuref_t *ref)
{
	/*
	 * Unconditionally increase the reference count. The saturation and
	 * dead zones provide enough tolerance for this.
	 */
	if (likely(!atomic_add_negative_relaxed(1, &ref->refcnt)))
		return true;

	/* Handle the cases inside the saturation and dead zones */
	return rcuref_get_slowpath(ref);
}

extern __must_check bool rcuref_put_slowpath(rcuref_t *ref);

/*
 * Internal helper. Do not invoke directly.
 */
static __always_inline __must_check bool __rcuref_put(rcuref_t *ref)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_held() && preemptible(),
			 "suspicious rcuref_put_rcusafe() usage");
	/*
	 * Unconditionally decrease the reference count. The saturation and
	 * dead zones provide enough tolerance for this.
	 */
	if (likely(!atomic_add_negative_release(-1, &ref->refcnt)))
		return false;

	/*
	 * Handle the last reference drop and cases inside the saturation
	 * and dead zones.
	 */
	return rcuref_put_slowpath(ref);
}

/**
 * rcuref_put_rcusafe -- Release one reference for a rcuref reference count RCU safe
 * @ref:	Pointer to the reference count
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides an acquire ordering on success such that free()
 * must come after.
 *
 * Can be invoked from contexts, which guarantee that no grace period can
 * happen which would free the object concurrently if the decrement drops
 * the last reference and the slowpath races against a concurrent get() and
 * put() pair. rcu_read_lock()'ed and atomic contexts qualify.
 *
 * Return:
 *	True if this was the last reference with no future references
 *	possible. This signals the caller that it can safely release the
 *	object which is protected by the reference counter.
 *
 *	False if there are still active references or the put() raced
 *	with a concurrent get()/put() pair. Caller is not allowed to
 *	release the protected object.
 */
static inline __must_check bool rcuref_put_rcusafe(rcuref_t *ref)
{
	return __rcuref_put(ref);
}

/**
 * rcuref_put -- Release one reference for a rcuref reference count
 * @ref:	Pointer to the reference count
 *
 * Can be invoked from any context.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides an acquire ordering on success such that free()
 * must come after.
 *
 * Return:
 *
 *	True if this was the last reference with no future references
 *	possible. This signals the caller that it can safely schedule the
 *	object, which is protected by the reference counter, for
 *	deconstruction.
 *
 *	False if there are still active references or the put() raced
 *	with a concurrent get()/put() pair. Caller is not allowed to
 *	deconstruct the protected object.
 */
static inline __must_check bool rcuref_put(rcuref_t *ref)
{
	bool released;

	preempt_disable();
	released = __rcuref_put(ref);
	preempt_enable();
	return released;
}

#endif
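
A short usage sketch for the new rcuref API (the object type, RCU pointer, and helpers below are hypothetical, not part of this commit): the lookup must keep the object's memory stable across the get(), typically under rcu_read_lock(), and whoever sees rcuref_put() return true owns the destruction.

#include <linux/rcuref.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct gadget {
	rcuref_t	ref;
	struct rcu_head	rcu;
	/* payload ... */
};

static struct gadget __rcu *global_gadget;	/* hypothetical RCU-protected pointer */

static struct gadget *gadget_alloc(void)
{
	struct gadget *g = kzalloc(sizeof(*g), GFP_KERNEL);

	if (g)
		rcuref_init(&g->ref, 1);	/* start with one reference */
	return g;
}

static struct gadget *gadget_get(void)
{
	struct gadget *g;

	rcu_read_lock();
	g = rcu_dereference(global_gadget);
	if (g && !rcuref_get(&g->ref))
		g = NULL;			/* last reference already gone */
	rcu_read_unlock();
	return g;
}

static void gadget_put(struct gadget *g)
{
	if (rcuref_put(&g->ref))
		kfree_rcu(g, rcu);		/* last reference: free after a grace period */
}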
include/linux/types.h  +6 −0
@@ -175,6 +175,12 @@ typedef struct {
} atomic64_t;
#endif

typedef struct {
	atomic_t refcnt;
} rcuref_t;

#define RCUREF_INIT(i)	{ .refcnt = ATOMIC_INIT(i - 1) }

struct list_head {
	struct list_head *next, *prev;
};
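
RCUREF_INIT() applies the same bias as rcuref_init() (the stored value is cnt - 1, so RCUREF_ONEREF is 0), which lets statically allocated objects start out holding one reference. A minimal illustration with a hypothetical structure:

struct boot_obj {
	rcuref_t ref;
	/* ... */
};

static struct boot_obj boot_obj = {
	.ref = RCUREF_INIT(1),	/* one reference held from the start */
};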