Commit f4d2436f authored by Thomas Gleixner, committed by Wentao Guan

rcuref: Plug slowpath race in rcuref_put()

stable inclusion
from stable-v6.6.81
commit 1d26aaa86124e2622cfe38d6fdb9c4da3b49449d
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/IBYZED

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?id=1d26aaa86124e2622cfe38d6fdb9c4da3b49449d

--------------------------------

commit b9a49520679e98700d3d89689cc91c08a1c88c1d upstream.

Kernel test robot reported an "imbalanced put" in the rcuref_put() slow
path, which turned out to be a false positive. Consider the following race:

            ref  = 0 (via rcuref_init(ref, 1))
 T1                                      T2
 rcuref_put(ref)
 -> atomic_add_negative_release(-1, ref)                                         # ref -> 0xffffffff
 -> rcuref_put_slowpath(ref)
                                         rcuref_get(ref)
                                         -> atomic_add_negative_relaxed(1, &ref->refcnt)
                                           -> return true;                       # ref -> 0

                                         rcuref_put(ref)
                                         -> atomic_add_negative_release(-1, ref) # ref -> 0xffffffff
                                         -> rcuref_put_slowpath()

    -> cnt = atomic_read(&ref->refcnt);                                          # cnt -> 0xffffffff / RCUREF_NOREF
    -> atomic_try_cmpxchg_release(&ref->refcnt, &cnt, RCUREF_DEAD)               # ref -> 0xe0000000 / RCUREF_DEAD
       -> return true
                                           -> cnt = atomic_read(&ref->refcnt);   # cnt -> 0xe0000000 / RCUREF_DEAD
                                           -> if (cnt > RCUREF_RELEASED)         # 0xe0000000 > 0xc0000000
                                             -> WARN_ONCE(cnt >= RCUREF_RELEASED, "rcuref - imbalanced put()")

The problem is the additional read in the slow path (after the fast path
already decremented the counter to RCUREF_NOREF): that read can happen
after a concurrent final put() has marked the counter RCUREF_DEAD, so the
imbalance check fires even though the puts were balanced.
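
For illustration, the false positive can be replayed deterministically in
userspace. The following is a minimal single-threaded model, not kernel
code: the RCUREF_* constants are copied from lib/rcuref.c, the slowpath is
heavily simplified, and the two threads' steps are executed in the
diagram's order from main():

  /* Minimal userspace replay of the race -- a simplified model, not the
   * kernel implementation. Build with: cc -std=c11 -o replay replay.c */
  #include <stdatomic.h>
  #include <stdio.h>

  #define RCUREF_RELEASED 0xC0000000U	/* constants from lib/rcuref.c */
  #define RCUREF_DEAD     0xE0000000U
  #define RCUREF_NOREF    0xFFFFFFFFU

  static atomic_uint refcnt;		/* rcuref_init(ref, 1) -> counter 0 */

  /* Models the OLD rcuref_put_slowpath(): it re-reads the counter instead
   * of consuming the decrement's result. That re-read is the race window. */
  static void put_slowpath_old(const char *who)
  {
  	unsigned int cnt = atomic_load(&refcnt);

  	if (cnt == RCUREF_NOREF) {
  		/* Final transition; failure means a concurrent get()/put()
  		 * intervened, which is fine and needs no warning. */
  		if (atomic_compare_exchange_strong(&refcnt, &cnt, RCUREF_DEAD))
  			printf("%s: final put, counter -> DEAD\n", who);
  		return;
  	}
  	if (cnt >= RCUREF_RELEASED)
  		printf("%s: WARN rcuref - imbalanced put() (cnt=%#x)\n", who, cnt);
  }

  int main(void)
  {
  	atomic_fetch_sub(&refcnt, 1);	/* T1 put():  0 -> 0xffffffff */
  	atomic_fetch_add(&refcnt, 1);	/* T2 get():  0xffffffff -> 0 */
  	atomic_fetch_sub(&refcnt, 1);	/* T2 put():  0 -> 0xffffffff */
  	put_slowpath_old("T1");		/* wins the cmpxchg: counter -> DEAD */
  	put_slowpath_old("T2");		/* re-read observes DEAD: false WARN */
  	return 0;
  }

Running it prints the final put for T1 followed by the bogus
"imbalanced put" warning for T2, matching the diagram above.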

Prevent this by handing the return value of the decrement to the slow path
instead of re-reading the counter. Now every "final" put enters the slow
path with RCUREF_NOREF and attempts the final cmpxchg() to RCUREF_DEAD
itself.
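
A sketch of the repaired flow under the same model (it reuses the includes,
constants and refcnt from the sketch above; the helper names are made up
for the sketch, the real change is in the diff below). The slow path works
on the value its own decrement produced, so T2 enters it with RCUREF_NOREF
and merely loses the cmpxchg instead of tripping over T1's RCUREF_DEAD:

  /* Models the FIXED flow: the slowpath takes the decrement's result as
   * an argument instead of re-reading the counter. */
  static bool put_slowpath_fixed(unsigned int cnt)
  {
  	if (cnt == RCUREF_NOREF) {
  		/* Either this cmpxchg wins and this was the final put, or a
  		 * concurrent final put already set RCUREF_DEAD. Neither case
  		 * is an imbalance, so no warning can fire from here. */
  		return atomic_compare_exchange_strong(&refcnt, &cnt, RCUREF_DEAD);
  	}
  	if (cnt >= RCUREF_RELEASED)
  		printf("WARN rcuref - imbalanced put() (cnt=%#x)\n", cnt);
  	return false;
  }

  static bool put_fixed(void)
  {
  	/* atomic_fetch_sub() returns the old value; old - 1 is what the
  	 * kernel's atomic_sub_return_release() would hand back. */
  	unsigned int cnt = atomic_fetch_sub(&refcnt, 1) - 1;

  	if ((int)cnt >= 0)	/* fast path: still inside the valid zone */
  		return false;
  	return put_slowpath_fixed(cnt);
  }

Replaying the same interleaving, T2's slow path is now called with the
0xffffffff its own decrement produced, its cmpxchg fails because T1 already
set RCUREF_DEAD, and it returns false without warning.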

[ bigeasy: Add changelog ]

Fixes: ee1ee6db ("atomics: Provide rcuref - scalable reference counting")
Reported-by: kernel test robot <oliver.sang@intel.com>
Debugged-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: stable@vger.kernel.org
Closes: https://lore.kernel.org/oe-lkp/202412311453.9d7636a2-lkp@intel.com

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
(cherry picked from commit 1d26aaa86124e2622cfe38d6fdb9c4da3b49449d)
Signed-off-by: Wentao Guan <guanwentao@uniontech.com>
parent 0244c133
include/linux/rcuref.h  +6 −3

@@ -71,27 +71,30 @@ static inline __must_check bool rcuref_get(rcuref_t *ref)
 	return rcuref_get_slowpath(ref);
 }
 
-extern __must_check bool rcuref_put_slowpath(rcuref_t *ref);
+extern __must_check bool rcuref_put_slowpath(rcuref_t *ref, unsigned int cnt);
 
 /*
  * Internal helper. Do not invoke directly.
  */
 static __always_inline __must_check bool __rcuref_put(rcuref_t *ref)
 {
+	int cnt;
+
 	RCU_LOCKDEP_WARN(!rcu_read_lock_held() && preemptible(),
 			 "suspicious rcuref_put_rcusafe() usage");
 	/*
 	 * Unconditionally decrease the reference count. The saturation and
 	 * dead zones provide enough tolerance for this.
 	 */
-	if (likely(!atomic_add_negative_release(-1, &ref->refcnt)))
+	cnt = atomic_sub_return_release(1, &ref->refcnt);
+	if (likely(cnt >= 0))
 		return false;
 
 	/*
 	 * Handle the last reference drop and cases inside the saturation
 	 * and dead zones.
 	 */
-	return rcuref_put_slowpath(ref);
+	return rcuref_put_slowpath(ref, cnt);
 }
 
 /**
lib/rcuref.c  +2 −3

@@ -220,6 +220,7 @@ EXPORT_SYMBOL_GPL(rcuref_get_slowpath);
 /**
  * rcuref_put_slowpath - Slowpath of __rcuref_put()
  * @ref:	Pointer to the reference count
+ * @cnt:	The resulting value of the fastpath decrement
  *
  * Invoked when the reference count is outside of the valid zone.
  *
@@ -233,10 +234,8 @@ EXPORT_SYMBOL_GPL(rcuref_get_slowpath);
  *	with a concurrent get()/put() pair. Caller is not allowed to
  *	deconstruct the protected object.
  */
-bool rcuref_put_slowpath(rcuref_t *ref)
+bool rcuref_put_slowpath(rcuref_t *ref, unsigned int cnt)
 {
-	unsigned int cnt = atomic_read(&ref->refcnt);
-
 	/* Did this drop the last reference? */
 	if (likely(cnt == RCUREF_NOREF)) {
 		/*