Commit 95d248d1 authored by Linus Torvalds

Merge tag 'locking_urgent_for_v6.2_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking fixes from Borislav Petkov:

 - Prevent futex_waitv() from leaking the hrtimer debug object when its waiter allocation fails

 - A preempt-RT mutex fix: add the missing acquire semantics to the rtmutex lock acquisition slow path

* tag 'locking_urgent_for_v6.2_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  futex: Fix futex_waitv() hrtimer debug object leak on kcalloc error
  rtmutex: Add acquire semantics for rtmutex lock acquisition slow path
parents 8b419482 94cd8fa0
kernel/futex/syscalls.c +7 −4
@@ -286,19 +286,22 @@ SYSCALL_DEFINE5(futex_waitv, struct futex_waitv __user *, waiters,
 	}
 
 	futexv = kcalloc(nr_futexes, sizeof(*futexv), GFP_KERNEL);
-	if (!futexv)
-		return -ENOMEM;
+	if (!futexv) {
+		ret = -ENOMEM;
+		goto destroy_timer;
+	}
 
 	ret = futex_parse_waitv(futexv, waiters, nr_futexes);
 	if (!ret)
 		ret = futex_wait_multiple(futexv, nr_futexes, timeout ? &to : NULL);
 
+	kfree(futexv);
+
+destroy_timer:
 	if (timeout) {
 		hrtimer_cancel(&to.timer);
 		destroy_hrtimer_on_stack(&to.timer);
 	}
-
-	kfree(futexv);
 	return ret;
 }
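This hunk is the entire futex fix: on kcalloc() failure, the early return skipped hrtimer_cancel()/destroy_hrtimer_on_stack() for the on-stack timer set up earlier in the syscall, leaking its debug object under CONFIG_DEBUG_OBJECTS_TIMERS. Below is a minimal user-space sketch of the goto-cleanup shape the fix adopts; setup_resource(), destroy_resource() and do_waitv() are illustrative stand-ins for the on-stack hrtimer setup, destroy_hrtimer_on_stack() and the syscall body, not kernel APIs.

/*
 * Minimal user-space sketch of the cleanup ordering the fix restores.
 * All names here are illustrative, not kernel APIs.
 */
#include <stdio.h>
#include <stdlib.h>

struct resource { int armed; };

static void setup_resource(struct resource *r)   { r->armed = 1; }
static void destroy_resource(struct resource *r) { r->armed = 0; }

static int do_waitv(size_t nr, int fail_alloc)
{
	struct resource timer;
	int *slots;
	int ret = 0;

	setup_resource(&timer);		/* like the optional timer setup */

	slots = fail_alloc ? NULL : calloc(nr, sizeof(*slots));
	if (!slots) {
		ret = -1;		/* -ENOMEM in the kernel */
		goto destroy_timer;	/* an early return here would leak */
	}

	/* ... wait on the futexes ... */

	free(slots);	/* success path only; the error path never allocated */

destroy_timer:
	destroy_resource(&timer);	/* now runs on success and error paths */
	return ret;
}

int main(void)
{
	printf("ok=%d err=%d\n", do_waitv(4, 0), do_waitv(4, 1));
	return 0;
}

The key property is that every exit taken after the resource is set up funnels through the destroy_timer: label, exactly as futex_waitv() does after this change.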

kernel/locking/rtmutex.c +46 −9
@@ -89,15 +89,31 @@ static inline int __ww_mutex_check_kill(struct rt_mutex *lock,
  * set this bit before looking at the lock.
  */
 
-static __always_inline void
-rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner)
+static __always_inline struct task_struct *
+rt_mutex_owner_encode(struct rt_mutex_base *lock, struct task_struct *owner)
 {
 	unsigned long val = (unsigned long)owner;
 
 	if (rt_mutex_has_waiters(lock))
 		val |= RT_MUTEX_HAS_WAITERS;
 
-	WRITE_ONCE(lock->owner, (struct task_struct *)val);
+	return (struct task_struct *)val;
 }
+
+static __always_inline void
+rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner)
+{
+	/*
+	 * lock->wait_lock is held but explicit acquire semantics are needed
+	 * for a new lock owner so WRITE_ONCE is insufficient.
+	 */
+	xchg_acquire(&lock->owner, rt_mutex_owner_encode(lock, owner));
+}
+
+static __always_inline void rt_mutex_clear_owner(struct rt_mutex_base *lock)
+{
+	/* lock->wait_lock is held so the unlock provides release semantics. */
+	WRITE_ONCE(lock->owner, rt_mutex_owner_encode(lock, NULL));
+}
 
 static __always_inline void clear_rt_mutex_waiters(struct rt_mutex_base *lock)
@@ -106,7 +122,8 @@ static __always_inline void clear_rt_mutex_waiters(struct rt_mutex_base *lock)
 			((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
 }
 
-static __always_inline void fixup_rt_mutex_waiters(struct rt_mutex_base *lock)
+static __always_inline void
+fixup_rt_mutex_waiters(struct rt_mutex_base *lock, bool acquire_lock)
 {
 	unsigned long owner, *p = (unsigned long *) &lock->owner;
 
@@ -172,9 +189,22 @@ static __always_inline void fixup_rt_mutex_waiters(struct rt_mutex_base *lock)
 	 * still set.
 	 */
 	owner = READ_ONCE(*p);
-	if (owner & RT_MUTEX_HAS_WAITERS)
-		WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
+	if (owner & RT_MUTEX_HAS_WAITERS) {
+		/*
+		 * See rt_mutex_set_owner() and rt_mutex_clear_owner() on
+		 * why xchg_acquire() is used for updating owner for
+		 * locking and WRITE_ONCE() for unlocking.
+		 *
+		 * WRITE_ONCE() would work for the acquire case too, but
+		 * in case that the lock acquisition failed it might
+		 * force other lockers into the slow path unnecessarily.
+		 */
+		if (acquire_lock)
+			xchg_acquire(p, owner & ~RT_MUTEX_HAS_WAITERS);
+		else
+			WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
+	}
 }
 
 /*
  * We can speed up the acquire/release, if there's no debugging state to be
@@ -208,6 +238,13 @@ static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock)
 		owner = *p;
 	} while (cmpxchg_relaxed(p, owner,
 				 owner | RT_MUTEX_HAS_WAITERS) != owner);
+
+	/*
+	 * The cmpxchg loop above is relaxed to avoid back-to-back ACQUIRE
+	 * operations in the event of contention. Ensure the successful
+	 * cmpxchg is visible.
+	 */
+	smp_mb__after_atomic();
 }
 
 /*
@@ -1243,7 +1280,7 @@ static int __sched __rt_mutex_slowtrylock(struct rt_mutex_base *lock)
 	 * try_to_take_rt_mutex() sets the lock waiters bit
 	 * unconditionally. Clean this up.
 	 */
-	fixup_rt_mutex_waiters(lock);
+	fixup_rt_mutex_waiters(lock, true);
 
 	return ret;
 }
@@ -1604,7 +1641,7 @@ static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
 	 * try_to_take_rt_mutex() sets the waiter bit
 	 * unconditionally. We might have to fix that up.
 	 */
-	fixup_rt_mutex_waiters(lock);
+	fixup_rt_mutex_waiters(lock, true);
 
 	trace_contention_end(lock, ret);
 
@@ -1719,7 +1756,7 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock)
 	 * try_to_take_rt_mutex() sets the waiter bit unconditionally.
 	 * We might have to fix that up:
 	 */
-	fixup_rt_mutex_waiters(lock);
+	fixup_rt_mutex_waiters(lock, true);
 	debug_rt_mutex_free_waiter(&waiter);
 
 	trace_contention_end(lock, 0);
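The heart of the rtmutex change is memory ordering rather than logic: when the slow path takes the lock, the lock->owner update must have ACQUIRE semantics so the new owner cannot observe stale state from the previous critical section, while the unlock side pairs with it via a RELEASE. A user-space C11 analogue of that pairing, built as a plain test-and-set spinlock rather than the kernel's rtmutex:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool locked;
static long counter;

static void lock(void)
{
	/* Spirit of xchg_acquire(): the winning exchange is an ACQUIRE. */
	while (atomic_exchange_explicit(&locked, true, memory_order_acquire))
		;	/* spin; the kernel blocks on wait_lock instead */
}

static void unlock(void)
{
	/* Pairs with the acquire above, like the wait_lock release does. */
	atomic_store_explicit(&locked, false, memory_order_release);
}

static void *worker(void *arg)
{
	for (int i = 0; i < 100000; i++) {
		lock();
		counter++;	/* protected by the acquire/release pair */
		unlock();
	}
	return arg;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, NULL);
	pthread_create(&b, NULL, worker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("%ld\n", counter);	/* always 200000 */
	return 0;
}

In the kernel the release comes from dropping lock->wait_lock rather than from the owner store itself, which is why rt_mutex_clear_owner() can stay a WRITE_ONCE() while the acquiring side needs the explicit xchg_acquire().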
kernel/locking/rtmutex_api.c +3 −3
@@ -267,7 +267,7 @@ void __sched rt_mutex_init_proxy_locked(struct rt_mutex_base *lock,
 void __sched rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
 {
 	debug_rt_mutex_proxy_unlock(lock);
-	rt_mutex_set_owner(lock, NULL);
+	rt_mutex_clear_owner(lock);
 }
 
 /**
@@ -382,7 +382,7 @@ int __sched rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
 	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
 	 * have to fix that up.
 	 */
-	fixup_rt_mutex_waiters(lock);
+	fixup_rt_mutex_waiters(lock, true);
 	raw_spin_unlock_irq(&lock->wait_lock);
 
 	return ret;
@@ -438,7 +438,7 @@ bool __sched rt_mutex_cleanup_proxy_lock(struct rt_mutex_base *lock,
 	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
 	 * have to fix that up.
 	 */
-	fixup_rt_mutex_waiters(lock);
+	fixup_rt_mutex_waiters(lock, false);
 
 	raw_spin_unlock_irq(&lock->wait_lock);
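Two smaller pieces of the series deserve a note. rt_mutex_cleanup_proxy_lock() above is the one fixup_rt_mutex_waiters() caller that runs when the lock was not taken, so it passes false and keeps the plain WRITE_ONCE(). And mark_rt_mutex_waiters() in rtmutex.c keeps its cmpxchg loop relaxed so that contended retries stay cheap, then pays for ordering once with smp_mb__after_atomic(). A user-space C11 sketch of that second pattern, where HAS_WAITERS and mark_waiters() are made-up names and atomic_thread_fence(memory_order_seq_cst) stands in for the kernel barrier:

#include <stdatomic.h>
#include <stdint.h>

#define HAS_WAITERS 1UL

static void mark_waiters(atomic_uintptr_t *owner)
{
	uintptr_t old = atomic_load_explicit(owner, memory_order_relaxed);

	/* Relaxed CAS loop: no ordering cost on each failed retry. */
	while (!atomic_compare_exchange_weak_explicit(owner, &old,
						      old | HAS_WAITERS,
						      memory_order_relaxed,
						      memory_order_relaxed))
		;	/* 'old' is reloaded by the failed CAS */

	/* One barrier after success, instead of ordering every retry. */
	atomic_thread_fence(memory_order_seq_cst);
}

On x86 the trailing fence costs little next to the locked RMW, and the kernel's smp_mb__after_atomic() compiles to a pure compiler barrier there for the same reason.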