Commit 70c80103 authored by Thomas Gleixner, committed by Ingo Molnar

locking/rtmutex: Consolidate the fast/slowpath invocation

The indirection via a function pointer (which is at least optimized into a
tail call by the compiler) is making the code hard to read.

Clean it up and move the futex related trylock functions down to the futex
section.

Move the wake_q wakeup into rt_mutex_slowunlock(). No point in handing it
to the caller. The futex code uses a different function.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210326153944.247927548@linutronix.de
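
The pattern being consolidated is the classic two-stage lock acquisition: try to cmpxchg the owner field from NULL to current (the fast path) and fall back to the slow path only on contention, with the lockdep annotations wrapping the whole thing. As a rough illustration of that shape only -- not the kernel implementation -- a minimal userspace analogue using C11 atomics might look like the sketch below; all demo_* names are made up for the example.

    /* Minimal userspace sketch of a fast/slowpath lock split.
     * Hypothetical names; illustrative only, not kernel code. */
    #include <stdatomic.h>
    #include <sched.h>
    #include <stdio.h>

    struct demo_mutex {
    	/* NULL means unlocked, otherwise points at an owner cookie */
    	_Atomic(void *) owner;
    };

    /* Slow path: only reached when the fast-path cmpxchg failed.
     * A real implementation would queue the waiter and sleep;
     * this sketch just yields and retries. */
    static int demo_lock_slowpath(struct demo_mutex *lock, void *me)
    {
    	void *expected = NULL;

    	while (!atomic_compare_exchange_weak(&lock->owner, &expected, me)) {
    		expected = NULL;
    		sched_yield();
    	}
    	return 0;
    }

    /* Consolidated entry point: fast path inline, then a direct call
     * to the slow path instead of going through a function pointer. */
    static int demo_lock(struct demo_mutex *lock, void *me)
    {
    	void *expected = NULL;

    	if (atomic_compare_exchange_strong(&lock->owner, &expected, me))
    		return 0;			/* fast path: uncontended */

    	return demo_lock_slowpath(lock, me);	/* slow path: contended */
    }

    static void demo_unlock(struct demo_mutex *lock, void *me)
    {
    	void *expected = me;

    	/* Fast path release; a real slow path would wake the top waiter. */
    	if (!atomic_compare_exchange_strong(&lock->owner, &expected, NULL))
    		fprintf(stderr, "demo_unlock: not the owner?\n");
    }

    int main(void)
    {
    	struct demo_mutex m = { .owner = NULL };
    	int me;		/* address used as an owner cookie */

    	demo_lock(&m, &me);
    	demo_unlock(&m, &me);
    	return 0;
    }

In the patch itself the slow-path functions (rt_mutex_slowlock(), rt_mutex_slowtrylock(), rt_mutex_slowunlock()) are now called directly from the inlined fast-path code instead of being handed in as slowfn function pointers.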
parent d7a2edb8
+59 −85
@@ -1298,14 +1298,25 @@ static int __sched rt_mutex_slowtrylock(struct rt_mutex *lock)
 	return ret;
 }
 
+/*
+ * Performs the wakeup of the top-waiter and re-enables preemption.
+ */
+void __sched rt_mutex_postunlock(struct wake_q_head *wake_q)
+{
+	wake_up_q(wake_q);
+
+	/* Pairs with preempt_disable() in rt_mutex_slowunlock() */
+	preempt_enable();
+}
+
 /*
  * Slow path to release a rt-mutex.
  *
  * Return whether the current task needs to call rt_mutex_postunlock().
  */
-static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
-					struct wake_q_head *wake_q)
+static void __sched rt_mutex_slowunlock(struct rt_mutex *lock)
 {
+	DEFINE_WAKE_Q(wake_q);
 	unsigned long flags;
 
 	/* irqsave required to support early boot calls */
@@ -1347,7 +1358,7 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
 	while (!rt_mutex_has_waiters(lock)) {
 		/* Drops lock->wait_lock ! */
 		if (unlock_rt_mutex_safe(lock, flags) == true)
-			return false;
+			return;
 		/* Relock the rtmutex and try again */
 		raw_spin_lock_irqsave(&lock->wait_lock, flags);
 	}
@@ -1358,10 +1369,10 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
 	 *
 	 * Queue the next waiter for wakeup once we release the wait_lock.
 	 */
-	mark_wakeup_next_waiter(wake_q, lock);
+	mark_wakeup_next_waiter(&wake_q, lock);
 	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
-	return true; /* call rt_mutex_postunlock() */
+	rt_mutex_postunlock(&wake_q);
 }
 
 /*
@@ -1370,60 +1381,21 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
  * The atomic acquire/release ops are compiled away, when either the
  * architecture does not support cmpxchg or when debugging is enabled.
  */
-static __always_inline int
-rt_mutex_fastlock(struct rt_mutex *lock, int state,
-		  int (*slowfn)(struct rt_mutex *lock, int state,
-				struct hrtimer_sleeper *timeout,
-				enum rtmutex_chainwalk chwalk))
+static __always_inline int __rt_mutex_lock(struct rt_mutex *lock, long state,
+					   unsigned int subclass)
 {
-	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
-		return 0;
+	int ret;
 
-	return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
-}
+	might_sleep();
+	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
 
-static __always_inline int
-rt_mutex_fasttrylock(struct rt_mutex *lock,
-		     int (*slowfn)(struct rt_mutex *lock))
-{
 	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
-		return 1;
-
-	return slowfn(lock);
-}
-
-/*
- * Performs the wakeup of the top-waiter and re-enables preemption.
- */
-void __sched rt_mutex_postunlock(struct wake_q_head *wake_q)
-{
-	wake_up_q(wake_q);
-
-	/* Pairs with preempt_disable() in rt_mutex_slowunlock() */
-	preempt_enable();
-}
-
-static __always_inline void
-rt_mutex_fastunlock(struct rt_mutex *lock,
-		    bool (*slowfn)(struct rt_mutex *lock,
-				   struct wake_q_head *wqh))
-{
-	DEFINE_WAKE_Q(wake_q);
-
-	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
-		return;
-
-	if (slowfn(lock, &wake_q))
-		rt_mutex_postunlock(&wake_q);
-}
-
-static __always_inline void __rt_mutex_lock(struct rt_mutex *lock,
-					    unsigned int subclass)
-{
-	might_sleep();
+		return 0;
 
-	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
-	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
+	ret = rt_mutex_slowlock(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
+	if (ret)
+		mutex_release(&lock->dep_map, _RET_IP_);
+	return ret;
 }
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -1435,7 +1407,7 @@ static __always_inline void __rt_mutex_lock(struct rt_mutex *lock,
  */
 void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
 {
-	__rt_mutex_lock(lock, subclass);
+	__rt_mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
 
@@ -1448,7 +1420,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
  */
 void __sched rt_mutex_lock(struct rt_mutex *lock)
 {
-	__rt_mutex_lock(lock, 0);
+	__rt_mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock);
 #endif
@@ -1464,42 +1436,21 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
  */
 int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
 {
-	int ret;
-
-	might_sleep();
-
-	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	ret = rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
-	if (ret)
-		mutex_release(&lock->dep_map, _RET_IP_);
-
-	return ret;
+	return __rt_mutex_lock(lock, TASK_INTERRUPTIBLE, 0);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
 
-/*
- * Futex variant, must not use fastpath.
- */
-int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
-{
-	return rt_mutex_slowtrylock(lock);
-}
-
-int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock)
-{
-	return __rt_mutex_slowtrylock(lock);
-}
-
 /**
  * rt_mutex_trylock - try to lock a rt_mutex
  *
  * @lock:	the rt_mutex to be locked
  *
- * This function can only be called in thread context. It's safe to
- * call it from atomic regions, but not from hard interrupt or soft
- * interrupt context.
+ * This function can only be called in thread context. It's safe to call it
+ * from atomic regions, but not from hard or soft interrupt context.
  *
- * Returns 1 on success and 0 on contention
+ * Returns:
+ *  1 on success
+ *  0 on contention
  */
 int __sched rt_mutex_trylock(struct rt_mutex *lock)
 {
@@ -1508,7 +1459,14 @@ int __sched rt_mutex_trylock(struct rt_mutex *lock)
 	if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
 		return 0;
 
-	ret = rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
+	/*
+	 * No lockdep annotation required because lockdep disables the fast
+	 * path.
+	 */
+	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
+		return 1;
+
+	ret = rt_mutex_slowtrylock(lock);
 	if (ret)
 		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
 
@@ -1524,10 +1482,26 @@ EXPORT_SYMBOL_GPL(rt_mutex_trylock);
 void __sched rt_mutex_unlock(struct rt_mutex *lock)
 {
 	mutex_release(&lock->dep_map, _RET_IP_);
-	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
+	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
+		return;
+
+	rt_mutex_slowunlock(lock);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_unlock);
 
+/*
+ * Futex variants, must not use fastpath.
+ */
+int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
+{
+	return rt_mutex_slowtrylock(lock);
+}
+
+int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock)
+{
+	return __rt_mutex_slowtrylock(lock);
+}
+
 /**
  * __rt_mutex_futex_unlock - Futex variant, that since futex variants
  * do not use the fast-path, can be simple and will not need to retry.