Commit ebbdc41e authored by Thomas Gleixner, committed by Ingo Molnar

locking/rtmutex: Provide rt_mutex_slowlock_locked()

Split the inner workings of rt_mutex_slowlock() out into a separate
function, which can be reused by the upcoming RT lock substitutions,
e.g. for rw_semaphores.
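
The resulting layering (a simplified sketch; deadlock handling and
waiter cleanup elided):

  rt_mutex_slowlock()                  /* takes wait_lock, irqsave */
    __rt_mutex_slowlock_locked()       /* on-stack waiter, RT_MUTEX_MIN_CHAINWALK */
      __rt_mutex_slowlock()            /* try-to-take, then block */
        rt_mutex_slowlock_block()      /* wait-wake-try-to-take loop */

A substitution which already holds lock->wait_lock can enter at
__rt_mutex_slowlock_locked() and skip the irqsave/restore wrapper.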

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20210815211302.841971086@linutronix.de
parent 830e6acc
kernel/locking/rtmutex.c (+58 −42)
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1106,7 +1106,7 @@ static void __sched remove_waiter(struct rt_mutex_base *lock,
 }
 
 /**
- * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
+ * rt_mutex_slowlock_block() - Perform the wait-wake-try-to-take loop
  * @lock:		 the rt_mutex to take
  * @state:		 the state the task should block in (TASK_INTERRUPTIBLE
  *			 or TASK_UNINTERRUPTIBLE)
@@ -1115,7 +1115,7 @@ static void __sched remove_waiter(struct rt_mutex_base *lock,
  *
  * Must be called with lock->wait_lock held and interrupts disabled
  */
-static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
-				       unsigned int state,
-				       struct hrtimer_sleeper *timeout,
-				       struct rt_mutex_waiter *waiter)
+static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
+					   unsigned int state,
+					   struct hrtimer_sleeper *timeout,
+					   struct rt_mutex_waiter *waiter)
@@ -1168,52 +1168,37 @@ static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock,
 	}
 }
 
-/*
- * Slow path lock function:
+/**
+ * __rt_mutex_slowlock - Locking slowpath invoked with lock::wait_lock held
+ * @lock:	The rtmutex to block lock
+ * @state:	The task state for sleeping
+ * @chwalk:	Indicator whether full or partial chainwalk is requested
+ * @waiter:	Initializer waiter for blocking
  */
-static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
-				     unsigned int state,
-				     struct hrtimer_sleeper *timeout,
-				     enum rtmutex_chainwalk chwalk)
+static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
+				       unsigned int state,
+				       enum rtmutex_chainwalk chwalk,
+				       struct rt_mutex_waiter *waiter)
 {
-	struct rt_mutex_waiter waiter;
-	unsigned long flags;
-	int ret = 0;
-
-	rt_mutex_init_waiter(&waiter);
-
-	/*
-	 * Technically we could use raw_spin_[un]lock_irq() here, but this can
-	 * be called in early boot if the cmpxchg() fast path is disabled
-	 * (debug, no architecture support). In this case we will acquire the
-	 * rtmutex with lock->wait_lock held. But we cannot unconditionally
-	 * enable interrupts in that early boot case. So we need to use the
-	 * irqsave/restore variants.
-	 */
-	raw_spin_lock_irqsave(&lock->wait_lock, flags);
+	int ret;
+
+	lockdep_assert_held(&lock->wait_lock);
 
 	/* Try to acquire the lock again: */
-	if (try_to_take_rt_mutex(lock, current, NULL)) {
-		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+	if (try_to_take_rt_mutex(lock, current, NULL))
 		return 0;
-	}
 
 	set_current_state(state);
 
-	/* Setup the timer, when timeout != NULL */
-	if (unlikely(timeout))
-		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
-
-	ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);
+	ret = task_blocks_on_rt_mutex(lock, waiter, current, chwalk);
 
 	if (likely(!ret))
 		/* sleep on the mutex */
-		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
+		ret = rt_mutex_slowlock_block(lock, state, NULL, waiter);
 
 	if (unlikely(ret)) {
 		__set_current_state(TASK_RUNNING);
-		remove_waiter(lock, &waiter);
-		rt_mutex_handle_deadlock(ret, chwalk, &waiter);
+		remove_waiter(lock, waiter);
+		rt_mutex_handle_deadlock(ret, chwalk, waiter);
 	}
 
 	/*
@@ -1221,14 +1206,45 @@ static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
 	 * unconditionally. We might have to fix that up.
 	 */
 	fixup_rt_mutex_waiters(lock);
+	return ret;
+}
 
-	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+static inline int __rt_mutex_slowlock_locked(struct rt_mutex_base *lock,
+					     unsigned int state)
+{
+	struct rt_mutex_waiter waiter;
+	int ret;
+
+	rt_mutex_init_waiter(&waiter);
 
-	/* Remove pending timer: */
-	if (unlikely(timeout))
-		hrtimer_cancel(&timeout->timer);
+	ret = __rt_mutex_slowlock(lock, state, RT_MUTEX_MIN_CHAINWALK, &waiter);
 
 	debug_rt_mutex_free_waiter(&waiter);
-
 	return ret;
 }
 
+/*
+ * rt_mutex_slowlock - Locking slowpath invoked when fast path fails
+ * @lock:	The rtmutex to block lock
+ * @state:	The task state for sleeping
+ */
+static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
+				     unsigned int state)
+{
+	unsigned long flags;
+	int ret;
+
+	/*
+	 * Technically we could use raw_spin_[un]lock_irq() here, but this can
+	 * be called in early boot if the cmpxchg() fast path is disabled
+	 * (debug, no architecture support). In this case we will acquire the
+	 * rtmutex with lock->wait_lock held. But we cannot unconditionally
+	 * enable interrupts in that early boot case. So we need to use the
+	 * irqsave/restore variants.
+	 */
+	raw_spin_lock_irqsave(&lock->wait_lock, flags);
+	ret = __rt_mutex_slowlock_locked(lock, state);
+	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+
+	return ret;
+}
@@ -1239,7 +1255,7 @@ static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock,
 	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
 		return 0;
 
-	return rt_mutex_slowlock(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
+	return rt_mutex_slowlock(lock, state);
 }
 
 static int __sched __rt_mutex_slowtrylock(struct rt_mutex_base *lock)
kernel/locking/rtmutex_api.c (+1 −1)
--- a/kernel/locking/rtmutex_api.c
+++ b/kernel/locking/rtmutex_api.c
@@ -342,7 +342,7 @@ int __sched rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
 	raw_spin_lock_irq(&lock->wait_lock);
 	/* sleep on the mutex */
 	set_current_state(TASK_INTERRUPTIBLE);
-	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
+	ret = rt_mutex_slowlock_block(lock, TASK_INTERRUPTIBLE, to, waiter);
 	/*
 	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
 	 * have to fix that up.
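
For illustration, a sketch of how an RT rw_semaphore substitution might
sit on top of the new helper. This is not part of this patch; the
rwbase_rt type, the ->readers field and WRITER_BIAS are assumptions
modeled on the substitution this series introduces later:

/* Illustrative sketch only -- not part of this patch. */
static int __sched rwbase_read_slowpath_sketch(struct rwbase_rt *rwb,
					       unsigned int state)
{
	struct rt_mutex_base *rtm = &rwb->rtmutex;
	int ret;

	raw_spin_lock_irq(&rtm->wait_lock);

	/* Readers may proceed unless a writer holds the full bias. */
	if (atomic_read(&rwb->readers) != WRITER_BIAS) {
		atomic_inc(&rwb->readers);
		raw_spin_unlock_irq(&rtm->wait_lock);
		return 0;
	}

	/*
	 * Block on the writer's rtmutex without dropping wait_lock in
	 * between -- the reuse that __rt_mutex_slowlock_locked() enables.
	 * A real implementation would then account the reader and
	 * release the rtmutex again.
	 */
	ret = __rt_mutex_slowlock_locked(rtm, state);

	raw_spin_unlock_irq(&rtm->wait_lock);
	return ret;
}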