Commit f5e29a26 authored by Linus Torvalds

Merge tag 'locking-urgent-2021-09-19' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking fixes from Thomas Gleixner:
 "A set of updates for the RT specific reader/writer locking base code:

   - Make the fast path reader ordering guarantees correct.

   - Code reshuffling to make the fix simpler"

[ This plays ugly games with atomic_add_return_release() because we
  don't have a plain atomic_add_release(), and should really be cleaned
  up, I think    - Linus ]

* tag 'locking-urgent-2021-09-19' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/rwbase: Take care of ordering guarantee for fastpath reader
  locking/rwbase: Extract __rwbase_write_trylock()
  locking/rwbase: Properly match set_and_save_state() to restore_state()
parents 62453a46 81121524
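
The bracketed note above concerns the kernel atomics API: a value-returning atomic_add_return_release() exists, but there is no plain void atomic_add_release(), so the patch obtains RELEASE ordering on the add by calling the _return_ variant and casting away the result. A minimal sketch of the two forms follows; atomic_add_release() is hypothetical (not an existing helper at this point), and rwb/bias are the variables from the hunk further down:

	/* What the patch below actually does: RELEASE ordering from the
	 * value-returning op, with the unused result explicitly discarded. */
	(void)atomic_add_return_release(READER_BIAS - bias, &rwb->readers);

	/* The cleanup hinted at in the note would need a void variant along
	 * the lines of a hypothetical atomic_add_release(), which the
	 * atomics API does not provide here. */
	atomic_add_release(READER_BIAS - bias, &rwb->readers);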
 kernel/locking/rwbase_rt.c | 65 ++++++++++++++++++++++-------
 1 file changed, 45 insertions(+), 20 deletions(-)

--- a/kernel/locking/rwbase_rt.c
+++ b/kernel/locking/rwbase_rt.c
@@ -41,6 +41,12 @@
  * The risk of writer starvation is there, but the pathological use cases
  * which trigger it are not necessarily the typical RT workloads.
  *
+ * Fast-path orderings:
+ * The lock/unlock of readers can run in fast paths: lock and unlock are only
+ * atomic ops, and there is no inner lock to provide ACQUIRE and RELEASE
+ * semantics of rwbase_rt. Atomic ops should thus provide _acquire()
+ * and _release() (or stronger).
+ *
  * Common code shared between RT rw_semaphore and rwlock
  */

@@ -53,6 +59,7 @@ static __always_inline int rwbase_read_trylock(struct rwbase_rt *rwb)
 	 * set.
 	 */
 	for (r = atomic_read(&rwb->readers); r < 0;) {
+		/* Fully-ordered if cmpxchg() succeeds, provides ACQUIRE */
 		if (likely(atomic_try_cmpxchg(&rwb->readers, &r, r + 1)))
 			return 1;
 	}
@@ -162,6 +169,8 @@ static __always_inline void rwbase_read_unlock(struct rwbase_rt *rwb,
 	/*
 	 * rwb->readers can only hit 0 when a writer is waiting for the
 	 * active readers to leave the critical section.
+	 *
+	 * dec_and_test() is fully ordered, provides RELEASE.
 	 */
 	if (unlikely(atomic_dec_and_test(&rwb->readers)))
 		__rwbase_read_unlock(rwb, state);
@@ -172,7 +181,11 @@ static inline void __rwbase_write_unlock(struct rwbase_rt *rwb, int bias,
 {
 	struct rt_mutex_base *rtm = &rwb->rtmutex;
 
-	atomic_add(READER_BIAS - bias, &rwb->readers);
+	/*
+	 * _release() is needed in case that reader is in fast path, pairing
+	 * with atomic_try_cmpxchg() in rwbase_read_trylock(), provides RELEASE
+	 */
+	(void)atomic_add_return_release(READER_BIAS - bias, &rwb->readers);
 	raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
 	rwbase_rtmutex_unlock(rtm);
 }
@@ -196,6 +209,23 @@ static inline void rwbase_write_downgrade(struct rwbase_rt *rwb)
 	__rwbase_write_unlock(rwb, WRITER_BIAS - 1, flags);
 }
 
+static inline bool __rwbase_write_trylock(struct rwbase_rt *rwb)
+{
+	/* Can do without CAS because we're serialized by wait_lock. */
+	lockdep_assert_held(&rwb->rtmutex.wait_lock);
+
+	/*
+	 * _acquire is needed in case the reader is in the fast path, pairing
+	 * with rwbase_read_unlock(), provides ACQUIRE.
+	 */
+	if (!atomic_read_acquire(&rwb->readers)) {
+		atomic_set(&rwb->readers, WRITER_BIAS);
+		return 1;
+	}
+
+	return 0;
+}
+
 static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
 				     unsigned int state)
 {
@@ -210,34 +240,30 @@ static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
 	atomic_sub(READER_BIAS, &rwb->readers);
 
 	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
-	/*
-	 * set_current_state() for rw_semaphore
-	 * current_save_and_set_rtlock_wait_state() for rwlock
-	 */
-	rwbase_set_and_save_current_state(state);
+	if (__rwbase_write_trylock(rwb))
+		goto out_unlock;
 
-	/* Block until all readers have left the critical section. */
-	for (; atomic_read(&rwb->readers);) {
+	rwbase_set_and_save_current_state(state);
+	for (;;) {
 		/* Optimized out for rwlocks */
 		if (rwbase_signal_pending_state(state, current)) {
-			__set_current_state(TASK_RUNNING);
+			rwbase_restore_current_state();
 			__rwbase_write_unlock(rwb, 0, flags);
 			return -EINTR;
 		}
-		raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
 
-		/*
-		 * Schedule and wait for the readers to leave the critical
-		 * section. The last reader leaving it wakes the waiter.
-		 */
-		if (atomic_read(&rwb->readers) != 0)
+		if (__rwbase_write_trylock(rwb))
+			break;
+
+		raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
 		rwbase_schedule();
-		set_current_state(state);
 		raw_spin_lock_irqsave(&rtm->wait_lock, flags);
-	}
 
-	atomic_set(&rwb->readers, WRITER_BIAS);
+		set_current_state(state);
+	}
 	rwbase_restore_current_state();
+
+out_unlock:
 	raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
 	return 0;
 }
@@ -253,8 +279,7 @@ static inline int rwbase_write_trylock(struct rwbase_rt *rwb)
 	atomic_sub(READER_BIAS, &rwb->readers);
 
 	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
-	if (!atomic_read(&rwb->readers)) {
-		atomic_set(&rwb->readers, WRITER_BIAS);
+	if (__rwbase_write_trylock(rwb)) {
 		raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
 		return 1;
 	}
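
Taken together, the hunks above establish two pairings for the lockless reader fast path: the release add in __rwbase_write_unlock() pairs with the fully ordered atomic_try_cmpxchg() in rwbase_read_trylock(), and the fully ordered atomic_dec_and_test() in rwbase_read_unlock() pairs with the atomic_read_acquire() in __rwbase_write_trylock(). A minimal userspace C11 sketch of the second pairing follows; the names and the plain counter are illustrative only (no bias trick, no wait_lock), and the kernel's fully ordered decrement is modelled here as a release decrement:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int readers = 1;	/* one reader inside its critical section */
static int data;		/* written by the reader while it holds the lock */

/* Reader side: store to the protected data, then leave the critical
 * section; the decrement must be (at least) RELEASE. */
static void reader_exit(void)
{
	data = 42;
	atomic_fetch_sub_explicit(&readers, 1, memory_order_release);
}

/* Writer side: with an ACQUIRE load, as in __rwbase_write_trylock(),
 * observing readers == 0 also guarantees seeing data == 42; a plain or
 * relaxed load would give no such guarantee. */
static bool writer_sees_readers_gone(void)
{
	if (atomic_load_explicit(&readers, memory_order_acquire) == 0)
		return data == 42;	/* guaranteed true here */
	return false;
}

Dropping the acquire on the writer's load would allow it to observe readers == 0 while still missing the reader's critical-section store, which is exactly the reordering the fastpath-ordering patch in this series rules out.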