Commit 96017bf9 authored by Paul E. McKenney
Browse files

rcu-tasks: Simplify trc_read_check_handler() atomic operations



Currently, trc_wait_for_one_reader() atomically increments
the trc_n_readers_need_end counter before sending the IPI
invoking trc_read_check_handler().  All failure paths out of
trc_read_check_handler() and also from the smp_call_function_single()
within trc_wait_for_one_reader() must carefully atomically decrement
this counter.  This is more complex than it needs to be.

This commit therefore simplifies things and saves a few lines of
code by dispensing with the atomic decrements in favor of having
trc_read_check_handler() do the atomic increment only in the success case.
In theory, this represents no change in functionality.

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
parent cbe0d8d9
Loading
Loading
Loading
Loading
+3 −17
Original line number Diff line number Diff line
@@ -890,32 +890,24 @@ static void trc_read_check_handler(void *t_in)

	// If the task is no longer running on this CPU, leave.
	if (unlikely(texp != t)) {
		if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
			wake_up(&trc_wait);
		goto reset_ipi; // Already on holdout list, so will check later.
	}

	// If the task is not in a read-side critical section, and
	// if this is the last reader, awaken the grace-period kthread.
	if (likely(!READ_ONCE(t->trc_reader_nesting))) {
		if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
			wake_up(&trc_wait);
		// Mark as checked after decrement to avoid false
		// positives on the above WARN_ON_ONCE().
		WRITE_ONCE(t->trc_reader_checked, true);
		goto reset_ipi;
	}
	// If we are racing with an rcu_read_unlock_trace(), try again later.
	if (unlikely(READ_ONCE(t->trc_reader_nesting) < 0)) {
		if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
			wake_up(&trc_wait);
	if (unlikely(READ_ONCE(t->trc_reader_nesting) < 0))
		goto reset_ipi;
	}
	WRITE_ONCE(t->trc_reader_checked, true);

	// Get here if the task is in a read-side critical section.  Set
	// its state so that it will awaken the grace-period kthread upon
	// exit from that critical section.
	atomic_inc(&trc_n_readers_need_end); // One more to wait on.
	WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs));
	WRITE_ONCE(t->trc_reader_special.b.need_qs, true);

@@ -1015,21 +1007,15 @@ static void trc_wait_for_one_reader(struct task_struct *t,
		if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0)
			return;

		atomic_inc(&trc_n_readers_need_end);
		per_cpu(trc_ipi_to_cpu, cpu) = true;
		t->trc_ipi_to_cpu = cpu;
		rcu_tasks_trace.n_ipis++;
		if (smp_call_function_single(cpu,
					     trc_read_check_handler, t, 0)) {
		if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) {
			// Just in case there is some other reason for
			// failure than the target CPU being offline.
			rcu_tasks_trace.n_ipis_fails++;
			per_cpu(trc_ipi_to_cpu, cpu) = false;
			t->trc_ipi_to_cpu = cpu;
			if (atomic_dec_and_test(&trc_n_readers_need_end)) {
				WARN_ON_ONCE(1);
				wake_up(&trc_wait);
			}
		}
	}
}