Commit 5f5fa7ea authored by Lai Jiangshan, committed by Paul E. McKenney

rcu: Don't use negative nesting depth in __rcu_read_unlock()



Now that RCU flavors have been consolidated, an RCU-preempt
rcu_read_unlock() in an interrupt or softirq handler cannot possibly
end the RCU read-side critical section.  Consider the old vulnerability
involving rcu_read_unlock() being invoked within such a handler that
interrupted an __rcu_read_unlock_special(), in which a wakeup might be
invoked with a scheduler lock held.  Because rcu_read_unlock_special()
no longer does wakeups in such situations, it is no longer necessary
for __rcu_read_unlock() to set the nesting level negative.

This commit therefore removes this recursion-protection code from
__rcu_read_unlock().

[ paulmck: Let rcu_exp_handler() continue to call rcu_report_exp_rdp(). ]
[ paulmck: Adjust other checks given no more negative nesting. ]
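
For orientation, here is the outermost-unlock path before and after this change, condensed from the tree_plugin.h hunk below (a sketch rather than the verbatim kernel code; t is current):

	/* Before: the nesting count is parked at -RCU_NEST_BIAS around the
	 * special-case work, so that an rcu_read_unlock() in an interrupt
	 * taken here sees a nonzero depth and cannot recurse into
	 * rcu_read_unlock_special(). */
	if (rcu_preempt_depth() != 1) {
		rcu_preempt_read_exit();
	} else {
		barrier();
		rcu_preempt_depth_set(-RCU_NEST_BIAS);
		barrier();
		if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
			rcu_read_unlock_special(t);
		barrier();
		rcu_preempt_depth_set(0);
	}

	/* After: rcu_read_unlock_special() no longer does wakeups from such
	 * handlers, so no guard is needed; just decrement and test for zero. */
	if (rcu_preempt_read_exit() == 0) {
		barrier();	/* critical section before exit code. */
		if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
			rcu_read_unlock_special(t);
	}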
Signed-off-by: Lai Jiangshan <laijs@linux.alibaba.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
parent f0bdf6d4
kernel/rcu/tree_exp.h  +5 −26
@@ -639,6 +639,7 @@ static void wait_rcu_exp_gp(struct work_struct *wp)
  */
 static void rcu_exp_handler(void *unused)
 {
+	int depth = rcu_preempt_depth();
 	unsigned long flags;
 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 	struct rcu_node *rnp = rdp->mynode;
@@ -649,7 +650,7 @@ static void rcu_exp_handler(void *unused)
 	 * critical section.  If also enabled or idle, immediately
 	 * report the quiescent state, otherwise defer.
 	 */
-	if (!rcu_preempt_depth()) {
+	if (!depth) {
 		if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
 		    rcu_dynticks_curr_cpu_in_eqs()) {
 			rcu_report_exp_rdp(rdp);
@@ -673,7 +674,7 @@ static void rcu_exp_handler(void *unused)
 	 * can have caused this quiescent state to already have been
 	 * reported, so we really do need to check ->expmask.
 	 */
-	if (rcu_preempt_depth() > 0) {
+	if (depth > 0) {
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		if (rnp->expmask & rdp->grpmask) {
 			rdp->exp_deferred_qs = true;
@@ -683,30 +684,8 @@ static void rcu_exp_handler(void *unused)
 		return;
 	}
 
-	/*
-	 * The final and least likely case is where the interrupted
-	 * code was just about to or just finished exiting the RCU-preempt
-	 * read-side critical section, and no, we can't tell which.
-	 * So either way, set ->deferred_qs to flag later code that
-	 * a quiescent state is required.
-	 *
-	 * If the CPU is fully enabled (or if some buggy RCU-preempt
-	 * read-side critical section is being used from idle), just
-	 * invoke rcu_preempt_deferred_qs() to immediately report the
-	 * quiescent state.  We cannot use rcu_read_unlock_special()
-	 * because we are in an interrupt handler, which will cause that
-	 * function to take an early exit without doing anything.
-	 *
-	 * Otherwise, force a context switch after the CPU enables everything.
-	 */
-	rdp->exp_deferred_qs = true;
-	if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
-	    WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs())) {
-		rcu_preempt_deferred_qs(t);
-	} else {
-		set_tsk_need_resched(t);
-		set_preempt_need_resched();
-	}
+	// Finally, negative nesting depth should not happen.
+	WARN_ON_ONCE(1);
 }
 
 /* PREEMPTION=y, so no PREEMPTION=n expedited grace period to clean up after. */
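
Taken together, the handler now reduces to three cases keyed off the depth snapshot taken at entry. A condensed view of the result (the bodies of the first two branches are unchanged by this commit and are elided to comments):

static void rcu_exp_handler(void *unused)
{
	int depth = rcu_preempt_depth();	/* sampled once at entry */

	if (!depth) {
		/* Not in a read-side critical section: report the
		 * quiescent state now if possible, otherwise defer
		 * it and request a context switch. */
		return;
	}
	if (depth > 0) {
		/* Blocking an expedited grace period from inside a
		 * read-side critical section: set ->exp_deferred_qs
		 * so the outermost rcu_read_unlock() reports the QS. */
		return;
	}
	// Finally, negative nesting depth should not happen.
	WARN_ON_ONCE(1);
}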
kernel/rcu/tree_plugin.h  +7 −15
@@ -345,9 +345,7 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
 	return READ_ONCE(rnp->gp_tasks) != NULL;
 }
 
-/* Bias and limit values for ->rcu_read_lock_nesting. */
-#define RCU_NEST_BIAS INT_MAX
-#define RCU_NEST_NMAX (-INT_MAX / 2)
+/* limit value for ->rcu_read_lock_nesting. */
 #define RCU_NEST_PMAX (INT_MAX / 2)
 
 static void rcu_preempt_read_enter(void)
@@ -355,9 +353,9 @@ static void rcu_preempt_read_enter(void)
 	current->rcu_read_lock_nesting++;
 }
 
-static void rcu_preempt_read_exit(void)
+static int rcu_preempt_read_exit(void)
 {
-	current->rcu_read_lock_nesting--;
+	return --current->rcu_read_lock_nesting;
 }
 
 static void rcu_preempt_depth_set(int val)
@@ -390,21 +388,15 @@ void __rcu_read_unlock(void)
 {
 	struct task_struct *t = current;
 
-	if (rcu_preempt_depth() != 1) {
-		rcu_preempt_read_exit();
-	} else {
+	if (rcu_preempt_read_exit() == 0) {
 		barrier();  /* critical section before exit code. */
-		rcu_preempt_depth_set(-RCU_NEST_BIAS);
-		barrier();  /* assign before ->rcu_read_unlock_special load */
 		if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
 			rcu_read_unlock_special(t);
-		barrier();  /* ->rcu_read_unlock_special load before assign */
-		rcu_preempt_depth_set(0);
 	}
 	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
 		int rrln = rcu_preempt_depth();
 
-		WARN_ON_ONCE(rrln < 0 && rrln > RCU_NEST_NMAX);
+		WARN_ON_ONCE(rrln < 0 || rrln > RCU_NEST_PMAX);
 	}
 }
 EXPORT_SYMBOL_GPL(__rcu_read_unlock);
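
With the bias gone, the CONFIG_PROVE_LOCKING sanity check tightens from "not in the invalid negative window" to "never negative at all". A small user-space illustration of the two predicates (plain C, illustrative only; the macros mirror the definitions in the hunk above):

#include <limits.h>
#include <stdio.h>

#define RCU_NEST_BIAS INT_MAX		/* old bias (removed) */
#define RCU_NEST_NMAX (-INT_MAX / 2)	/* old negative limit (removed) */
#define RCU_NEST_PMAX (INT_MAX / 2)

int main(void)
{
	int rrln = 1 - RCU_NEST_BIAS;	/* depth seen mid-unlock, old scheme */

	/* Old check: values near -RCU_NEST_BIAS were legitimate while
	 * __rcu_read_unlock() ran its special-case code, so it warned
	 * only on "mildly" negative values above RCU_NEST_NMAX. */
	printf("old check fires: %d\n", rrln < 0 && rrln > RCU_NEST_NMAX);

	/* New check: no code path biases the count anymore, so any
	 * negative value (or overflow past RCU_NEST_PMAX) is a bug. */
	printf("new check fires: %d\n", rrln < 0 || rrln > RCU_NEST_PMAX);
	return 0;
}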
@@ -556,7 +548,7 @@ static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
 {
 	return (__this_cpu_read(rcu_data.exp_deferred_qs) ||
 		READ_ONCE(t->rcu_read_unlock_special.s)) &&
-	       rcu_preempt_depth() <= 0;
+	       rcu_preempt_depth() == 0;
 }
 
 /*
@@ -692,7 +684,7 @@ static void rcu_flavor_sched_clock_irq(int user)
 	} else if (rcu_preempt_need_deferred_qs(t)) {
 		rcu_preempt_deferred_qs(t); /* Report deferred QS. */
 		return;
-	} else if (!rcu_preempt_depth()) {
+	} else if (!WARN_ON_ONCE(rcu_preempt_depth())) {
 		rcu_qs(); /* Report immediate QS. */
 		return;
 	}
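
The last hunk leans on the fact that WARN_ON_ONCE() evaluates to the truth value of its condition, so a single expression both flags a leftover nonzero depth and gates the immediate quiescent-state report. A user-space approximation of the pattern (GNU C; the real macro lives in include/asm-generic/bug.h, and rcu_preempt_depth_demo is a hypothetical stand-in):

#include <stdio.h>

/* Simplified stand-in for the kernel's WARN_ON_ONCE(): report the
 * condition at most once, and evaluate to it so callers can branch. */
#define WARN_ON_ONCE(cond) ({						\
	static int warned;						\
	int __c = !!(cond);						\
	if (__c && !warned) {						\
		warned = 1;						\
		fprintf(stderr, "WARN_ON_ONCE(%s)\n", #cond);		\
	}								\
	__c;								\
})

static int rcu_preempt_depth_demo = 0;	/* hypothetical stand-in */

int main(void)
{
	if (!WARN_ON_ONCE(rcu_preempt_depth_demo))
		puts("depth is zero: report immediate QS");
	return 0;
}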