Unverified Commit f20ae83d authored by openeuler-ci-bot's avatar openeuler-ci-bot Committed by Gitee
Browse files

!9241 v2 Bugfix backport for rcu

Merge Pull Request from: @ci-robot 
 
PR sync from: Wei Li <liwei391@huawei.com>
https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/VW2YPQE5JBKCW7MSZRNA4S2YCCCYVHE2/ 
Backport 3 bugfix patches for rcu from mainline.

v1 -> v2:
 - Fix conflict in patch 2

Frederic Weisbecker (3):
  rcu: Defer RCU kthreads wakeup when CPU is dying
  entry/rcu: Check TIF_RESCHED _after_ delayed RCU wake-up
  srcu: Fix callbacks acceleration mishandling


-- 
2.25.1
 
https://gitee.com/openeuler/kernel/issues/I9NZ3E 
 
Link: https://gitee.com/openeuler/kernel/pulls/9241

 

Reviewed-by: default avatarXiongfeng Wang <wangxiongfeng2@huawei.com>
Signed-off-by: default avatarJialin Zhang <zhangjialin11@huawei.com>
parents 651a5774 2afe3ba0
Loading
Loading
Loading
Loading
+2 −1
Original line number Diff line number Diff line
@@ -195,13 +195,14 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,

static void exit_to_user_mode_prepare(struct pt_regs *regs)
{
	unsigned long ti_work = read_thread_flags();
	unsigned long ti_work;

	lockdep_assert_irqs_disabled();

	/* Flush pending rcuog wakeup before the last need_resched() check */
	rcu_nocb_flush_deferred_wakeup();

	ti_work = read_thread_flags();
	if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
		ti_work = exit_to_user_mode_loop(regs, ti_work);

+29 −2
Original line number Diff line number Diff line
@@ -829,10 +829,37 @@ static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
	spin_lock_irqsave_rcu_node(sdp, flags);
	if (rhp)
		rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
	/*
	 * The snapshot for acceleration must be taken _before_ the read of the
	 * current gp sequence used for advancing, otherwise advancing may fail
	 * and acceleration may then fail too.
	 *
	 * This could happen if:
	 *
	 *  1) The RCU_WAIT_TAIL segment has callbacks (gp_num = X + 4) and the
	 *     RCU_NEXT_READY_TAIL also has callbacks (gp_num = X + 8).
	 *
	 *  2) The grace period for RCU_WAIT_TAIL is seen as started but not
	 *     completed so rcu_seq_current() returns X + SRCU_STATE_SCAN1.
	 *
	 *  3) This value is passed to rcu_segcblist_advance() which can't move
	 *     any segment forward and fails.
	 *
	 *  4) srcu_gp_start_if_needed() still proceeds with callback acceleration.
	 *     But then the call to rcu_seq_snap() observes the grace period for the
	 *     RCU_WAIT_TAIL segment as completed and the subsequent one for the
	 *     RCU_NEXT_READY_TAIL segment as started (ie: X + 4 + SRCU_STATE_SCAN1)
	 *     so it returns a snapshot of the next grace period, which is X + 12.
	 *
	 *  5) The value of X + 12 is passed to rcu_segcblist_accelerate() but the
	 *     freshly enqueued callback in RCU_NEXT_TAIL can't move to
	 *     RCU_NEXT_READY_TAIL which already has callbacks for a previous grace
	 *     period (gp_num = X + 8). So acceleration fails.
	 */
	s = rcu_seq_snap(&ssp->srcu_gp_seq);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	s = rcu_seq_snap(&ssp->srcu_gp_seq);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
	WARN_ON_ONCE(!rcu_segcblist_accelerate(&sdp->srcu_cblist, s) && rhp);
	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
		sdp->srcu_gp_seq_needed = s;
		needgp = true;
+33 −1
Original line number Diff line number Diff line
@@ -1483,6 +1483,38 @@ static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
	return needmore;
}

/*
 * IPI handler: wake one waiter on the swait queue passed in @arg.
 * Runs on an online CPU on behalf of a CPU that could not safely
 * perform the wakeup itself (see swake_up_one_online()).
 */
static void swake_up_one_online_ipi(void *arg)
{
	struct swait_queue_head *wqh = arg;

	swake_up_one(wqh);
}

/*
 * Wake one waiter on @wqh, deferring the wakeup to an online
 * housekeeping CPU via IPI if the current CPU is already offline.
 */
static void swake_up_one_online(struct swait_queue_head *wqh)
{
	int cpu = get_cpu();

	/*
	 * Performing the wakeup directly is dangerous late in the
	 * CPU-down hotplug process (e.g. when reached from
	 * rcutree_report_cpu_starting() on a dying CPU): the scheduler
	 * might queue an hrtimer that would be ignored.  Defer the
	 * wake up to an online CPU instead.
	 */
	if (unlikely(cpu_is_offline(cpu))) {
		int target;

		/* Pick any online RCU-housekeeping CPU to run the wakeup. */
		target = cpumask_any_and(housekeeping_cpumask(HK_FLAG_RCU),
					 cpu_online_mask);

		smp_call_function_single(target, swake_up_one_online_ipi,
					 wqh, 0);
		put_cpu();
	} else {
		/* Online CPU: safe to wake directly once preemption is re-enabled. */
		put_cpu();
		swake_up_one(wqh);
	}
}

/*
 * Awaken the grace-period kthread.  Don't do a self-awaken (unless in an
 * interrupt or softirq handler, in which case we just might immediately
@@ -1507,7 +1539,7 @@ static void rcu_gp_kthread_wake(void)
		return;
	WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
	WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
	swake_up_one(&rcu_state.gp_wq);
	swake_up_one_online(&rcu_state.gp_wq);
}

/*
+1 −2
Original line number Diff line number Diff line
@@ -170,7 +170,6 @@ static bool sync_rcu_exp_done_unlocked(struct rcu_node *rnp)
	return ret;
}


/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
@@ -198,7 +197,7 @@ static void __rcu_report_exp_rnp(struct rcu_node *rnp,
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			if (wake) {
				smp_mb(); /* EGP done before wake_up(). */
				swake_up_one(&rcu_state.expedited_wq);
				swake_up_one_online(&rcu_state.expedited_wq);
			}
			break;
		}