Commit 1f1893d7 authored by Thomas Gleixner's avatar Thomas Gleixner
Browse files

Merge branch 'timers/core' of...

Merge branch 'timers/core' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/linux-dynticks into timers/core

Pull tick/NOHZ updates from Frederic Weisbecker:

 - A fix for rare jiffies update stalls that were reported by Paul McKenney

 - Tick side cleanups after RCU_FAST_NO_HZ removal

 - Handle softirqs on idle more gracefully

Link: https://lore.kernel.org/all/20220307233034.34550-1-frederic@kernel.org
parents 58dedf0a f96272a9
Loading
Loading
Loading
Loading
+10 −1
Original line number Diff line number Diff line
@@ -579,7 +579,16 @@ enum
	NR_SOFTIRQS
};

#define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))
/*
 * The following vectors can be safely ignored after ksoftirqd is parked:
 *
 * _ RCU:
 * 	1) rcutree_migrate_callbacks() migrates the queue.
 * 	2) rcu_report_dead() reports the final quiescent states.
 *
 * _ IRQ_POLL: irq_poll_cpu_dead() migrates the queue
 */
#define SOFTIRQ_HOTPLUG_SAFE_MASK (BIT(RCU_SOFTIRQ) | BIT(IRQ_POLL_SOFTIRQ))

/* map softirq index to softirq name. update 'softirq_to_name' in
 * kernel/softirq.c when adding a new softirq.
+1 −2
Original line number Diff line number Diff line
@@ -64,9 +64,8 @@ static inline void rcu_softirq_qs(void)
		rcu_tasks_qs(current, (preempt)); \
	} while (0)

static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
static inline int rcu_needs_cpu(void)
{
	*nextevt = KTIME_MAX;
	return 0;
}

+1 −1
Original line number Diff line number Diff line
@@ -19,7 +19,7 @@

void rcu_softirq_qs(void);
void rcu_note_context_switch(bool preempt);
int rcu_needs_cpu(u64 basem, u64 *nextevt);
int rcu_needs_cpu(void);
void rcu_cpu_stall_reset(void);

/*
+1 −2
Original line number Diff line number Diff line
@@ -1086,9 +1086,8 @@ void rcu_irq_enter_irqson(void)
 * Just check whether or not this CPU has non-offloaded RCU callbacks
 * queued.
 */
int rcu_needs_cpu(u64 basemono, u64 *nextevt)
int rcu_needs_cpu(void)
{
	*nextevt = KTIME_MAX;
	return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist) &&
		!rcu_rdp_is_offloaded(this_cpu_ptr(&rcu_data));
}
+61 −16
Original line number Diff line number Diff line
@@ -169,6 +169,8 @@ static ktime_t tick_init_jiffy_update(void)
	return period;
}

#define MAX_STALLED_JIFFIES 5

static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
{
	int cpu = smp_processor_id();
@@ -196,6 +198,21 @@ static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
	if (tick_do_timer_cpu == cpu)
		tick_do_update_jiffies64(now);

	/*
	 * If jiffies update stalled for too long (timekeeper in stop_machine()
	 * or VMEXIT'ed for several msecs), force an update.
	 */
	if (ts->last_tick_jiffies != jiffies) {
		ts->stalled_jiffies = 0;
		ts->last_tick_jiffies = READ_ONCE(jiffies);
	} else {
		if (++ts->stalled_jiffies == MAX_STALLED_JIFFIES) {
			tick_do_update_jiffies64(now);
			ts->stalled_jiffies = 0;
			ts->last_tick_jiffies = READ_ONCE(jiffies);
		}
	}

	if (ts->inidle)
		ts->got_idle_tick = 1;
}
@@ -768,7 +785,7 @@ static inline bool local_timer_softirq_pending(void)

static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
{
	u64 basemono, next_tick, next_tmr, next_rcu, delta, expires;
	u64 basemono, next_tick, delta, expires;
	unsigned long basejiff;
	unsigned int seq;

@@ -791,7 +808,7 @@ static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
	 * minimal delta which brings us back to this place
	 * immediately. Lather, rinse and repeat...
	 */
	if (rcu_needs_cpu(basemono, &next_rcu) || arch_needs_cpu() ||
	if (rcu_needs_cpu() || arch_needs_cpu() ||
	    irq_work_needs_cpu() || local_timer_softirq_pending()) {
		next_tick = basemono + TICK_NSEC;
	} else {
@@ -802,10 +819,8 @@ static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
		 * disabled this also looks at the next expiring
		 * hrtimer.
		 */
		next_tmr = get_next_timer_interrupt(basejiff, basemono);
		ts->next_timer = next_tmr;
		/* Take the next rcu event into account */
		next_tick = next_rcu < next_tmr ? next_rcu : next_tmr;
		next_tick = get_next_timer_interrupt(basejiff, basemono);
		ts->next_timer = next_tick;
	}

	/*
@@ -984,6 +999,45 @@ static void tick_nohz_full_update_tick(struct tick_sched *ts)
	__tick_nohz_full_update_tick(ts, ktime_get());
}

/*
 * A pending softirq outside an IRQ (or softirq disabled section) context
 * should be waiting for ksoftirqd to handle it. Therefore we shouldn't
 * reach here due to the need_resched() early check in can_stop_idle_tick().
 *
 * However if we are between CPUHP_AP_SMPBOOT_THREADS and CPUHP_TEARDOWN_CPU
 * on the cpu_down() path, softirqs can still be raised while ksoftirqd is
 * parked, triggering the warning below because wakeup_softirqd() is ignored.
 */
static bool report_idle_softirq(void)
{
	static int ratelimit;
	unsigned int pending = local_softirq_pending();

	/* Nothing pending: stopping the tick is fine. */
	if (likely(!pending))
		return false;

	/* Some softirqs claim to be safe against hotplug and ksoftirqd parking */
	if (!cpu_active(smp_processor_id())) {
		pending &= ~SOFTIRQ_HOTPLUG_SAFE_MASK;
		if (!pending)
			return false;
	}

	/*
	 * Rate-limit to ten reports. The check must be ">=": ratelimit is
	 * only incremented after a report, so "< 10" would return early on
	 * every call and the warning could never fire at all.
	 */
	if (ratelimit >= 10)
		return false;

	/*
	 * On RT, softirq handling may legitimately be waiting on a lock held
	 * by the preempted BH-disabled task — don't warn in that case. The
	 * warning is only meaningful when BH is NOT blocked.
	 */
	if (local_bh_blocked())
		return false;

	pr_warn("NOHZ tick-stop error: local softirq work is pending, handler #%02x!!!\n",
		pending);
	ratelimit++;

	return true;
}

static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
{
	/*
@@ -1010,17 +1064,8 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
	if (need_resched())
		return false;

	if (unlikely(local_softirq_pending())) {
		static int ratelimit;

		if (ratelimit < 10 && !local_bh_blocked() &&
		    (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
			pr_warn("NOHZ tick-stop error: Non-RCU local softirq work is pending, handler #%02x!!!\n",
				(unsigned int) local_softirq_pending());
			ratelimit++;
		}
	if (unlikely(report_idle_softirq()))
		return false;
	}

	if (tick_nohz_full_enabled()) {
		/*
Loading