Commit d38c8fe4 authored by Paul E. McKenney's avatar Paul E. McKenney
Browse files

Merge branches 'doc.2022.06.21a', 'fixes.2022.07.19a', 'nocb.2022.07.19a',...

Merge branches 'doc.2022.06.21a', 'fixes.2022.07.19a', 'nocb.2022.07.19a', 'poll.2022.07.21a', 'rcu-tasks.2022.06.21a' and 'torture.2022.06.21a' into HEAD

doc.2022.06.21a: Documentation updates.
fixes.2022.07.19a: Miscellaneous fixes.
nocb.2022.07.19a: Callback-offload updates.
poll.2022.07.21a: Polled grace-period updates.
rcu-tasks.2022.06.21a: Tasks RCU updates.
torture.2022.06.21a: Torture-test updates.
Loading
Loading
Loading
Loading
+24 −0
Original line number | Diff line number | Diff line
@@ -3659,6 +3659,9 @@
			just as if they had also been called out in the
			rcu_nocbs= boot parameter.

			Note that this argument takes precedence over
			the CONFIG_RCU_NOCB_CPU_DEFAULT_ALL option.

	noiotrap	[SH] Disables trapped I/O port accesses.

	noirqdebug	[X86-32] Disables the code which attempts to detect and
@@ -4557,6 +4560,9 @@
			no-callback mode from boot but the mode may be
			toggled at runtime via cpusets.

			Note that this argument takes precedence over
			the CONFIG_RCU_NOCB_CPU_DEFAULT_ALL option.

	rcu_nocb_poll	[KNL]
			Rather than requiring that offloaded CPUs
			(specified by rcu_nocbs= above) explicitly
@@ -5799,6 +5805,24 @@
			expediting.  Set to zero to disable automatic
			expediting.

	srcutree.srcu_max_nodelay [KNL]
			Specifies the number of no-delay instances
			per jiffy for which the SRCU grace period
			worker thread will be rescheduled with zero
			delay. Beyond this limit, worker thread will
			be rescheduled with a sleep delay of one jiffy.

	srcutree.srcu_max_nodelay_phase [KNL]
			Specifies the per-grace-period phase, number of
			non-sleeping polls of readers. Beyond this limit,
			grace period worker thread will be rescheduled
			with a sleep delay of one jiffy, between each
			rescan of the readers, for a grace period phase.

	srcutree.srcu_retry_check_delay [KNL]
			Specifies number of microseconds of non-sleeping
			delay between each non-sleeping poll of readers.

	srcutree.small_contention_lim [KNL]
			Specifies the number of update-side contention
			events per jiffy will be tolerated before
+20 −8
Original line number Diff line number Diff line
@@ -41,6 +41,7 @@ void call_rcu(struct rcu_head *head, rcu_callback_t func);
void rcu_barrier_tasks(void);
void rcu_barrier_tasks_rude(void);
void synchronize_rcu(void);
unsigned long get_completed_synchronize_rcu(void);

#ifdef CONFIG_PREEMPT_RCU

@@ -169,12 +170,23 @@ void synchronize_rcu_tasks(void);
# endif

# ifdef CONFIG_TASKS_TRACE_RCU
// Bits for ->trc_reader_special.b.need_qs field.
#define TRC_NEED_QS		0x1  // Task needs a quiescent state.
#define TRC_NEED_QS_CHECKED	0x2  // Task has been checked for needing quiescent state.

u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new);
void rcu_tasks_trace_qs_blkd(struct task_struct *t);

# define rcu_tasks_trace_qs(t)							\
	do {									\
		if (!likely(READ_ONCE((t)->trc_reader_checked)) &&	\
		    !unlikely(READ_ONCE((t)->trc_reader_nesting))) {	\
			smp_store_release(&(t)->trc_reader_checked, true); \
			smp_mb(); /* Readers partitioned by store. */	\
		int ___rttq_nesting = READ_ONCE((t)->trc_reader_nesting);	\
										\
		if (likely(!READ_ONCE((t)->trc_reader_special.b.need_qs)) &&	\
		    likely(!___rttq_nesting)) {					\
			rcu_trc_cmpxchg_need_qs((t), 0,	TRC_NEED_QS_CHECKED);	\
		} else if (___rttq_nesting && ___rttq_nesting != INT_MIN &&	\
			   !READ_ONCE((t)->trc_reader_special.b.blocked)) {	\
			rcu_tasks_trace_qs_blkd(t);				\
		}								\
	} while (0)
# else
@@ -184,7 +196,7 @@ void synchronize_rcu_tasks(void);
#define rcu_tasks_qs(t, preempt)					\
do {									\
	rcu_tasks_classic_qs((t), (preempt));				\
	rcu_tasks_trace_qs((t));					\
	rcu_tasks_trace_qs(t);						\
} while (0)

# ifdef CONFIG_TASKS_RUDE_RCU
+1 −1
Original line number Diff line number Diff line
@@ -75,7 +75,7 @@ static inline void rcu_read_unlock_trace(void)
	nesting = READ_ONCE(t->trc_reader_nesting) - 1;
	barrier(); // Critical section before disabling.
	// Disable IPI-based setting of .need_qs.
	WRITE_ONCE(t->trc_reader_nesting, INT_MIN);
	WRITE_ONCE(t->trc_reader_nesting, INT_MIN + nesting);
	if (likely(!READ_ONCE(t->trc_reader_special.s)) || nesting) {
		WRITE_ONCE(t->trc_reader_nesting, nesting);
		return;  // We assume shallow reader nesting.
+20 −1
Original line number Diff line number Diff line
@@ -23,6 +23,16 @@ static inline void cond_synchronize_rcu(unsigned long oldstate)
	might_sleep();
}

/*
 * Stub for kernels without separate expedited grace-period machinery:
 * starting an expedited poll is exactly starting a normal poll, so
 * forward to start_poll_synchronize_rcu() and return its cookie.
 * (NOTE(review): presumably the Tiny-RCU !SMP variant pairing the
 * extern declarations seen in the tree header hunk — confirm filename.)
 */
static inline unsigned long start_poll_synchronize_rcu_expedited(void)
{
	return start_poll_synchronize_rcu();
}

/*
 * Stub for kernels without separate expedited grace-period machinery:
 * conditionally waiting for an expedited grace period is the same as
 * the non-expedited case, so delegate to cond_synchronize_rcu().
 * @oldstate: cookie previously returned by a start/get polling API.
 */
static inline void cond_synchronize_rcu_expedited(unsigned long oldstate)
{
	cond_synchronize_rcu(oldstate);
}

extern void rcu_barrier(void);

static inline void synchronize_rcu_expedited(void)
@@ -38,7 +48,7 @@ static inline void synchronize_rcu_expedited(void)
 */
extern void kvfree(const void *addr);

static inline void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
static inline void __kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	if (head) {
		call_rcu(head, func);
@@ -51,6 +61,15 @@ static inline void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
	kvfree((void *) func);
}

#ifdef CONFIG_KASAN_GENERIC
void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
#else
static inline void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	/*
	 * !CONFIG_KASAN_GENERIC build (see the #ifdef just above): no
	 * instrumented out-of-line version is needed, so call the common
	 * __kvfree_call_rcu() implementation directly.
	 */
	__kvfree_call_rcu(head, func);
}
#endif

void rcu_qs(void);

static inline void rcu_softirq_qs(void)
+2 −0
Original line number Diff line number Diff line
@@ -40,6 +40,8 @@ bool rcu_eqs_special_set(int cpu);
void rcu_momentary_dyntick_idle(void);
void kfree_rcu_scheduler_running(void);
bool rcu_gp_might_be_stalled(void);
unsigned long start_poll_synchronize_rcu_expedited(void);
void cond_synchronize_rcu_expedited(unsigned long oldstate);
unsigned long get_state_synchronize_rcu(void);
unsigned long start_poll_synchronize_rcu(void);
bool poll_state_synchronize_rcu(unsigned long oldstate);
Loading