Commit 27c0f144 authored by Paul E. McKenney

rcutorture: Make grace-period kthread report match RCU flavor being tested



At the end of the test and after rcu_torture_writer() stalls, rcutorture
invokes show_rcu_gp_kthreads() in order to dump out information on the
RCU grace-period kthread.  This makes a lot of sense when testing vanilla
RCU, but not so much for the other flavors.  This commit therefore allows
per-flavor kthread-dump functions to be specified.

[ paulmck: Apply feedback from kernel test robot <lkp@intel.com>. ]
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
parent 77dc1741
3 files changed
+16 −0
Original line number Diff line number Diff line
@@ -533,4 +533,20 @@ static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
static inline void rcu_bind_current_to_nocb(void) { }
#endif

#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_RCU)
void show_rcu_tasks_classic_gp_kthread(void);
#else
static inline void show_rcu_tasks_classic_gp_kthread(void) {}
#endif
#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_RUDE_RCU)
void show_rcu_tasks_rude_gp_kthread(void);
#else
static inline void show_rcu_tasks_rude_gp_kthread(void) {}
#endif
#if !defined(CONFIG_TINY_RCU) && defined(CONFIG_TASKS_TRACE_RCU)
void show_rcu_tasks_trace_gp_kthread(void);
#else
static inline void show_rcu_tasks_trace_gp_kthread(void) {}
#endif

#endif /* __LINUX_RCU_H */
+9 −2
Original line number Diff line number Diff line
@@ -317,6 +317,7 @@ struct rcu_torture_ops {
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(void);
	void (*gp_kthread_dbg)(void);
	int (*stall_dur)(void);
	int irq_capable;
	int can_boost;
@@ -466,6 +467,7 @@ static struct rcu_torture_ops rcu_ops = {
	.cb_barrier	= rcu_barrier,
	.fqs		= rcu_force_quiescent_state,
	.stats		= NULL,
	.gp_kthread_dbg	= show_rcu_gp_kthreads,
	.stall_dur	= rcu_jiffies_till_stall_check,
	.irq_capable	= 1,
	.can_boost	= rcu_can_boost(),
@@ -693,6 +695,7 @@ static struct rcu_torture_ops tasks_ops = {
	.exp_sync	= synchronize_rcu_mult_test,
	.call		= call_rcu_tasks,
	.cb_barrier	= rcu_barrier_tasks,
	.gp_kthread_dbg	= show_rcu_tasks_classic_gp_kthread,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
@@ -762,6 +765,7 @@ static struct rcu_torture_ops tasks_rude_ops = {
	.exp_sync	= synchronize_rcu_tasks_rude,
	.call		= call_rcu_tasks_rude,
	.cb_barrier	= rcu_barrier_tasks_rude,
	.gp_kthread_dbg	= show_rcu_tasks_rude_gp_kthread,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
@@ -800,6 +804,7 @@ static struct rcu_torture_ops tasks_tracing_ops = {
	.exp_sync	= synchronize_rcu_tasks_trace,
	.call		= call_rcu_tasks_trace,
	.cb_barrier	= rcu_barrier_tasks_trace,
	.gp_kthread_dbg	= show_rcu_tasks_trace_gp_kthread,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
@@ -1594,7 +1599,8 @@ rcu_torture_stats_print(void)
			sched_show_task(wtp);
			splatted = true;
		}
		show_rcu_gp_kthreads();
		if (cur_ops->gp_kthread_dbg)
			cur_ops->gp_kthread_dbg();
		rcu_ftrace_dump(DUMP_ALL);
	}
	rtcv_snap = rcu_torture_current_version;
@@ -2472,7 +2478,8 @@ rcu_torture_cleanup(void)
		return;
	}

	show_rcu_gp_kthreads();
	if (cur_ops->gp_kthread_dbg)
		cur_ops->gp_kthread_dbg();
	rcu_torture_read_exit_cleanup();
	rcu_torture_barrier_cleanup();
	rcu_torture_fwd_prog_cleanup();
+14 −16
Original line number Diff line number Diff line
@@ -290,7 +290,7 @@ static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
		".C"[!!data_race(rtp->cbs_head)],
		s);
}
#endif /* #ifndef CONFIG_TINY_RCU */
#endif // #ifndef CONFIG_TINY_RCU

static void exit_tasks_rcu_finish_trace(struct task_struct *t);

@@ -568,12 +568,13 @@ static int __init rcu_spawn_tasks_kthread(void)
}
core_initcall(rcu_spawn_tasks_kthread);

#ifndef CONFIG_TINY_RCU
static void show_rcu_tasks_classic_gp_kthread(void)
#if !defined(CONFIG_TINY_RCU)
void show_rcu_tasks_classic_gp_kthread(void)
{
	show_rcu_tasks_generic_gp_kthread(&rcu_tasks, "");
}
#endif /* #ifndef CONFIG_TINY_RCU */
EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread);
#endif // !defined(CONFIG_TINY_RCU)

/* Do the srcu_read_lock() for the above synchronize_srcu().  */
void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu)
@@ -595,7 +596,6 @@ void exit_tasks_rcu_finish(void) __releases(&tasks_rcu_exit_srcu)
}

#else /* #ifdef CONFIG_TASKS_RCU */
static inline void show_rcu_tasks_classic_gp_kthread(void) { }
void exit_tasks_rcu_start(void) { }
void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
#endif /* #else #ifdef CONFIG_TASKS_RCU */
@@ -696,16 +696,14 @@ static int __init rcu_spawn_tasks_rude_kthread(void)
}
core_initcall(rcu_spawn_tasks_rude_kthread);

#ifndef CONFIG_TINY_RCU
static void show_rcu_tasks_rude_gp_kthread(void)
#if !defined(CONFIG_TINY_RCU)
void show_rcu_tasks_rude_gp_kthread(void)
{
	show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, "");
}
#endif /* #ifndef CONFIG_TINY_RCU */

#else /* #ifdef CONFIG_TASKS_RUDE_RCU */
static void show_rcu_tasks_rude_gp_kthread(void) {}
#endif /* #else #ifdef CONFIG_TASKS_RUDE_RCU */
EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread);
#endif // !defined(CONFIG_TINY_RCU)
#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */

////////////////////////////////////////////////////////////////////////
//
@@ -1199,8 +1197,8 @@ static int __init rcu_spawn_tasks_trace_kthread(void)
}
core_initcall(rcu_spawn_tasks_trace_kthread);

#ifndef CONFIG_TINY_RCU
static void show_rcu_tasks_trace_gp_kthread(void)
#if !defined(CONFIG_TINY_RCU)
void show_rcu_tasks_trace_gp_kthread(void)
{
	char buf[64];

@@ -1210,11 +1208,11 @@ static void show_rcu_tasks_trace_gp_kthread(void)
		data_race(n_heavy_reader_attempts));
	show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf);
}
#endif /* #ifndef CONFIG_TINY_RCU */
EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread);
#endif // !defined(CONFIG_TINY_RCU)

#else /* #ifdef CONFIG_TASKS_TRACE_RCU */
static void exit_tasks_rcu_finish_trace(struct task_struct *t) { }
static inline void show_rcu_tasks_trace_gp_kthread(void) {}
#endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */

#ifndef CONFIG_TINY_RCU