Commit 277ffe1b authored by Zhouyi Zhou, committed by Paul E. McKenney
Browse files

rcu: Improve tree.c comments and add code cleanups



This commit cleans up some comments and code in kernel/rcu/tree.c.

Signed-off-by: Zhouyi Zhou <zhouzhouyi@gmail.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
parent ce7c169d
Loading
Loading
Loading
Loading
+12 −12
Original line number Diff line number Diff line
@@ -202,7 +202,7 @@ EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);
 * the need for long delays to increase some race probabilities with the
 * need for fast grace periods to increase other race probabilities.
 */
#define PER_RCU_NODE_PERIOD 3	/* Number of grace periods between delays. */
#define PER_RCU_NODE_PERIOD 3	/* Number of grace periods between delays for debugging. */

/*
 * Compute the mask of online CPUs for the specified rcu_node structure.
@@ -937,7 +937,7 @@ EXPORT_SYMBOL_GPL(rcu_idle_exit);
 */
void noinstr rcu_user_exit(void)
{
	rcu_eqs_exit(1);
	rcu_eqs_exit(true);
}

/**
@@ -1203,7 +1203,7 @@ EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
#endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */

/*
 * We are reporting a quiescent state on behalf of some other CPU, so
 * When trying to report a quiescent state on behalf of some other CPU,
 * it is our responsibility to check for and handle potential overflow
 * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
 * After all, the CPU might be in deep idle state, and thus executing no
@@ -2607,7 +2607,7 @@ static void rcu_do_batch(struct rcu_data *rdp)
 * state, for example, user mode or idle loop.  It also schedules RCU
 * core processing.  If the current grace period has gone on too long,
 * it will ask the scheduler to manufacture a context switch for the sole
 * purpose of providing a providing the needed quiescent state.
 * purpose of providing the needed quiescent state.
 */
void rcu_sched_clock_irq(int user)
{
@@ -3236,7 +3236,7 @@ put_cached_bnode(struct kfree_rcu_cpu *krcp,

/*
 * This function is invoked in workqueue context after a grace period.
 * It frees all the objects queued on ->bhead_free or ->head_free.
 * It frees all the objects queued on ->bkvhead_free or ->head_free.
 */
static void kfree_rcu_work(struct work_struct *work)
{
@@ -3263,7 +3263,7 @@ static void kfree_rcu_work(struct work_struct *work)
	krwp->head_free = NULL;
	raw_spin_unlock_irqrestore(&krcp->lock, flags);

	// Handle two first channels.
	// Handle the first two channels.
	for (i = 0; i < FREE_N_CHANNELS; i++) {
		for (; bkvhead[i]; bkvhead[i] = bnext) {
			bnext = bkvhead[i]->next;
@@ -3530,11 +3530,11 @@ add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
}

/*
 * Queue a request for lazy invocation of appropriate free routine after a
 * grace period. Please note there are three paths are maintained, two are the
 * main ones that use array of pointers interface and third one is emergency
 * one, that is used only when the main path can not be maintained temporary,
 * due to memory pressure.
 * Queue a request for lazy invocation of the appropriate free routine
 * after a grace period.  Please note that three paths are maintained,
 * two for the common case using arrays of pointers and a third one that
 * is used only when the main paths cannot be used, for example, due to
 * memory pressure.
 *
 * Each kvfree_call_rcu() request is added to a batch. The batch will be drained
 * every KFREE_DRAIN_JIFFIES number of jiffies. All the objects in the batch will
@@ -4708,7 +4708,7 @@ void __init rcu_init(void)
		rcutree_online_cpu(cpu);
	}

	/* Create workqueue for expedited GPs and for Tree SRCU. */
	/* Create workqueue for Tree SRCU and for expedited GPs. */
	rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
	WARN_ON(!rcu_gp_wq);
	rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0);