Commit 14e292f8 authored by Peter Zijlstra
Browse files

sched,rt: Use cpumask_any*_distribute()



Replace a bunch of cpumask_any*() instances with
cpumask_any*_distribute(). By injecting this little bit of randomness
into CPU selection, we reduce the chance that two competing balance
operations working off the same lowest_mask pick the same CPU.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Reviewed-by: Daniel Bristot de Oliveira <bristot@redhat.com>
Link: https://lkml.kernel.org/r/20201023102347.190759694@infradead.org
parent 3015ef4b
Loading
Loading
Loading
Loading
+6 −0
Original line number Diff line number Diff line
@@ -199,6 +199,11 @@ static inline int cpumask_any_and_distribute(const struct cpumask *src1p,
	return cpumask_next_and(-1, src1p, src2p);
}

/*
 * Return any CPU set in @srcp, or >= nr_cpu_ids if @srcp is empty.
 *
 * NOTE(review): this appears to be the !SMP stub variant (the adjacent
 * for_each_cpu() here iterates over a single CPU), so there is nothing
 * to "distribute" over and cpumask_first() suffices — confirm against
 * the surrounding #ifdef in the full header.
 */
static inline int cpumask_any_distribute(const struct cpumask *srcp)
{
	return cpumask_first(srcp);
}

#define for_each_cpu(cpu, mask)			\
	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
#define for_each_cpu_not(cpu, mask)		\
@@ -252,6 +257,7 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
unsigned int cpumask_local_spread(unsigned int i, int node);
int cpumask_any_and_distribute(const struct cpumask *src1p,
			       const struct cpumask *src2p);
int cpumask_any_distribute(const struct cpumask *srcp);

/**
 * for_each_cpu - iterate over every cpu in a mask
+3 −3
Original line number Diff line number Diff line
@@ -2002,7 +2002,7 @@ static int find_later_rq(struct task_struct *task)
				return this_cpu;
			}

			best_cpu = cpumask_first_and(later_mask,
			best_cpu = cpumask_any_and_distribute(later_mask,
							      sched_domain_span(sd));
			/*
			 * Last chance: if a CPU being in both later_mask
@@ -2025,7 +2025,7 @@ static int find_later_rq(struct task_struct *task)
	if (this_cpu != -1)
		return this_cpu;

	cpu = cpumask_any(later_mask);
	cpu = cpumask_any_distribute(later_mask);
	if (cpu < nr_cpu_ids)
		return cpu;

+3 −3
Original line number Diff line number Diff line
@@ -1752,7 +1752,7 @@ static int find_lowest_rq(struct task_struct *task)
				return this_cpu;
			}

			best_cpu = cpumask_first_and(lowest_mask,
			best_cpu = cpumask_any_and_distribute(lowest_mask,
							      sched_domain_span(sd));
			if (best_cpu < nr_cpu_ids) {
				rcu_read_unlock();
@@ -1770,7 +1770,7 @@ static int find_lowest_rq(struct task_struct *task)
	if (this_cpu != -1)
		return this_cpu;

	cpu = cpumask_any(lowest_mask);
	cpu = cpumask_any_distribute(lowest_mask);
	if (cpu < nr_cpu_ids)
		return cpu;

+18 −0
Original line number Diff line number Diff line
@@ -267,3 +267,21 @@ int cpumask_any_and_distribute(const struct cpumask *src1p,
	return next;
}
EXPORT_SYMBOL(cpumask_any_and_distribute);

/*
 * cpumask_any_distribute - pick a CPU from @srcp, rotating the choice
 *
 * Resumes the search after the per-CPU cursor (distribute_cpu_mask_prev)
 * and wraps around to the start of the mask, so that repeated callers on
 * the same CPU spread their picks across the mask instead of piling onto
 * the first set bit.
 *
 * Returns a CPU number, or >= nr_cpu_ids when @srcp is empty.
 */
int cpumask_any_distribute(const struct cpumask *srcp)
{
	/* NOTE: our first selection will skip 0. */
	int prev = __this_cpu_read(distribute_cpu_mask_prev);
	int next = cpumask_next(prev, srcp);

	if (next >= nr_cpu_ids)
		next = cpumask_first(srcp);

	/* Only advance the cursor when we actually found a CPU. */
	if (next < nr_cpu_ids)
		__this_cpu_write(distribute_cpu_mask_prev, next);

	return next;
}
EXPORT_SYMBOL(cpumask_any_distribute);