Commit 97c0054d authored by Will Deacon, committed by Peter Zijlstra

cpuset: Cleanup cpuset_cpus_allowed_fallback() use in select_fallback_rq()

select_fallback_rq() only needs to recheck for an allowed CPU if the
affinity mask of the task has changed since the last check.

Return a 'bool' from cpuset_cpus_allowed_fallback() to indicate whether
the affinity mask was updated, and use this to elide the allowed check
when the mask has been left alone.

No functional change.

Suggested-by: Valentin Schneider <valentin.schneider@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Link: https://lore.kernel.org/r/20210730112443.23245-5-will@kernel.org
parent 431c69fa
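
For context, a minimal sketch of the call pattern this enables in select_fallback_rq() (the retry loop, the fallthrough into the 'possible' state, and the error handling are simplified from kernel/sched/core.c; treat it as illustrative rather than verbatim):

	for (;;) {
		/* Scan p->cpus_ptr for an online, allowed CPU; return it if found. */

		switch (state) {
		case cpuset:
			if (cpuset_cpus_allowed_fallback(p)) {
				/* Affinity mask was rewritten: rescan it. */
				state = possible;
				break;
			}
			/* Mask left alone: skip the rescan and widen instead. */
			fallthrough;
		case possible:
			do_set_cpus_allowed(p, task_cpu_possible_mask(p));
			state = fail;
			break;
		case fail:
			BUG();
			break;
		}
	}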
include/linux/cpuset.h +3 −2
@@ -59,7 +59,7 @@ extern void cpuset_wait_for_hotplug(void);
 extern void cpuset_read_lock(void);
 extern void cpuset_read_unlock(void);
 extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
-extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
+extern bool cpuset_cpus_allowed_fallback(struct task_struct *p);
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
 #define cpuset_current_mems_allowed (current->mems_allowed)
 void cpuset_init_current_mems_allowed(void);
@@ -188,8 +188,9 @@ static inline void cpuset_cpus_allowed(struct task_struct *p,
 	cpumask_copy(mask, task_cpu_possible_mask(p));
 }

-static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
+static inline bool cpuset_cpus_allowed_fallback(struct task_struct *p)
 {
+	return false;
 }

 static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
kernel/cgroup/cpuset.c +8 −2
@@ -3327,17 +3327,22 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
  * which will not contain a sane cpumask during cases such as cpu hotplugging.
  * This is the absolute last resort for the scheduler and it is only used if
  * _every_ other avenue has been traveled.
+ *
+ * Returns true if the affinity of @tsk was changed, false otherwise.
  **/

-void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
+bool cpuset_cpus_allowed_fallback(struct task_struct *tsk)
 {
 	const struct cpumask *possible_mask = task_cpu_possible_mask(tsk);
 	const struct cpumask *cs_mask;
+	bool changed = false;

 	rcu_read_lock();
 	cs_mask = task_cs(tsk)->cpus_allowed;
-	if (is_in_v2_mode() && cpumask_subset(cs_mask, possible_mask))
+	if (is_in_v2_mode() && cpumask_subset(cs_mask, possible_mask)) {
 		do_set_cpus_allowed(tsk, cs_mask);
+		changed = true;
+	}
 	rcu_read_unlock();

 	/*
@@ -3357,6 +3362,7 @@ void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
 	 * select_fallback_rq() will fix things ups and set cpu_possible_mask
 	 * if required.
 	 */
+	return changed;
 }

 void __init cpuset_init_current_mems_allowed(void)
kernel/sched/core.c +1 −2
@@ -3141,8 +3141,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
 		/* No more Mr. Nice Guy. */
 		switch (state) {
 		case cpuset:
-			if (IS_ENABLED(CONFIG_CPUSETS)) {
-				cpuset_cpus_allowed_fallback(p);
+			if (cpuset_cpus_allowed_fallback(p)) {
 				state = possible;
 				break;
 			}