Commit 363ab6f1 authored by Mike Travis, committed by Thomas Gleixner

core: use performance variant for_each_cpu_mask_nr

Change references from for_each_cpu_mask to for_each_cpu_mask_nr where
appropriate (a short sketch of the difference between the two iterators
follows the commit metadata below).

Reviewed-by: Paul Jackson <pj@sgi.com>
Reviewed-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 068b1277
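
For context: for_each_cpu_mask() scans all NR_CPUS bit positions of a cpumask,
whereas for_each_cpu_mask_nr() stops at nr_cpu_ids, one past the highest
possible CPU on the running system. On kernels built with a large NR_CPUS but
booted on small machines, the _nr variant avoids testing thousands of bits
that can never be set. The standalone program below is only a hand-written
sketch of that idea; the simplified cpumask_t, the cpu_isset() helper, and the
macro bodies are stand-ins for illustration, not the kernel's actual
definitions.

/*
 * Standalone illustration (not kernel code): bounding the scan by
 * nr_cpu_ids is cheaper than walking all NR_CPUS bit positions.
 */
#include <stdio.h>

#define NR_CPUS		4096			/* compile-time maximum */
#define BITS_PER_LONG	(8 * (int)sizeof(unsigned long))

static int nr_cpu_ids = 8;			/* possible CPUs this boot (assumed) */

typedef struct {
	unsigned long bits[NR_CPUS / (8 * sizeof(unsigned long))];
} cpumask_t;

static int cpu_isset(int cpu, const cpumask_t *mask)
{
	return (mask->bits[cpu / BITS_PER_LONG] >> (cpu % BITS_PER_LONG)) & 1UL;
}

/* old style: test every one of the NR_CPUS positions */
#define for_each_cpu_mask(cpu, mask) \
	for ((cpu) = 0; (cpu) < NR_CPUS; (cpu)++) \
		if (!cpu_isset((cpu), &(mask))) ; else

/* _nr style: stop at nr_cpu_ids, skipping bits that can never be set */
#define for_each_cpu_mask_nr(cpu, mask) \
	for ((cpu) = 0; (cpu) < nr_cpu_ids; (cpu)++) \
		if (!cpu_isset((cpu), &(mask))) ; else

int main(void)
{
	cpumask_t online = { { 0 } };
	int cpu;

	online.bits[0] = 0x5;			/* pretend CPUs 0 and 2 are in the mask */

	for_each_cpu_mask_nr(cpu, online)
		printf("CPU%d is set (scanned %d slots, not %d)\n",
		       cpu, nr_cpu_ids, NR_CPUS);
	return 0;
}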
+1 −1
@@ -390,7 +390,7 @@ void __ref enable_nonboot_cpus(void)
		goto out;

	printk("Enabling non-boot CPUs ...\n");
-	for_each_cpu_mask(cpu, frozen_cpus) {
+	for_each_cpu_mask_nr(cpu, frozen_cpus) {
		error = _cpu_up(cpu, 1);
		if (!error) {
			printk("CPU%d is up\n", cpu);
+1 −1
@@ -92,7 +92,7 @@ static void force_quiescent_state(struct rcu_data *rdp,
		 */
		cpumask = rcp->cpumask;
		cpu_clear(rdp->cpu, cpumask);
-		for_each_cpu_mask(cpu, cpumask)
+		for_each_cpu_mask_nr(cpu, cpumask)
			smp_send_reschedule(cpu);
	}
}
+5 −5
@@ -657,7 +657,7 @@ rcu_try_flip_idle(void)

	/* Now ask each CPU for acknowledgement of the flip. */

-	for_each_cpu_mask(cpu, rcu_cpu_online_map) {
+	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) {
		per_cpu(rcu_flip_flag, cpu) = rcu_flipped;
		dyntick_save_progress_counter(cpu);
	}
@@ -675,7 +675,7 @@ rcu_try_flip_waitack(void)
	int cpu;

	RCU_TRACE_ME(rcupreempt_trace_try_flip_a1);
-	for_each_cpu_mask(cpu, rcu_cpu_online_map)
+	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
		if (rcu_try_flip_waitack_needed(cpu) &&
		    per_cpu(rcu_flip_flag, cpu) != rcu_flip_seen) {
			RCU_TRACE_ME(rcupreempt_trace_try_flip_ae1);
@@ -707,7 +707,7 @@ rcu_try_flip_waitzero(void)
	/* Check to see if the sum of the "last" counters is zero. */

	RCU_TRACE_ME(rcupreempt_trace_try_flip_z1);
-	for_each_cpu_mask(cpu, rcu_cpu_online_map)
+	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
		sum += RCU_DATA_CPU(cpu)->rcu_flipctr[lastidx];
	if (sum != 0) {
		RCU_TRACE_ME(rcupreempt_trace_try_flip_ze1);
@@ -722,7 +722,7 @@ rcu_try_flip_waitzero(void)
	smp_mb();  /*  ^^^^^^^^^^^^ */

	/* Call for a memory barrier from each CPU. */
-	for_each_cpu_mask(cpu, rcu_cpu_online_map) {
+	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map) {
		per_cpu(rcu_mb_flag, cpu) = rcu_mb_needed;
		dyntick_save_progress_counter(cpu);
	}
@@ -742,7 +742,7 @@ rcu_try_flip_waitmb(void)
	int cpu;

	RCU_TRACE_ME(rcupreempt_trace_try_flip_m1);
-	for_each_cpu_mask(cpu, rcu_cpu_online_map)
+	for_each_cpu_mask_nr(cpu, rcu_cpu_online_map)
		if (rcu_try_flip_waitmb_needed(cpu) &&
		    per_cpu(rcu_mb_flag, cpu) != rcu_mb_done) {
			RCU_TRACE_ME(rcupreempt_trace_try_flip_me1);
+18 −18
@@ -2271,7 +2271,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
		/* Tally up the load of all CPUs in the group */
		avg_load = 0;

-		for_each_cpu_mask(i, group->cpumask) {
+		for_each_cpu_mask_nr(i, group->cpumask) {
			/* Bias balancing toward cpus of our domain */
			if (local_group)
				load = source_load(i, load_idx);
@@ -2313,7 +2313,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu,
	/* Traverse only the allowed CPUs */
	cpus_and(*tmp, group->cpumask, p->cpus_allowed);

-	for_each_cpu_mask(i, *tmp) {
+	for_each_cpu_mask_nr(i, *tmp) {
		load = weighted_cpuload(i);

		if (load < min_load || (load == min_load && i == this_cpu)) {
@@ -3296,7 +3296,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
		max_cpu_load = 0;
		min_cpu_load = ~0UL;

-		for_each_cpu_mask(i, group->cpumask) {
+		for_each_cpu_mask_nr(i, group->cpumask) {
			struct rq *rq;

			if (!cpu_isset(i, *cpus))
@@ -3560,7 +3560,7 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
	unsigned long max_load = 0;
	int i;

-	for_each_cpu_mask(i, group->cpumask) {
+	for_each_cpu_mask_nr(i, group->cpumask) {
		unsigned long wl;

		if (!cpu_isset(i, *cpus))
@@ -4100,7 +4100,7 @@ static void run_rebalance_domains(struct softirq_action *h)
		int balance_cpu;

		cpu_clear(this_cpu, cpus);
-		for_each_cpu_mask(balance_cpu, cpus) {
+		for_each_cpu_mask_nr(balance_cpu, cpus) {
			/*
			 * If this cpu gets work to do, stop the load balancing
			 * work being done for other cpus. Next load
@@ -6832,7 +6832,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,

	cpus_clear(*covered);

-	for_each_cpu_mask(i, *span) {
+	for_each_cpu_mask_nr(i, *span) {
		struct sched_group *sg;
		int group = group_fn(i, cpu_map, &sg, tmpmask);
		int j;
@@ -6843,7 +6843,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
		cpus_clear(sg->cpumask);
		sg->__cpu_power = 0;

-		for_each_cpu_mask(j, *span) {
+		for_each_cpu_mask_nr(j, *span) {
			if (group_fn(j, cpu_map, NULL, tmpmask) != group)
				continue;

@@ -7043,7 +7043,7 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
	if (!sg)
		return;
	do {
-		for_each_cpu_mask(j, sg->cpumask) {
+		for_each_cpu_mask_nr(j, sg->cpumask) {
			struct sched_domain *sd;

			sd = &per_cpu(phys_domains, j);
@@ -7068,7 +7068,7 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
{
	int cpu, i;

-	for_each_cpu_mask(cpu, *cpu_map) {
+	for_each_cpu_mask_nr(cpu, *cpu_map) {
		struct sched_group **sched_group_nodes
			= sched_group_nodes_bycpu[cpu];

@@ -7302,7 +7302,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
	/*
	 * Set up domains for cpus specified by the cpu_map.
	 */
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
		struct sched_domain *sd = NULL, *p;
		SCHED_CPUMASK_VAR(nodemask, allmasks);

@@ -7374,7 +7374,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,

#ifdef CONFIG_SCHED_SMT
	/* Set up CPU (sibling) groups */
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
		SCHED_CPUMASK_VAR(this_sibling_map, allmasks);
		SCHED_CPUMASK_VAR(send_covered, allmasks);

@@ -7391,7 +7391,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,

#ifdef CONFIG_SCHED_MC
	/* Set up multi-core groups */
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
		SCHED_CPUMASK_VAR(this_core_map, allmasks);
		SCHED_CPUMASK_VAR(send_covered, allmasks);

@@ -7458,7 +7458,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
			goto error;
		}
		sched_group_nodes[i] = sg;
-		for_each_cpu_mask(j, *nodemask) {
+		for_each_cpu_mask_nr(j, *nodemask) {
			struct sched_domain *sd;

			sd = &per_cpu(node_domains, j);
@@ -7504,21 +7504,21 @@ static int __build_sched_domains(const cpumask_t *cpu_map,

	/* Calculate CPU power for physical packages and nodes */
#ifdef CONFIG_SCHED_SMT
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
		struct sched_domain *sd = &per_cpu(cpu_domains, i);

		init_sched_groups_power(i, sd);
	}
#endif
#ifdef CONFIG_SCHED_MC
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
		struct sched_domain *sd = &per_cpu(core_domains, i);

		init_sched_groups_power(i, sd);
	}
#endif

-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
		struct sched_domain *sd = &per_cpu(phys_domains, i);

		init_sched_groups_power(i, sd);
@@ -7538,7 +7538,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
#endif

	/* Attach the domains */
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
		struct sched_domain *sd;
#ifdef CONFIG_SCHED_SMT
		sd = &per_cpu(cpu_domains, i);
@@ -7621,7 +7621,7 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)

	unregister_sched_domain_sysctl();

-	for_each_cpu_mask(i, *cpu_map)
+	for_each_cpu_mask_nr(i, *cpu_map)
		cpu_attach_domain(NULL, &def_root_domain, i);
	synchronize_sched();
	arch_destroy_sched_domains(cpu_map, &tmpmask);
+1 −1
@@ -1022,7 +1022,7 @@ static int wake_idle(int cpu, struct task_struct *p)
		    || ((sd->flags & SD_WAKE_IDLE_FAR)
			&& !task_hot(p, task_rq(p)->clock, sd))) {
			cpus_and(tmp, sd->span, p->cpus_allowed);
-			for_each_cpu_mask(i, tmp) {
+			for_each_cpu_mask_nr(i, tmp) {
				if (idle_cpu(i)) {
					if (i != task_cpu(p)) {
						schedstat_inc(p,