Commit 5ae36401 authored by Sebastian Andrzej Siewior, committed by Michael Ellerman
Browse files

powerpc: Replace deprecated CPU-hotplug functions.



The functions get_online_cpus() and put_online_cpus() have been
deprecated during the CPU hotplug rework. They map directly to
cpus_read_lock() and cpus_read_unlock().

Replace deprecated CPU-hotplug functions with the official version.
The behavior remains unchanged.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210803141621.780504-4-bigeasy@linutronix.de
parent c00103ab
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -429,7 +429,7 @@ static void rtas_event_scan(struct work_struct *w)

	do_event_scan();

	get_online_cpus();
	cpus_read_lock();

	/* raw_ OK because just using CPU as starting point. */
	cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
@@ -451,7 +451,7 @@ static void rtas_event_scan(struct work_struct *w)
	schedule_delayed_work_on(cpu, &event_scan_work,
		__round_jiffies_relative(event_scan_delay, cpu));

	put_online_cpus();
	cpus_read_unlock();
}

#ifdef CONFIG_PPC64
+5 −5
Original line number Diff line number Diff line
@@ -137,23 +137,23 @@ long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
 * exist in the system. We use a counter of VMs to track this.
 *
 * One of the operations we need to block is onlining of secondaries, so we
 * protect hv_vm_count with cpus_read_lock/unlock().
 */
static atomic_t hv_vm_count;

/*
 * Note an HV-capable VM has come into existence.
 *
 * Takes the CPU-hotplug read lock (cpus_read_lock()) around the counter
 * update so that onlining of secondary CPUs is blocked while hv_vm_count
 * changes — see the comment above hv_vm_count.
 *
 * The marker-less diff render duplicated the deprecated
 * get_online_cpus()/put_online_cpus() pair next to its replacement;
 * only the cpus_read_lock()/cpus_read_unlock() calls belong in the
 * post-commit code.
 */
void kvm_hv_vm_activated(void)
{
	cpus_read_lock();
	atomic_inc(&hv_vm_count);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);

/*
 * Note an HV-capable VM has gone away.
 *
 * Mirror of kvm_hv_vm_activated(): decrements hv_vm_count under the
 * CPU-hotplug read lock so the count cannot change concurrently with
 * secondary-CPU onlining.
 *
 * As above, the rendered diff left both the deprecated
 * get_online_cpus()/put_online_cpus() calls and their replacements in
 * place; the correct post-commit body uses only cpus_read_lock()/
 * cpus_read_unlock().
 */
void kvm_hv_vm_deactivated(void)
{
	cpus_read_lock();
	atomic_dec(&hv_vm_count);
	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);

+2 −2
Original line number Diff line number Diff line
@@ -199,12 +199,12 @@ static ssize_t store_fastsleep_workaround_applyonce(struct device *dev,
	 */
	power7_fastsleep_workaround_exit = false;

	get_online_cpus();
	cpus_read_lock();
	primary_thread_mask = cpu_online_cores_map();
	on_each_cpu_mask(&primary_thread_mask,
				pnv_fastsleep_workaround_apply,
				&err, 1);
	put_online_cpus();
	cpus_read_unlock();
	if (err) {
		pr_err("fastsleep_workaround_applyonce change failed while running pnv_fastsleep_workaround_apply");
		goto fail;
+4 −4
Original line number Diff line number Diff line
@@ -186,7 +186,7 @@ static void disable_nest_pmu_counters(void)
	int nid, cpu;
	const struct cpumask *l_cpumask;

	get_online_cpus();
	cpus_read_lock();
	for_each_node_with_cpus(nid) {
		l_cpumask = cpumask_of_node(nid);
		cpu = cpumask_first_and(l_cpumask, cpu_online_mask);
@@ -195,7 +195,7 @@ static void disable_nest_pmu_counters(void)
		opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
				       get_hard_smp_processor_id(cpu));
	}
	put_online_cpus();
	cpus_read_unlock();
}

static void disable_core_pmu_counters(void)
@@ -203,7 +203,7 @@ static void disable_core_pmu_counters(void)
	cpumask_t cores_map;
	int cpu, rc;

	get_online_cpus();
	cpus_read_lock();
	/* Disable the IMC Core functions */
	cores_map = cpu_online_cores_map();
	for_each_cpu(cpu, &cores_map) {
@@ -213,7 +213,7 @@ static void disable_core_pmu_counters(void)
			pr_err("%s: Failed to stop Core (cpu = %d)\n",
				__FUNCTION__, cpu);
	}
	put_online_cpus();
	cpus_read_unlock();
}

int get_max_nest_dev(void)