Commit 0f613bfa authored by Mark Rutland, committed by Peter Zijlstra

locking/atomic: treewide: use raw_atomic*_<op>()

Now that we have raw_atomic*_<op>() definitions, there's no need to use
arch_atomic*_<op>() definitions outside of the low-level atomic
definitions.

Move treewide users of arch_atomic*_<op>() over to the equivalent
raw_atomic*_<op>().

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20230605070124.3741859-19-mark.rutland@arm.com
parent c9268ac6
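For readers unfamiliar with the naming: raw_atomic*_<op>() behaves like arch_atomic*_<op>() where the latter exists, but it is guaranteed to be defined on every architecture via the generated fallbacks, and it is never instrumented (no KASAN/KCSAN hooks), which is what makes it usable from noinstr code. A minimal sketch of the distinction, with invented example_* names (illustrative only, not part of this patch):

	#include <linux/atomic.h>

	static atomic_t example_counter = ATOMIC_INIT(0);

	/* noinstr code must avoid instrumentation; use the raw op. */
	noinstr int example_read_noinstr(void)
	{
		return raw_atomic_read(&example_counter);
	}

	/* Ordinary code: atomic_read() wraps the same raw op with
	 * KASAN/KCSAN instrumentation. */
	int example_read(void)
	{
		return atomic_read(&example_counter);
	}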
arch/powerpc/kernel/smp.c +6 −6
@@ -417,9 +417,9 @@ noinstr static void nmi_ipi_lock_start(unsigned long *flags)
 {
 	raw_local_irq_save(*flags);
 	hard_irq_disable();
-	while (arch_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
+	while (raw_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
 		raw_local_irq_restore(*flags);
-		spin_until_cond(arch_atomic_read(&__nmi_ipi_lock) == 0);
+		spin_until_cond(raw_atomic_read(&__nmi_ipi_lock) == 0);
 		raw_local_irq_save(*flags);
 		hard_irq_disable();
 	}
@@ -427,15 +427,15 @@ noinstr static void nmi_ipi_lock_start(unsigned long *flags)
 
 noinstr static void nmi_ipi_lock(void)
 {
-	while (arch_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
-		spin_until_cond(arch_atomic_read(&__nmi_ipi_lock) == 0);
+	while (raw_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
+		spin_until_cond(raw_atomic_read(&__nmi_ipi_lock) == 0);
 }
 
 noinstr static void nmi_ipi_unlock(void)
 {
 	smp_mb();
-	WARN_ON(arch_atomic_read(&__nmi_ipi_lock) != 1);
-	arch_atomic_set(&__nmi_ipi_lock, 0);
+	WARN_ON(raw_atomic_read(&__nmi_ipi_lock) != 1);
+	raw_atomic_set(&__nmi_ipi_lock, 0);
 }
 
 noinstr static void nmi_ipi_unlock_end(unsigned long *flags)
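The two hunks above convert a tiny cmpxchg-based lock used on the NMI IPI path. A hedged sketch of the underlying pattern, with invented demo_* names (a rough illustration of the idiom, not this file's exact code):

	#include <linux/atomic.h>

	static atomic_t demo_lock = ATOMIC_INIT(0);

	noinstr static void demo_lock_acquire(void)
	{
		/* The 0 -> 1 transition takes the lock; otherwise spin. */
		while (raw_atomic_cmpxchg(&demo_lock, 0, 1) == 1)
			cpu_relax();
	}

	noinstr static void demo_lock_release(void)
	{
		smp_mb();	/* order the critical section before the release */
		raw_atomic_set(&demo_lock, 0);
	}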
arch/x86/kernel/alternative.c +2 −2
@@ -1799,7 +1799,7 @@ struct bp_patching_desc *try_get_desc(void)
 {
 	struct bp_patching_desc *desc = &bp_desc;
 
-	if (!arch_atomic_inc_not_zero(&desc->refs))
+	if (!raw_atomic_inc_not_zero(&desc->refs))
 		return NULL;
 
 	return desc;
@@ -1810,7 +1810,7 @@ static __always_inline void put_desc(void)
 	struct bp_patching_desc *desc = &bp_desc;
 
 	smp_mb__before_atomic();
-	arch_atomic_dec(&desc->refs);
+	raw_atomic_dec(&desc->refs);
 }
 
 static __always_inline void *text_poke_addr(struct text_poke_loc *tp)
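These hunks convert the refcount guarding bp_desc. The idiom, sketched with invented demo_* names (a hedged illustration, not the file's code): inc_not_zero only takes a reference while the count is non-zero, so once the count has dropped to zero no new references can appear.

	#include <linux/atomic.h>

	static atomic_t demo_refs = ATOMIC_INIT(1);

	static __always_inline bool demo_try_get(void)
	{
		/* Fails once the count has already dropped to zero. */
		return raw_atomic_inc_not_zero(&demo_refs);
	}

	static __always_inline void demo_put(void)
	{
		/* Order this CPU's prior accesses before the decrement. */
		smp_mb__before_atomic();
		raw_atomic_dec(&demo_refs);
	}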
arch/x86/kernel/cpu/mce/core.c +8 −8
@@ -1022,12 +1022,12 @@ static noinstr int mce_start(int *no_way_out)
 	if (!timeout)
 		return ret;
 
-	arch_atomic_add(*no_way_out, &global_nwo);
+	raw_atomic_add(*no_way_out, &global_nwo);
 	/*
 	 * Rely on the implied barrier below, such that global_nwo
 	 * is updated before mce_callin.
 	 */
-	order = arch_atomic_inc_return(&mce_callin);
+	order = raw_atomic_inc_return(&mce_callin);
 	arch_cpumask_clear_cpu(smp_processor_id(), &mce_missing_cpus);
 
 	/* Enable instrumentation around calls to external facilities */
@@ -1036,10 +1036,10 @@ static noinstr int mce_start(int *no_way_out)
 	/*
 	 * Wait for everyone.
 	 */
-	while (arch_atomic_read(&mce_callin) != num_online_cpus()) {
+	while (raw_atomic_read(&mce_callin) != num_online_cpus()) {
 		if (mce_timed_out(&timeout,
 				  "Timeout: Not all CPUs entered broadcast exception handler")) {
-			arch_atomic_set(&global_nwo, 0);
+			raw_atomic_set(&global_nwo, 0);
 			goto out;
 		}
 		ndelay(SPINUNIT);
@@ -1054,7 +1054,7 @@ static noinstr int mce_start(int *no_way_out)
 		/*
 		 * Monarch: Starts executing now, the others wait.
 		 */
-		arch_atomic_set(&mce_executing, 1);
+		raw_atomic_set(&mce_executing, 1);
 	} else {
 		/*
 		 * Subject: Now start the scanning loop one by one in
@@ -1062,10 +1062,10 @@ static noinstr int mce_start(int *no_way_out)
 		 * This way when there are any shared banks it will be
 		 * only seen by one CPU before cleared, avoiding duplicates.
 		 */
-		while (arch_atomic_read(&mce_executing) < order) {
+		while (raw_atomic_read(&mce_executing) < order) {
 			if (mce_timed_out(&timeout,
 					  "Timeout: Subject CPUs unable to finish machine check processing")) {
-				arch_atomic_set(&global_nwo, 0);
+				raw_atomic_set(&global_nwo, 0);
 				goto out;
 			}
 			ndelay(SPINUNIT);
@@ -1075,7 +1075,7 @@ static noinstr int mce_start(int *no_way_out)
 	/*
 	 * Cache the global no_way_out state.
 	 */
-	*no_way_out = arch_atomic_read(&global_nwo);
+	*no_way_out = raw_atomic_read(&global_nwo);
 
 	ret = order;
 
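The mce_start() hunks above implement a counter-based CPU rendezvous: each CPU takes a ticket with inc_return, then spins until the call-in count covers every online CPU. A hedged sketch of that pattern with invented demo_* names (illustrative only; it omits the timeout handling and the instrumentation_begin/end dance the real code needs):

	#include <linux/atomic.h>
	#include <linux/cpumask.h>

	static atomic_t demo_callin = ATOMIC_INIT(0);

	noinstr static int demo_rendezvous(void)
	{
		/* The ticket doubles as this CPU's arrival order (1-based). */
		int order = raw_atomic_inc_return(&demo_callin);

		while (raw_atomic_read(&demo_callin) != num_online_cpus())
			cpu_relax();

		return order;
	}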
arch/x86/kernel/nmi.c +1 −1
@@ -496,7 +496,7 @@ DEFINE_IDTENTRY_RAW(exc_nmi)
 	 */
 	sev_es_nmi_complete();
 	if (IS_ENABLED(CONFIG_NMI_CHECK_CPU))
-		arch_atomic_long_inc(&nsp->idt_calls);
+		raw_atomic_long_inc(&nsp->idt_calls);
 
 	if (IS_ENABLED(CONFIG_SMP) && arch_cpu_is_offline(smp_processor_id()))
 		return;
arch/x86/kernel/pvclock.c +2 −2
@@ -101,11 +101,11 @@ u64 __pvclock_clocksource_read(struct pvclock_vcpu_time_info *src, bool dowd)
 	 * updating at the same time, and one of them could be slightly behind,
 	 * making the assumption that last_value always go forward fail to hold.
 	 */
-	last = arch_atomic64_read(&last_value);
+	last = raw_atomic64_read(&last_value);
 	do {
 		if (ret <= last)
 			return last;
-	} while (!arch_atomic64_try_cmpxchg(&last_value, &last, ret));
+	} while (!raw_atomic64_try_cmpxchg(&last_value, &last, ret));
 
 	return ret;
 }
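The pvclock hunk converts a classic monotonic-publish loop: store the new timestamp only if it is newer than the last published value, otherwise return the newer value some other CPU stored. try_cmpxchg reloads the expected value on failure, so the loop always makes progress. A hedged sketch with invented names (not the file's exact code):

	#include <linux/atomic.h>

	static s64 demo_publish_monotonic(atomic64_t *last, s64 now)
	{
		s64 prev = raw_atomic64_read(last);

		do {
			if (now <= prev)
				return prev;	/* another CPU is ahead; use its value */
		} while (!raw_atomic64_try_cmpxchg(last, &prev, now));

		return now;
	}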