Commit 19235e47 authored by Peter Zijlstra, committed by Ingo Molnar
Browse files

cpuidle, arm64: Fix the ARM64 cpuidle logic



The recent cpuidle changes started triggering RCU splats on
Juno development boards:

  | =============================
  | WARNING: suspicious RCU usage
  | -----------------------------
  | include/trace/events/ipi.h:19 suspicious rcu_dereference_check() usage!

Fix cpuidle on ARM64:

 - ... by introducing a new 'is_rcu' flag to the cpuidle helpers & make
   ARM64 use it, as ARM64 wants to keep RCU active longer and wants to
   do the ct_cpuidle_enter()/exit() dance itself.

 - Also update the PSCI driver accordingly.

 - This also removes the last known RCU_NONIDLE() user as a bonus.

Reported-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Tested-by: Sudeep Holla <sudeep.holla@arm.com>
Tested-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Link: https://lore.kernel.org/r/Y8Z31UbzG3LJgAXE@hirez.programming.kicks-ass.net

--
parent 3b8645e9
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -67,10 +67,10 @@ __cpuidle int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
	u32 state = lpi->address;

	if (ARM64_LPI_IS_RETENTION_STATE(lpi->arch_flags))
		return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(psci_cpu_suspend_enter,
		return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM_RCU(psci_cpu_suspend_enter,
						lpi->index, state);
	else
		return CPU_PM_CPU_IDLE_ENTER_PARAM(psci_cpu_suspend_enter,
		return CPU_PM_CPU_IDLE_ENTER_PARAM_RCU(psci_cpu_suspend_enter,
					     lpi->index, state);
}
#endif
+11 −1
Original line number Diff line number Diff line
@@ -4,6 +4,7 @@
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/pgtable.h>
#include <linux/cpuidle.h>
#include <asm/alternative.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
@@ -104,6 +105,10 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
	 * From this point debug exceptions are disabled to prevent
	 * updates to mdscr register (saved and restored along with
	 * general purpose registers) from kernel debuggers.
	 *
	 * Strictly speaking the trace_hardirqs_off() here is superfluous,
	 * hardirqs should be firmly off by now. This really ought to use
	 * something like raw_local_daif_save().
	 */
	flags = local_daif_save();

@@ -120,6 +125,8 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
	 */
	arm_cpuidle_save_irq_context(&context);

	ct_cpuidle_enter();

	if (__cpu_suspend_enter(&state)) {
		/* Call the suspend finisher */
		ret = fn(arg);
@@ -133,8 +140,11 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
		 */
		if (!ret)
			ret = -EOPNOTSUPP;

		ct_cpuidle_exit();
	} else {
		RCU_NONIDLE(__cpu_suspend_exit());
		ct_cpuidle_exit();
		__cpu_suspend_exit();
	}

	arm_cpuidle_restore_irq_context(&context);
+1 −5
Original line number Diff line number Diff line
@@ -69,16 +69,12 @@ static __cpuidle int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
	else
		pm_runtime_put_sync_suspend(pd_dev);

	ct_cpuidle_enter();

	state = psci_get_domain_state();
	if (!state)
		state = states[idx];

	ret = psci_cpu_suspend_enter(state) ? -1 : idx;

	ct_cpuidle_exit();

	if (s2idle)
		dev_pm_genpd_resume(pd_dev);
	else
@@ -192,7 +188,7 @@ static __cpuidle int psci_enter_idle_state(struct cpuidle_device *dev,
{
	u32 *state = __this_cpu_read(psci_cpuidle_data.psci_states);

	return CPU_PM_CPU_IDLE_ENTER_PARAM(psci_cpu_suspend_enter, idx, state[idx]);
	return CPU_PM_CPU_IDLE_ENTER_PARAM_RCU(psci_cpu_suspend_enter, idx, state[idx]);
}

static const struct of_device_id psci_idle_state_match[] = {
+11 −0
Original line number Diff line number Diff line
@@ -462,11 +462,22 @@ int psci_cpu_suspend_enter(u32 state)
	if (!psci_power_state_loses_context(state)) {
		struct arm_cpuidle_irq_context context;

		ct_cpuidle_enter();
		arm_cpuidle_save_irq_context(&context);
		ret = psci_ops.cpu_suspend(state, 0);
		arm_cpuidle_restore_irq_context(&context);
		ct_cpuidle_exit();
	} else {
		/*
		 * ARM64 cpu_suspend() wants to do ct_cpuidle_*() itself.
		 */
		if (!IS_ENABLED(CONFIG_ARM64))
			ct_cpuidle_enter();

		ret = cpu_suspend(state, psci_suspend_finisher);

		if (!IS_ENABLED(CONFIG_ARM64))
			ct_cpuidle_exit();
	}

	return ret;
+15 −7
Original line number Diff line number Diff line
@@ -307,7 +307,7 @@ extern s64 cpuidle_governor_latency_req(unsigned int cpu);
#define __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter,			\
				idx,					\
				state,					\
				is_retention)				\
				is_retention, is_rcu)			\
({									\
	int __ret = 0;							\
									\
@@ -319,8 +319,10 @@ extern s64 cpuidle_governor_latency_req(unsigned int cpu);
	if (!is_retention)						\
		__ret =  cpu_pm_enter();				\
	if (!__ret) {							\
		if (!is_rcu)						\
			ct_cpuidle_enter();				\
		__ret = low_level_idle_enter(state);			\
		if (!is_rcu)						\
			ct_cpuidle_exit();				\
		if (!is_retention)					\
			cpu_pm_exit();					\
@@ -330,15 +332,21 @@ extern s64 cpuidle_governor_latency_req(unsigned int cpu);
})

/*
 * Convenience wrappers around __CPU_PM_CPU_IDLE_ENTER().
 *
 * The trailing two arguments are (is_retention, is_rcu):
 *  - is_retention: retention states skip the cpu_pm_enter()/cpu_pm_exit()
 *    notifier calls.
 *  - is_rcu: the low-level enter routine performs the
 *    ct_cpuidle_enter()/ct_cpuidle_exit() transitions itself (wants RCU
 *    kept watching longer, e.g. ARM64), so the wrapper must not do them.
 *
 * NOTE(review): the stripped diff rendering left both the old 4-argument
 * and new 5-argument expansions of each macro in place; only the final,
 * post-commit versions are kept here to avoid macro redefinitions.
 */
#define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx)	\
	__CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 0, 0)

#define CPU_PM_CPU_IDLE_ENTER_RETENTION(low_level_idle_enter, idx)	\
	__CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 1, 0)

#define CPU_PM_CPU_IDLE_ENTER_PARAM(low_level_idle_enter, idx, state)	\
	__CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 0, 0)

#define CPU_PM_CPU_IDLE_ENTER_PARAM_RCU(low_level_idle_enter, idx, state)	\
	__CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 0, 1)

#define CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(low_level_idle_enter, idx, state)	\
	__CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 1, 0)

#define CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM_RCU(low_level_idle_enter, idx, state)	\
	__CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 1, 1)

#endif /* _LINUX_CPUIDLE_H */