Commit 33de0aa4 authored by Marc Zyngier, committed by Thomas Gleixner
Browse files

genirq: Always limit the affinity to online CPUs



When booting with maxcpus=<small number> (or even loading a driver
while most CPUs are offline), it is pretty easy to observe managed
affinities containing a mix of online and offline CPUs being passed
to the irqchip driver.

This means that the irqchip cannot trust the affinity passed down
from the core code, which is a bit annoying and requires (at least
in theory) all drivers to implement some sort of affinity narrowing.

In order to address this, always limit the cpumask to the set of
online CPUs.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20220405185040.206297-3-maz@kernel.org
parent d802057c
Loading
Loading
Loading
Loading
+17 −8
Original line number Diff line number Diff line
@@ -222,11 +222,16 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
{
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	const struct cpumask  *prog_mask;
	int ret;

	static DEFINE_RAW_SPINLOCK(tmp_mask_lock);
	static struct cpumask tmp_mask;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	raw_spin_lock(&tmp_mask_lock);
	/*
	 * If this is a managed interrupt and housekeeping is enabled on
	 * it check whether the requested affinity mask intersects with
@@ -248,24 +253,28 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
	 */
	if (irqd_affinity_is_managed(data) &&
	    housekeeping_enabled(HK_TYPE_MANAGED_IRQ)) {
		const struct cpumask *hk_mask, *prog_mask;

		static DEFINE_RAW_SPINLOCK(tmp_mask_lock);
		static struct cpumask tmp_mask;
		const struct cpumask *hk_mask;

		hk_mask = housekeeping_cpumask(HK_TYPE_MANAGED_IRQ);

		raw_spin_lock(&tmp_mask_lock);
		cpumask_and(&tmp_mask, mask, hk_mask);
		if (!cpumask_intersects(&tmp_mask, cpu_online_mask))
			prog_mask = mask;
		else
			prog_mask = &tmp_mask;
		ret = chip->irq_set_affinity(data, prog_mask, force);
		raw_spin_unlock(&tmp_mask_lock);
	} else {
		ret = chip->irq_set_affinity(data, mask, force);
		prog_mask = mask;
	}

	/* Make sure we only provide online CPUs to the irqchip */
	cpumask_and(&tmp_mask, prog_mask, cpu_online_mask);
	if (!cpumask_empty(&tmp_mask))
		ret = chip->irq_set_affinity(data, &tmp_mask, force);
	else
		ret = -EINVAL;

	raw_spin_unlock(&tmp_mask_lock);

	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE: