Commit 87b5374b authored by Rafael J. Wysocki's avatar Rafael J. Wysocki
Browse files

Merge branches 'acpi-scan', 'acpi-pm', 'acpi-resource' and 'acpi-ec'

Merge ACPI device enumeration changes, ACPI power management update,
ACPI resources management updates and an EC driver update for 6.5-rc1:

 - Reduce ACPI device enumeration overhead related to devices with
   dependencies (Rafael Wysocki).

 - Fix the handling of Microsoft LPS0 _DSM for suspend-to-idle (Mario
   Limonciello).

 - Fix section mismatch warning in the ACPI suspend-to-idle code (Arnd
   Bergmann).

 - Drop several ACPI resource management quirks related to IRQ overrides
   on AMD "Zen" systems (Mario Limonciello).

 - Modify the ACPI EC driver to make it only clear the EC GPE status
   when handling the GPE (Jeremy Compostella).

* acpi-scan:
  ACPI: scan: Reduce overhead related to devices with dependencies

* acpi-pm:
  ACPI: x86: s2idle: Adjust Microsoft LPS0 _DSM handling sequence
  ACPI: PM: s2idle: fix section mismatch warning

* acpi-resource:
  ACPI: resource: Remove "Zen" specific match and quirks

* acpi-ec:
  ACPI: EC: Clear GPE on interrupt handling only
Loading
Loading
Loading
Loading
+16 −15
Original line number Diff line number Diff line
@@ -662,21 +662,6 @@ static void advance_transaction(struct acpi_ec *ec, bool interrupt)

	ec_dbg_stm("%s (%d)", interrupt ? "IRQ" : "TASK", smp_processor_id());

	/*
	 * Clear GPE_STS upfront to allow subsequent hardware GPE_STS 0->1
	 * changes to always trigger a GPE interrupt.
	 *
	 * GPE STS is a W1C register, which means:
	 *
	 * 1. Software can clear it without worrying about clearing the other
	 *    GPEs' STS bits when the hardware sets them in parallel.
	 *
	 * 2. As long as software can ensure only clearing it when it is set,
	 *    hardware won't set it in parallel.
	 */
	if (ec->gpe >= 0 && acpi_ec_gpe_status_set(ec))
		acpi_clear_gpe(NULL, ec->gpe);

	status = acpi_ec_read_status(ec);

	/*
@@ -1287,6 +1272,22 @@ static void acpi_ec_handle_interrupt(struct acpi_ec *ec)
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);

	/*
	 * Clear GPE_STS upfront to allow subsequent hardware GPE_STS 0->1
	 * changes to always trigger a GPE interrupt.
	 *
	 * GPE STS is a W1C register, which means:
	 *
	 * 1. Software can clear it without worrying about clearing the other
	 *    GPEs' STS bits when the hardware sets them in parallel.
	 *
	 * 2. As long as software can ensure only clearing it when it is set,
	 *    hardware won't set it in parallel.
	 */
	if (ec->gpe >= 0 && acpi_ec_gpe_status_set(ec))
		acpi_clear_gpe(NULL, ec->gpe);

	advance_transaction(ec, true);
	spin_unlock_irqrestore(&ec->lock, flags);
}
+0 −60
Original line number Diff line number Diff line
@@ -470,52 +470,6 @@ static const struct dmi_system_id asus_laptop[] = {
	{ }
};

static const struct dmi_system_id lenovo_laptop[] = {
	{
		.ident = "LENOVO IdeaPad Flex 5 14ALC7",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_NAME, "82R9"),
		},
	},
	{
		.ident = "LENOVO IdeaPad Flex 5 16ALC7",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_NAME, "82RA"),
		},
	},
	{ }
};

static const struct dmi_system_id tongfang_gm_rg[] = {
	{
		.ident = "TongFang GMxRGxx/XMG CORE 15 (M22)/TUXEDO Stellaris 15 Gen4 AMD",
		.matches = {
			DMI_MATCH(DMI_BOARD_NAME, "GMxRGxx"),
		},
	},
	{ }
};

static const struct dmi_system_id maingear_laptop[] = {
	{
		.ident = "MAINGEAR Vector Pro 2 15",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Micro Electronics Inc"),
			DMI_MATCH(DMI_PRODUCT_NAME, "MG-VCP2-15A3070T"),
		}
	},
	{
		.ident = "MAINGEAR Vector Pro 2 17",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Micro Electronics Inc"),
			DMI_MATCH(DMI_PRODUCT_NAME, "MG-VCP2-17A3070T"),
		},
	},
	{ }
};

static const struct dmi_system_id lg_laptop[] = {
	{
		.ident = "LG Electronics 17U70P",
@@ -539,10 +493,6 @@ struct irq_override_cmp {
static const struct irq_override_cmp override_table[] = {
	{ medion_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
	{ asus_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
	{ lenovo_laptop, 6, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
	{ lenovo_laptop, 10, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
	{ tongfang_gm_rg, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true },
	{ maingear_laptop, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true },
	{ lg_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
};

@@ -562,16 +512,6 @@ static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
			return entry->override;
	}

#ifdef CONFIG_X86
	/*
	 * IRQ override isn't needed on modern AMD Zen systems and
	 * this override breaks active low IRQs on AMD Ryzen 6000 and
	 * newer systems. Skip it.
	 */
	if (boot_cpu_has(X86_FEATURE_ZEN))
		return false;
#endif

	return true;
}

+61 −20
Original line number Diff line number Diff line
@@ -2029,8 +2029,6 @@ static u32 acpi_scan_check_dep(acpi_handle handle, bool check_dep)
	return count;
}

static bool acpi_bus_scan_second_pass;

static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep,
				      struct acpi_device **adev_p)
{
@@ -2050,10 +2048,8 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep,
			return AE_OK;

		/* Bail out if there are dependencies. */
		if (acpi_scan_check_dep(handle, check_dep) > 0) {
			acpi_bus_scan_second_pass = true;
		if (acpi_scan_check_dep(handle, check_dep) > 0)
			return AE_CTRL_DEPTH;
		}

		fallthrough;
	case ACPI_TYPE_ANY:	/* for ACPI_ROOT_OBJECT */
@@ -2301,6 +2297,12 @@ static bool acpi_scan_clear_dep_queue(struct acpi_device *adev)
	return true;
}

static void acpi_scan_delete_dep_data(struct acpi_dep_data *dep)
{
	list_del(&dep->node);
	kfree(dep);
}

static int acpi_scan_clear_dep(struct acpi_dep_data *dep, void *data)
{
	struct acpi_device *adev = acpi_get_acpi_dev(dep->consumer);
@@ -2311,8 +2313,10 @@ static int acpi_scan_clear_dep(struct acpi_dep_data *dep, void *data)
			acpi_dev_put(adev);
	}

	list_del(&dep->node);
	kfree(dep);
	if (dep->free_when_met)
		acpi_scan_delete_dep_data(dep);
	else
		dep->met = true;

	return 0;
}
@@ -2406,6 +2410,55 @@ struct acpi_device *acpi_dev_get_next_consumer_dev(struct acpi_device *supplier,
}
EXPORT_SYMBOL_GPL(acpi_dev_get_next_consumer_dev);

static void acpi_scan_postponed_branch(acpi_handle handle)
{
	struct acpi_device *adev = NULL;

	if (ACPI_FAILURE(acpi_bus_check_add(handle, false, &adev)))
		return;

	acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
			    acpi_bus_check_add_2, NULL, NULL, (void **)&adev);
	acpi_bus_attach(adev, NULL);
}

static void acpi_scan_postponed(void)
{
	struct acpi_dep_data *dep, *tmp;

	mutex_lock(&acpi_dep_list_lock);

	list_for_each_entry_safe(dep, tmp, &acpi_dep_list, node) {
		acpi_handle handle = dep->consumer;

		/*
		 * In case there are multiple acpi_dep_list entries with the
		 * same consumer, skip the current entry if the consumer device
		 * object corresponding to it is present already.
		 */
		if (!acpi_fetch_acpi_dev(handle)) {
			/*
			 * Even though the lock is released here, tmp is
			 * guaranteed to be valid, because none of the list
			 * entries following dep is marked as "free when met"
			 * and so they cannot be deleted.
			 */
			mutex_unlock(&acpi_dep_list_lock);

			acpi_scan_postponed_branch(handle);

			mutex_lock(&acpi_dep_list_lock);
		}

		if (dep->met)
			acpi_scan_delete_dep_data(dep);
		else
			dep->free_when_met = true;
	}

	mutex_unlock(&acpi_dep_list_lock);
}

/**
 * acpi_bus_scan - Add ACPI device node objects in a given namespace scope.
 * @handle: Root of the namespace scope to scan.
@@ -2424,8 +2477,6 @@ int acpi_bus_scan(acpi_handle handle)
{
	struct acpi_device *device = NULL;

	acpi_bus_scan_second_pass = false;

	/* Pass 1: Avoid enumerating devices with missing dependencies. */

	if (ACPI_SUCCESS(acpi_bus_check_add(handle, true, &device)))
@@ -2438,19 +2489,9 @@ int acpi_bus_scan(acpi_handle handle)

	acpi_bus_attach(device, (void *)true);

	if (!acpi_bus_scan_second_pass)
		return 0;

	/* Pass 2: Enumerate all of the remaining devices. */

	device = NULL;

	if (ACPI_SUCCESS(acpi_bus_check_add(handle, false, &device)))
		acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
				    acpi_bus_check_add_2, NULL, NULL,
				    (void **)&device);

	acpi_bus_attach(device, NULL);
	acpi_scan_postponed();

	return 0;
}
+1 −1
Original line number Diff line number Diff line
@@ -848,7 +848,7 @@ void __weak acpi_s2idle_setup(void)
	s2idle_set_ops(&acpi_s2idle_ops);
}

static void acpi_sleep_suspend_setup(void)
static void __init acpi_sleep_suspend_setup(void)
{
	bool suspend_ops_needed = false;
	int i;
+7 −7
Original line number Diff line number Diff line
@@ -485,11 +485,11 @@ int acpi_s2idle_prepare_late(void)
					ACPI_LPS0_ENTRY,
					lps0_dsm_func_mask, lps0_dsm_guid);
	if (lps0_dsm_func_mask_microsoft > 0) {
		acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY,
				lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
		/* modern standby entry */
		acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_ENTRY,
				lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
		acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY,
				lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);
	}

	list_for_each_entry(handler, &lps0_s2idle_devops_head, list_node) {
@@ -524,11 +524,6 @@ void acpi_s2idle_restore_early(void)
		if (handler->restore)
			handler->restore();

	/* Modern standby exit */
	if (lps0_dsm_func_mask_microsoft > 0)
		acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT,
				lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);

	/* LPS0 exit */
	if (lps0_dsm_func_mask > 0)
		acpi_sleep_run_lps0_dsm(acpi_s2idle_vendor_amd() ?
@@ -539,6 +534,11 @@ void acpi_s2idle_restore_early(void)
		acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT,
				lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);

	/* Modern standby exit */
	if (lps0_dsm_func_mask_microsoft > 0)
		acpi_sleep_run_lps0_dsm(ACPI_LPS0_MS_EXIT,
				lps0_dsm_func_mask_microsoft, lps0_dsm_guid_microsoft);

	/* Screen on */
	if (lps0_dsm_func_mask_microsoft > 0)
		acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON,
Loading