Commit bbefb7ed authored by Greg Kroah-Hartman's avatar Greg Kroah-Hartman
Browse files

Merge tag 'thunderbolt-for-v6.3-rc4' of...

Merge tag 'thunderbolt-for-v6.3-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt into usb-linus

Mika writes:

thunderbolt: Fixes for v6.3-rc4

This includes following fixes and quirks for v6.3-rc:

  - Quirk to disable CL-states on AMD USB4 host routers
  - Fix memory leak in lane margining
  - Correct the retimer access flows
  - Quirk to limit USB3 bandwidth on certain Intel USB4 host routers
  - Fix usage of scale field when allocating USB3 bandwidth
  - Fix interrupt "auto clear" on non-Intel USB4 host routers

There are also two commits that are not fixes themselves but are needed
for the USB3 bandwidth quirk and for the interrupt auto clear fix to
work.

All these have been in linux-next with no reported issues.

* tag 'thunderbolt-for-v6.3-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt:
  thunderbolt: Rename shadowed variables bit to interrupt_bit and auto_clear_bit
  thunderbolt: Disable interrupt auto clear for rings
  thunderbolt: Use const qualifier for `ring_interrupt_index`
  thunderbolt: Use scale field when allocating USB3 bandwidth
  thunderbolt: Limit USB3 bandwidth of certain Intel USB4 host routers
  thunderbolt: Call tb_check_quirks() after initializing adapters
  thunderbolt: Add missing UNSET_INBOUND_SBTX for retimer access
  thunderbolt: Fix memory leak in margining
  thunderbolt: Add quirk to disable CLx
parents bbf860ed 58cdfe6f
Loading
Loading
Loading
Loading
+6 −6
Original line number Diff line number Diff line
@@ -942,6 +942,7 @@ static void margining_port_remove(struct tb_port *port)

	snprintf(dir_name, sizeof(dir_name), "port%d", port->port);
	parent = debugfs_lookup(dir_name, port->sw->debugfs_dir);
	if (parent)
		debugfs_remove_recursive(debugfs_lookup("margining", parent));

	kfree(port->usb4->margining);
@@ -967,19 +968,18 @@ static void margining_switch_init(struct tb_switch *sw)

static void margining_switch_remove(struct tb_switch *sw)
{
	struct tb_port *upstream, *downstream;
	struct tb_switch *parent_sw;
	struct tb_port *downstream;
	u64 route = tb_route(sw);

	if (!route)
		return;

	/*
	 * Upstream is removed with the router itself but we need to
	 * remove the downstream port margining directory.
	 */
	upstream = tb_upstream_port(sw);
	parent_sw = tb_switch_parent(sw);
	downstream = tb_port_at(route, parent_sw);

	margining_port_remove(upstream);
	margining_port_remove(downstream);
}

+30 −19
Original line number Diff line number Diff line
@@ -46,7 +46,7 @@
#define QUIRK_AUTO_CLEAR_INT	BIT(0)
#define QUIRK_E2E		BIT(1)

static int ring_interrupt_index(struct tb_ring *ring)
static int ring_interrupt_index(const struct tb_ring *ring)
{
	int bit = ring->hop;
	if (!ring->is_tx)
@@ -63,13 +63,14 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active)
{
	int reg = REG_RING_INTERRUPT_BASE +
		  ring_interrupt_index(ring) / 32 * 4;
	int bit = ring_interrupt_index(ring) & 31;
	int mask = 1 << bit;
	int interrupt_bit = ring_interrupt_index(ring) & 31;
	int mask = 1 << interrupt_bit;
	u32 old, new;

	if (ring->irq > 0) {
		u32 step, shift, ivr, misc;
		void __iomem *ivr_base;
		int auto_clear_bit;
		int index;

		if (ring->is_tx)
@@ -77,18 +78,25 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active)
		else
			index = ring->hop + ring->nhi->hop_count;

		if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT) {
		/*
			 * Ask the hardware to clear interrupt status
			 * bits automatically since we already know
			 * which interrupt was triggered.
		 * Intel routers support a bit that isn't part of
		 * the USB4 spec to ask the hardware to clear
		 * interrupt status bits automatically since
		 * we already know which interrupt was triggered.
		 *
		 * Other routers explicitly disable auto-clear
		 * to prevent conditions that may occur where two
		 * MSIX interrupts are simultaneously active and
		 * reading the register clears both of them.
		 */
		misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
			if (!(misc & REG_DMA_MISC_INT_AUTO_CLEAR)) {
				misc |= REG_DMA_MISC_INT_AUTO_CLEAR;
				iowrite32(misc, ring->nhi->iobase + REG_DMA_MISC);
			}
		}
		if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT)
			auto_clear_bit = REG_DMA_MISC_INT_AUTO_CLEAR;
		else
			auto_clear_bit = REG_DMA_MISC_DISABLE_AUTO_CLEAR;
		if (!(misc & auto_clear_bit))
			iowrite32(misc | auto_clear_bit,
				  ring->nhi->iobase + REG_DMA_MISC);

		ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE;
		step = index / REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
@@ -108,7 +116,7 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active)

	dev_dbg(&ring->nhi->pdev->dev,
		"%s interrupt at register %#x bit %d (%#x -> %#x)\n",
		active ? "enabling" : "disabling", reg, bit, old, new);
		active ? "enabling" : "disabling", reg, interrupt_bit, old, new);

	if (new == old)
		dev_WARN(&ring->nhi->pdev->dev,
@@ -393,13 +401,16 @@ EXPORT_SYMBOL_GPL(tb_ring_poll_complete);

static void ring_clear_msix(const struct tb_ring *ring)
{
	int bit;

	if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT)
		return;

	bit = ring_interrupt_index(ring) & 31;
	if (ring->is_tx)
		ioread32(ring->nhi->iobase + REG_RING_NOTIFY_BASE);
		iowrite32(BIT(bit), ring->nhi->iobase + REG_RING_INT_CLEAR);
	else
		ioread32(ring->nhi->iobase + REG_RING_NOTIFY_BASE +
		iowrite32(BIT(bit), ring->nhi->iobase + REG_RING_INT_CLEAR +
			  4 * (ring->nhi->hop_count / 32));
}

+4 −2
Original line number Diff line number Diff line
@@ -77,12 +77,13 @@ struct ring_desc {

/*
 * three bitfields: tx, rx, rx overflow
 * Every bitfield contains one bit for every hop (REG_HOP_COUNT). Registers are
 * cleared on read. New interrupts are fired only after ALL registers have been
 * Every bitfield contains one bit for every hop (REG_HOP_COUNT).
 * New interrupts are fired only after ALL registers have been
 * read (even those containing only disabled rings).
 */
#define REG_RING_NOTIFY_BASE	0x37800
#define RING_NOTIFY_REG_COUNT(nhi) ((31 + 3 * nhi->hop_count) / 32)
#define REG_RING_INT_CLEAR	0x37808

/*
 * two bitfields: rx, tx
@@ -105,6 +106,7 @@ struct ring_desc {

#define REG_DMA_MISC			0x39864
#define REG_DMA_MISC_INT_AUTO_CLEAR     BIT(2)
#define REG_DMA_MISC_DISABLE_AUTO_CLEAR	BIT(17)

#define REG_INMAIL_DATA			0x39900

+44 −0
Original line number Diff line number Diff line
@@ -20,6 +20,25 @@ static void quirk_dp_credit_allocation(struct tb_switch *sw)
	}
}

/*
 * Quirk handler: flag the router as one where CLx low-power link
 * states must not be used. Only sets the QUIRK_NO_CLX bit; the code
 * that enables CL states presumably checks this flag — confirm at the
 * call sites of sw->quirks.
 */
static void quirk_clx_disable(struct tb_switch *sw)
{
	sw->quirks |= QUIRK_NO_CLX;
	tb_sw_dbg(sw, "disabling CL states\n");
}

/*
 * Quirk handler: cap the advertised bandwidth of every USB3 downstream
 * adapter on the affected router. Applied via the tb_quirks[] table to
 * specific Intel USB4 host routers (see the table entries referencing
 * this function).
 */
static void quirk_usb3_maximum_bandwidth(struct tb_switch *sw)
{
	struct tb_port *port;

	tb_switch_for_each_port(sw, port) {
		/* Only USB3 downstream adapters are affected */
		if (!tb_port_is_usb3_down(port))
			continue;
		/* Hardware limit in Mb/s for these routers */
		port->max_bw = 16376;
		tb_port_dbg(port, "USB3 maximum bandwidth limited to %u Mb/s\n",
			    port->max_bw);
	}
}

struct tb_quirk {
	u16 hw_vendor_id;
	u16 hw_device_id;
@@ -37,6 +56,31 @@ static const struct tb_quirk tb_quirks[] = {
	 * DP buffers.
	 */
	{ 0x8087, 0x0b26, 0x0000, 0x0000, quirk_dp_credit_allocation },
	/*
	 * Limit the maximum USB3 bandwidth for the following Intel USB4
	 * host routers due to a hardware issue.
	 */
	{ 0x8087, PCI_DEVICE_ID_INTEL_ADL_NHI0, 0x0000, 0x0000,
		  quirk_usb3_maximum_bandwidth },
	{ 0x8087, PCI_DEVICE_ID_INTEL_ADL_NHI1, 0x0000, 0x0000,
		  quirk_usb3_maximum_bandwidth },
	{ 0x8087, PCI_DEVICE_ID_INTEL_RPL_NHI0, 0x0000, 0x0000,
		  quirk_usb3_maximum_bandwidth },
	{ 0x8087, PCI_DEVICE_ID_INTEL_RPL_NHI1, 0x0000, 0x0000,
		  quirk_usb3_maximum_bandwidth },
	{ 0x8087, PCI_DEVICE_ID_INTEL_MTL_M_NHI0, 0x0000, 0x0000,
		  quirk_usb3_maximum_bandwidth },
	{ 0x8087, PCI_DEVICE_ID_INTEL_MTL_P_NHI0, 0x0000, 0x0000,
		  quirk_usb3_maximum_bandwidth },
	{ 0x8087, PCI_DEVICE_ID_INTEL_MTL_P_NHI1, 0x0000, 0x0000,
		  quirk_usb3_maximum_bandwidth },
	/*
	 * CLx is not supported on AMD USB4 Yellow Carp and Pink Sardine platforms.
	 */
	{ 0x0438, 0x0208, 0x0000, 0x0000, quirk_clx_disable },
	{ 0x0438, 0x0209, 0x0000, 0x0000, quirk_clx_disable },
	{ 0x0438, 0x020a, 0x0000, 0x0000, quirk_clx_disable },
	{ 0x0438, 0x020b, 0x0000, 0x0000, quirk_clx_disable },
};

/**
+21 −2
Original line number Diff line number Diff line
@@ -187,6 +187,22 @@ static ssize_t nvm_authenticate_show(struct device *dev,
	return ret;
}

/*
 * Enable inbound sideband channel transactions (SET_INBOUND_SBTX) for
 * every possible retimer index on @port, in ascending order
 * (1 .. TB_MAX_RETIMER_INDEX). Return values of the per-index call are
 * intentionally ignored: indices without a retimer present simply fail.
 */
static void tb_retimer_set_inbound_sbtx(struct tb_port *port)
{
	int i;

	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
		usb4_port_retimer_set_inbound_sbtx(port, i);
}

/*
 * Disable inbound sideband channel transactions (UNSET_INBOUND_SBTX)
 * for every possible retimer index on @port. Walks the indices in
 * descending order (TB_MAX_RETIMER_INDEX .. 1) — the reverse of
 * tb_retimer_set_inbound_sbtx() — so teardown mirrors setup.
 */
static void tb_retimer_unset_inbound_sbtx(struct tb_port *port)
{
	int i;

	for (i = TB_MAX_RETIMER_INDEX; i >= 1; i--)
		usb4_port_retimer_unset_inbound_sbtx(port, i);
}

static ssize_t nvm_authenticate_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
@@ -213,6 +229,7 @@ static ssize_t nvm_authenticate_store(struct device *dev,
	rt->auth_status = 0;

	if (val) {
		tb_retimer_set_inbound_sbtx(rt->port);
		if (val == AUTHENTICATE_ONLY) {
			ret = tb_retimer_nvm_authenticate(rt, true);
		} else {
@@ -232,6 +249,7 @@ static ssize_t nvm_authenticate_store(struct device *dev,
	}

exit_unlock:
	tb_retimer_unset_inbound_sbtx(rt->port);
	mutex_unlock(&rt->tb->lock);
exit_rpm:
	pm_runtime_mark_last_busy(&rt->dev);
@@ -440,8 +458,7 @@ int tb_retimer_scan(struct tb_port *port, bool add)
	 * Enable sideband channel for each retimer. We can do this
	 * regardless whether there is device connected or not.
	 */
	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
		usb4_port_retimer_set_inbound_sbtx(port, i);
	tb_retimer_set_inbound_sbtx(port);

	/*
	 * Before doing anything else, read the authentication status.
@@ -464,6 +481,8 @@ int tb_retimer_scan(struct tb_port *port, bool add)
			break;
	}

	tb_retimer_unset_inbound_sbtx(port);

	if (!last_idx)
		return 0;

Loading