Commit 95f8f1cb authored by Mika Westerberg
Browse files

thunderbolt: Move port CL state functions into correct place in switch.c



They should be close to other functions dealing with USB4 ports. No
functional impact.

Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
parent 7f333ace
Loading
Loading
Loading
Loading
+106 −106
Original line number Diff line number Diff line
@@ -1229,6 +1229,112 @@ int tb_port_update_credits(struct tb_port *port)
	return tb_port_do_update_credits(port->dual_link_port);
}

/*
 * Set (@secondary true) or clear the PMS bit in the lane adapter PHY
 * config space (LANE_ADP_CS_1) of @port. Returns %0 on success or a
 * negative errno propagated from the config space access.
 */
static int __tb_port_pm_secondary_set(struct tb_port *port, bool secondary)
{
	int res;
	u32 lane;

	res = tb_port_read(port, &lane, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (res)
		return res;

	/* Read-modify-write of the single PMS bit */
	if (secondary)
		lane |= LANE_ADP_CS_1_PMS;
	else
		lane &= ~LANE_ADP_CS_1_PMS;

	return tb_port_write(port, &lane, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}

/* Set the PMS (PM secondary) bit on @port's lane adapter. */
static int tb_port_pm_secondary_enable(struct tb_port *port)
{
	return __tb_port_pm_secondary_set(port, true);
}

/* Clear the PMS (PM secondary) bit on @port's lane adapter. */
static int tb_port_pm_secondary_disable(struct tb_port *port)
{
	return __tb_port_pm_secondary_set(port, false);
}

/*
 * tb_port_clx_supported() - Is given CLx state supported by the port
 * @port: Lane adapter to check
 * @clx: CLx state to check support for
 *
 * Called for USB4 or Titan Ridge routers only. Returns %false for
 * unbonded links with a dual-link partner, for inter-domain (XDomain)
 * links, when the router does not support CLx at all, or when the
 * requested state is not advertised in the adapter's LANE_ADP_CS_0.
 * Only TB_CL1 is currently recognized (CL0s and CL1 go together).
 */
static bool tb_port_clx_supported(struct tb_port *port, enum tb_clx clx)
{
	u32 mask, val;
	int ret;

	/* Don't enable CLx in case of two single-lane links */
	if (!port->bonded && port->dual_link_port)
		return false;

	/* Don't enable CLx in case of inter-domain link */
	if (port->xdomain)
		return false;

	if (tb_switch_is_usb4(port->sw)) {
		if (!usb4_port_clx_supported(port))
			return false;
	} else if (!tb_lc_is_clx_supported(port)) {
		return false;
	}

	switch (clx) {
	case TB_CL1:
		/* CL0s and CL1 are enabled and supported together */
		mask = LANE_ADP_CS_0_CL0S_SUPPORT | LANE_ADP_CS_0_CL1_SUPPORT;
		break;

	/* For now we support only CL0s and CL1. Not CL2 */
	case TB_CL2:
	default:
		return false;
	}

	/*
	 * tb_port_read() returns 0 or a negative errno; keep it in an
	 * int (was bool) so the error value is not truncated.
	 */
	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_0, 1);
	if (ret)
		return false;

	return !!(val & mask);
}

/*
 * Enable (@enable true) or disable the CLx enable bits for @clx in the
 * lane adapter PHY config space (LANE_ADP_CS_1) of @port. Only TB_CL1
 * is supported (CL0s and CL1 are enabled and supported together);
 * anything else returns %-EOPNOTSUPP.
 */
static int __tb_port_clx_set(struct tb_port *port, enum tb_clx clx, bool enable)
{
	u32 lane, bits;
	int res;

	switch (clx) {
	case TB_CL1:
		/* CL0s and CL1 are enabled and supported together */
		bits = LANE_ADP_CS_1_CL0S_ENABLE | LANE_ADP_CS_1_CL1_ENABLE;
		break;
	default:
		/* For now we support only CL0s and CL1. Not CL2 */
		return -EOPNOTSUPP;
	}

	res = tb_port_read(port, &lane, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (res)
		return res;

	if (enable)
		lane |= bits;
	else
		lane &= ~bits;

	return tb_port_write(port, &lane, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}

/* Clear the CLx enable bits for @clx on @port's lane adapter. */
static int tb_port_clx_disable(struct tb_port *port, enum tb_clx clx)
{
	return __tb_port_clx_set(port, clx, false);
}

/* Set the CLx enable bits for @clx on @port's lane adapter. */
static int tb_port_clx_enable(struct tb_port *port, enum tb_clx clx)
{
	return __tb_port_clx_set(port, clx, true);
}

static int tb_port_start_lane_initialization(struct tb_port *port)
{
	int ret;
@@ -3361,35 +3467,6 @@ struct tb_port *tb_switch_find_port(struct tb_switch *sw,
	return NULL;
}

/*
 * Set (@secondary true) or clear the PMS bit in the lane adapter PHY
 * config space (LANE_ADP_CS_1) of @port. Returns 0 on success or a
 * negative errno propagated from the config space access.
 */
static int __tb_port_pm_secondary_set(struct tb_port *port, bool secondary)
{
	u32 phy;
	int ret;

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	/* Read-modify-write of the single PMS bit */
	if (secondary)
		phy |= LANE_ADP_CS_1_PMS;
	else
		phy &= ~LANE_ADP_CS_1_PMS;

	return tb_port_write(port, &phy, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}

/* Set the PMS (PM secondary) bit on @port's lane adapter. */
static int tb_port_pm_secondary_enable(struct tb_port *port)
{
	return __tb_port_pm_secondary_set(port, true);
}

/* Clear the PMS (PM secondary) bit on @port's lane adapter. */
static int tb_port_pm_secondary_disable(struct tb_port *port)
{
	return __tb_port_pm_secondary_set(port, false);
}

static int tb_switch_pm_secondary_resolve(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
@@ -3408,83 +3485,6 @@ static int tb_switch_pm_secondary_resolve(struct tb_switch *sw)
	return tb_port_pm_secondary_disable(down);
}

/* Called for USB4 or Titan Ridge routers only */
/*
 * Returns true when @port can use the given CLx state: the link must be
 * bonded (or have no dual-link partner), must not be an inter-domain
 * link, the router must support CLx, and the adapter must advertise the
 * state in LANE_ADP_CS_0. Only TB_CL1 is currently recognized.
 *
 * NOTE(review): tb_port_read() returns an int errno but is stored in a
 * bool here; any nonzero value converts to true so the check works, but
 * an int would be cleaner.
 */
static bool tb_port_clx_supported(struct tb_port *port, enum tb_clx clx)
{
	u32 mask, val;
	bool ret;

	/* Don't enable CLx in case of two single-lane links */
	if (!port->bonded && port->dual_link_port)
		return false;

	/* Don't enable CLx in case of inter-domain link */
	if (port->xdomain)
		return false;

	if (tb_switch_is_usb4(port->sw)) {
		if (!usb4_port_clx_supported(port))
			return false;
	} else if (!tb_lc_is_clx_supported(port)) {
		return false;
	}

	switch (clx) {
	case TB_CL1:
		/* CL0s and CL1 are enabled and supported together */
		mask = LANE_ADP_CS_0_CL0S_SUPPORT | LANE_ADP_CS_0_CL1_SUPPORT;
		break;

	/* For now we support only CL0s and CL1. Not CL2 */
	case TB_CL2:
	default:
		return false;
	}

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_0, 1);
	if (ret)
		return false;

	return !!(val & mask);
}

/*
 * Enable (@enable true) or disable the CLx enable bits for @clx in the
 * lane adapter PHY config space (LANE_ADP_CS_1) of @port. Only TB_CL1
 * is supported; anything else returns -EOPNOTSUPP.
 */
static int __tb_port_clx_set(struct tb_port *port, enum tb_clx clx, bool enable)
{
	u32 phy, mask;
	int ret;

	/* CL0s and CL1 are enabled and supported together */
	if (clx == TB_CL1)
		mask = LANE_ADP_CS_1_CL0S_ENABLE | LANE_ADP_CS_1_CL1_ENABLE;
	else
		/* For now we support only CL0s and CL1. Not CL2 */
		return -EOPNOTSUPP;

	ret = tb_port_read(port, &phy, TB_CFG_PORT,
			   port->cap_phy + LANE_ADP_CS_1, 1);
	if (ret)
		return ret;

	/* Read-modify-write of the CLx enable bits */
	if (enable)
		phy |= mask;
	else
		phy &= ~mask;

	return tb_port_write(port, &phy, TB_CFG_PORT,
			     port->cap_phy + LANE_ADP_CS_1, 1);
}

/* Clear the CLx enable bits for @clx on @port's lane adapter. */
static int tb_port_clx_disable(struct tb_port *port, enum tb_clx clx)
{
	return __tb_port_clx_set(port, clx, false);
}

/* Set the CLx enable bits for @clx on @port's lane adapter. */
static int tb_port_clx_enable(struct tb_port *port, enum tb_clx clx)
{
	return __tb_port_clx_set(port, clx, true);
}

static int __tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx)
{
	struct tb_switch *parent = tb_switch_parent(sw);