Commit 180b0689 authored by Mika Westerberg
Browse files

thunderbolt: Allow multiple DMA tunnels over a single XDomain connection



Currently we have had an artificial limitation of a single DMA tunnel
per XDomain connection. However, hardware wise there is no such limit
and software based connection manager can take advantage of all the DMA
rings available on the host to establish tunnels.

For this reason make the tb_xdomain_[enable|disable]_paths() to take the
DMA ring and HopID as parameter instead of storing them in the struct
tb_xdomain. We also add API functions to allocate input and output
HopIDs of the XDomain connection that the service drivers can use
instead of hard-coding.

Also convert the two existing service drivers over to this API.

Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
parent 5cfdd300
Loading
Loading
Loading
Loading
+38 −11
Original line number Diff line number Diff line
@@ -28,7 +28,6 @@
#define TBNET_LOGOUT_TIMEOUT	100

#define TBNET_RING_SIZE		256
#define TBNET_LOCAL_PATH	0xf
#define TBNET_LOGIN_RETRIES	60
#define TBNET_LOGOUT_RETRIES	5
#define TBNET_MATCH_FRAGS_ID	BIT(1)
@@ -154,8 +153,8 @@ struct tbnet_ring {
 * @login_sent: ThunderboltIP login message successfully sent
 * @login_received: ThunderboltIP login message received from the remote
 *		    host
 * @transmit_path: HopID the other end needs to use building the
 *		   opposite side path.
 * @local_transmit_path: HopID we are using to send out packets
 * @remote_transmit_path: HopID the other end is using to send packets to us
 * @connection_lock: Lock serializing access to @login_sent,
 *		     @login_received and @transmit_path.
 * @login_retries: Number of login retries currently done
@@ -184,7 +183,8 @@ struct tbnet {
	atomic_t command_id;
	bool login_sent;
	bool login_received;
	u32 transmit_path;
	int local_transmit_path;
	int remote_transmit_path;
	struct mutex connection_lock;
	int login_retries;
	struct delayed_work login_work;
@@ -257,7 +257,7 @@ static int tbnet_login_request(struct tbnet *net, u8 sequence)
			  atomic_inc_return(&net->command_id));

	request.proto_version = TBIP_LOGIN_PROTO_VERSION;
	request.transmit_path = TBNET_LOCAL_PATH;
	request.transmit_path = net->local_transmit_path;

	return tb_xdomain_request(xd, &request, sizeof(request),
				  TB_CFG_PKG_XDOMAIN_RESP, &reply,
@@ -364,10 +364,10 @@ static void tbnet_tear_down(struct tbnet *net, bool send_logout)
	mutex_lock(&net->connection_lock);

	if (net->login_sent && net->login_received) {
		int retries = TBNET_LOGOUT_RETRIES;
		int ret, retries = TBNET_LOGOUT_RETRIES;

		while (send_logout && retries-- > 0) {
			int ret = tbnet_logout_request(net);
			ret = tbnet_logout_request(net);
			if (ret != -ETIMEDOUT)
				break;
		}
@@ -377,8 +377,16 @@ static void tbnet_tear_down(struct tbnet *net, bool send_logout)
		tbnet_free_buffers(&net->rx_ring);
		tbnet_free_buffers(&net->tx_ring);

		if (tb_xdomain_disable_paths(net->xd))
		ret = tb_xdomain_disable_paths(net->xd,
					       net->local_transmit_path,
					       net->rx_ring.ring->hop,
					       net->remote_transmit_path,
					       net->tx_ring.ring->hop);
		if (ret)
			netdev_warn(net->dev, "failed to disable DMA paths\n");

		tb_xdomain_release_in_hopid(net->xd, net->remote_transmit_path);
		net->remote_transmit_path = 0;
	}

	net->login_retries = 0;
@@ -424,7 +432,7 @@ static int tbnet_handle_packet(const void *buf, size_t size, void *data)
		if (!ret) {
			mutex_lock(&net->connection_lock);
			net->login_received = true;
			net->transmit_path = pkg->transmit_path;
			net->remote_transmit_path = pkg->transmit_path;

			/* If we reached the number of max retries or
			 * previous logout, schedule another round of
@@ -597,12 +605,18 @@ static void tbnet_connected_work(struct work_struct *work)
	if (!connected)
		return;

	ret = tb_xdomain_alloc_in_hopid(net->xd, net->remote_transmit_path);
	if (ret != net->remote_transmit_path) {
		netdev_err(net->dev, "failed to allocate Rx HopID\n");
		return;
	}

	/* Both logins successful so enable the high-speed DMA paths and
	 * start the network device queue.
	 */
	ret = tb_xdomain_enable_paths(net->xd, TBNET_LOCAL_PATH,
	ret = tb_xdomain_enable_paths(net->xd, net->local_transmit_path,
				      net->rx_ring.ring->hop,
				      net->transmit_path,
				      net->remote_transmit_path,
				      net->tx_ring.ring->hop);
	if (ret) {
		netdev_err(net->dev, "failed to enable DMA paths\n");
@@ -629,6 +643,7 @@ static void tbnet_connected_work(struct work_struct *work)
err_stop_rings:
	tb_ring_stop(net->rx_ring.ring);
	tb_ring_stop(net->tx_ring.ring);
	tb_xdomain_release_in_hopid(net->xd, net->remote_transmit_path);
}

static void tbnet_login_work(struct work_struct *work)
@@ -851,6 +866,7 @@ static int tbnet_open(struct net_device *dev)
	struct tb_xdomain *xd = net->xd;
	u16 sof_mask, eof_mask;
	struct tb_ring *ring;
	int hopid;

	netif_carrier_off(dev);

@@ -862,6 +878,15 @@ static int tbnet_open(struct net_device *dev)
	}
	net->tx_ring.ring = ring;

	hopid = tb_xdomain_alloc_out_hopid(xd, -1);
	if (hopid < 0) {
		netdev_err(dev, "failed to allocate Tx HopID\n");
		tb_ring_free(net->tx_ring.ring);
		net->tx_ring.ring = NULL;
		return hopid;
	}
	net->local_transmit_path = hopid;

	sof_mask = BIT(TBIP_PDF_FRAME_START);
	eof_mask = BIT(TBIP_PDF_FRAME_END);

@@ -893,6 +918,8 @@ static int tbnet_stop(struct net_device *dev)

	tb_ring_free(net->rx_ring.ring);
	net->rx_ring.ring = NULL;

	tb_xdomain_release_out_hopid(net->xd, net->local_transmit_path);
	tb_ring_free(net->tx_ring.ring);
	net->tx_ring.ring = NULL;

+31 −4
Original line number Diff line number Diff line
@@ -13,7 +13,6 @@
#include <linux/sizes.h>
#include <linux/thunderbolt.h>

#define DMA_TEST_HOPID			8
#define DMA_TEST_TX_RING_SIZE		64
#define DMA_TEST_RX_RING_SIZE		256
#define DMA_TEST_FRAME_SIZE		SZ_4K
@@ -72,7 +71,9 @@ static const char * const dma_test_result_names[] = {
 * @svc: XDomain service the driver is bound to
 * @xd: XDomain the service belongs to
 * @rx_ring: Software ring holding RX frames
 * @rx_hopid: HopID used for receiving frames
 * @tx_ring: Software ring holding TX frames
 * @tx_hopid: HopID used for sending frames
 * @packets_to_send: Number of packets to send
 * @packets_to_receive: Number of packets to receive
 * @packets_sent: Actual number of packets sent
@@ -92,7 +93,9 @@ struct dma_test {
	const struct tb_service *svc;
	struct tb_xdomain *xd;
	struct tb_ring *rx_ring;
	int rx_hopid;
	struct tb_ring *tx_ring;
	int tx_hopid;
	unsigned int packets_to_send;
	unsigned int packets_to_receive;
	unsigned int packets_sent;
@@ -119,10 +122,12 @@ static void *dma_test_pattern;
static void dma_test_free_rings(struct dma_test *dt)
{
	if (dt->rx_ring) {
		tb_xdomain_release_in_hopid(dt->xd, dt->rx_hopid);
		tb_ring_free(dt->rx_ring);
		dt->rx_ring = NULL;
	}
	if (dt->tx_ring) {
		tb_xdomain_release_out_hopid(dt->xd, dt->tx_hopid);
		tb_ring_free(dt->tx_ring);
		dt->tx_ring = NULL;
	}
@@ -151,6 +156,14 @@ static int dma_test_start_rings(struct dma_test *dt)

		dt->tx_ring = ring;
		e2e_tx_hop = ring->hop;

		ret = tb_xdomain_alloc_out_hopid(xd, -1);
		if (ret < 0) {
			dma_test_free_rings(dt);
			return ret;
		}

		dt->tx_hopid = ret;
	}

	if (dt->packets_to_receive) {
@@ -168,11 +181,19 @@ static int dma_test_start_rings(struct dma_test *dt)
		}

		dt->rx_ring = ring;

		ret = tb_xdomain_alloc_in_hopid(xd, -1);
		if (ret < 0) {
			dma_test_free_rings(dt);
			return ret;
		}

	ret = tb_xdomain_enable_paths(dt->xd, DMA_TEST_HOPID,
		dt->rx_hopid = ret;
	}

	ret = tb_xdomain_enable_paths(dt->xd, dt->tx_hopid,
				      dt->tx_ring ? dt->tx_ring->hop : 0,
				      DMA_TEST_HOPID,
				      dt->rx_hopid,
				      dt->rx_ring ? dt->rx_ring->hop : 0);
	if (ret) {
		dma_test_free_rings(dt);
@@ -189,12 +210,18 @@ static int dma_test_start_rings(struct dma_test *dt)

static void dma_test_stop_rings(struct dma_test *dt)
{
	int ret;

	if (dt->rx_ring)
		tb_ring_stop(dt->rx_ring);
	if (dt->tx_ring)
		tb_ring_stop(dt->tx_ring);

	if (tb_xdomain_disable_paths(dt->xd))
	ret = tb_xdomain_disable_paths(dt->xd, dt->tx_hopid,
				       dt->tx_ring ? dt->tx_ring->hop : 0,
				       dt->rx_hopid,
				       dt->rx_ring ? dt->rx_ring->hop : 0);
	if (ret)
		dev_warn(&dt->svc->dev, "failed to disable DMA paths\n");

	dma_test_free_rings(dt);
+19 −5
Original line number Diff line number Diff line
@@ -791,6 +791,10 @@ int tb_domain_disconnect_pcie_paths(struct tb *tb)
 * tb_domain_approve_xdomain_paths() - Enable DMA paths for XDomain
 * @tb: Domain enabling the DMA paths
 * @xd: XDomain DMA paths are created to
 * @transmit_path: HopID we are using to send out packets
 * @transmit_ring: DMA ring used to send out packets
 * @receive_path: HopID the other end is using to send packets to us
 * @receive_ring: DMA ring used to receive packets from @receive_path
 *
 * Calls connection manager specific method to enable DMA paths to the
 * XDomain in question.
@@ -799,18 +803,25 @@ int tb_domain_disconnect_pcie_paths(struct tb *tb)
 * particular returns %-ENOTSUPP if the connection manager
 * implementation does not support XDomains.
 */
int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
				    int transmit_path, int transmit_ring,
				    int receive_path, int receive_ring)
{
	if (!tb->cm_ops->approve_xdomain_paths)
		return -ENOTSUPP;

	return tb->cm_ops->approve_xdomain_paths(tb, xd);
	return tb->cm_ops->approve_xdomain_paths(tb, xd, transmit_path,
			transmit_ring, receive_path, receive_ring);
}

/**
 * tb_domain_disconnect_xdomain_paths() - Disable DMA paths for XDomain
 * @tb: Domain disabling the DMA paths
 * @xd: XDomain whose DMA paths are disconnected
 * @transmit_path: HopID we are using to send out packets
 * @transmit_ring: DMA ring used to send out packets
 * @receive_path: HopID the other end is using to send packets to us
 * @receive_ring: DMA ring used to receive packets from @receive_path
 *
 * Calls connection manager specific method to disconnect DMA paths to
 * the XDomain in question.
@@ -819,12 +830,15 @@ int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
 * particular returns %-ENOTSUPP if the connection manager
 * implementation does not support XDomains.
 */
int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
				       int transmit_path, int transmit_ring,
				       int receive_path, int receive_ring)
{
	if (!tb->cm_ops->disconnect_xdomain_paths)
		return -ENOTSUPP;

	return tb->cm_ops->disconnect_xdomain_paths(tb, xd);
	return tb->cm_ops->disconnect_xdomain_paths(tb, xd, transmit_path,
			transmit_ring, receive_path, receive_ring);
}

static int disconnect_xdomain(struct device *dev, void *data)
@@ -835,7 +849,7 @@ static int disconnect_xdomain(struct device *dev, void *data)

	xd = tb_to_xdomain(dev);
	if (xd && xd->tb == tb)
		ret = tb_xdomain_disable_paths(xd);
		ret = tb_xdomain_disable_all_paths(xd);

	return ret;
}
+20 −12
Original line number Diff line number Diff line
@@ -557,7 +557,9 @@ static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
	return 0;
}

static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
					int transmit_path, int transmit_ring,
					int receive_path, int receive_ring)
{
	struct icm_fr_pkg_approve_xdomain_response reply;
	struct icm_fr_pkg_approve_xdomain request;
@@ -568,10 +570,10 @@ static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
	request.link_info = xd->depth << ICM_LINK_INFO_DEPTH_SHIFT | xd->link;
	memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));

	request.transmit_path = xd->transmit_path;
	request.transmit_ring = xd->transmit_ring;
	request.receive_path = xd->receive_path;
	request.receive_ring = xd->receive_ring;
	request.transmit_path = transmit_path;
	request.transmit_ring = transmit_ring;
	request.receive_path = receive_path;
	request.receive_ring = receive_ring;

	memset(&reply, 0, sizeof(reply));
	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
@@ -585,7 +587,9 @@ static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
	return 0;
}

static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
					   int transmit_path, int transmit_ring,
					   int receive_path, int receive_ring)
{
	u8 phy_port;
	u8 cmd;
@@ -1122,7 +1126,9 @@ static int icm_tr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
	return 0;
}

static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
					int transmit_path, int transmit_ring,
					int receive_path, int receive_ring)
{
	struct icm_tr_pkg_approve_xdomain_response reply;
	struct icm_tr_pkg_approve_xdomain request;
@@ -1132,10 +1138,10 @@ static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
	request.hdr.code = ICM_APPROVE_XDOMAIN;
	request.route_hi = upper_32_bits(xd->route);
	request.route_lo = lower_32_bits(xd->route);
	request.transmit_path = xd->transmit_path;
	request.transmit_ring = xd->transmit_ring;
	request.receive_path = xd->receive_path;
	request.receive_ring = xd->receive_ring;
	request.transmit_path = transmit_path;
	request.transmit_ring = transmit_ring;
	request.receive_path = receive_path;
	request.receive_ring = receive_ring;
	memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));

	memset(&reply, 0, sizeof(reply));
@@ -1176,7 +1182,9 @@ static int icm_tr_xdomain_tear_down(struct tb *tb, struct tb_xdomain *xd,
	return 0;
}

static int icm_tr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
static int icm_tr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
					   int transmit_path, int transmit_ring,
					   int receive_path, int receive_ring)
{
	int ret;

+30 −18
Original line number Diff line number Diff line
@@ -1075,7 +1075,9 @@ static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
	return 0;
}

static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
				    int transmit_path, int transmit_ring,
				    int receive_path, int receive_ring)
{
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *nhi_port, *dst_port;
@@ -1087,9 +1089,8 @@ static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);

	mutex_lock(&tb->lock);
	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
				     xd->transmit_path, xd->receive_ring,
				     xd->receive_path);
	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path,
				     transmit_ring, receive_path, receive_ring);
	if (!tunnel) {
		mutex_unlock(&tb->lock);
		return -ENOMEM;
@@ -1108,29 +1109,40 @@ static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
	return 0;
}

static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
					  int transmit_path, int transmit_ring,
					  int receive_path, int receive_ring)
{
	struct tb_port *dst_port;
	struct tb_tunnel *tunnel;
	struct tb_cm *tcm = tb_priv(tb);
	struct tb_port *nhi_port, *dst_port;
	struct tb_tunnel *tunnel, *n;
	struct tb_switch *sw;

	sw = tb_to_switch(xd->dev.parent);
	dst_port = tb_port_at(xd->route, sw);
	nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);

	/*
	 * It is possible that the tunnel was already torn down (in
	 * case of cable disconnect) so it is fine if we cannot find it
	 * here anymore.
	 */
	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
		if (!tb_tunnel_is_dma(tunnel))
			continue;
		if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port)
			continue;

		if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring,
					receive_path, receive_ring))
			tb_deactivate_and_free_tunnel(tunnel);
	}
}

static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
				       int transmit_path, int transmit_ring,
				       int receive_path, int receive_ring)
{
	if (!xd->is_unplugged) {
		mutex_lock(&tb->lock);
		__tb_disconnect_xdomain_paths(tb, xd);
		__tb_disconnect_xdomain_paths(tb, xd, transmit_path,
					      transmit_ring, receive_path,
					      receive_ring);
		mutex_unlock(&tb->lock);
	}
	return 0;
@@ -1206,12 +1218,12 @@ static void tb_handle_hotplug(struct work_struct *work)
			 * tb_xdomain_remove() so setting XDomain as
			 * unplugged here prevents deadlock if they call
			 * tb_xdomain_disable_paths(). We will tear down
			 * the path below.
			 * all the tunnels below.
			 */
			xd->is_unplugged = true;
			tb_xdomain_remove(xd);
			port->xdomain = NULL;
			__tb_disconnect_xdomain_paths(tb, xd);
			__tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1);
			tb_xdomain_put(xd);
			tb_port_unconfigure_xdomain(port);
		} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
Loading