Commit 6cc74443 authored by Dexuan Cui, committed by Jakub Kicinski

net: mana: Add RX fencing



RX fencing allows the driver to know that any prior change to the RQs has
finished; e.g. when the RQs are disabled/enabled or the hashkey/indirection
table is changed, RX fencing is required.

Remove the previous "ssleep(1)" workaround and add real support for RX
fencing, as the PF driver now supports the MANA_FENCE_RQ request (any old
PF driver that doesn't support the request won't be used in production).
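
The mechanism is the kernel's standard completion pattern: the requester
arms a per-RQ "struct completion", posts the fence request to the PF, and
blocks until the RX CQE path sees the matching CQE_RX_OBJECT_FENCE and
signals the event. Below is a minimal sketch of that pattern, not code
from this patch; post_fence_request() and handle_fence_cqe() are
hypothetical stand-ins for the driver's real request and CQE paths:

	#include <linux/completion.h>
	#include <linux/errno.h>
	#include <linux/jiffies.h>

	static struct completion fence_done;

	static void post_fence_request(void);	/* hypothetical request path */

	/* Requester: arm the event, post the fence, wait for the CQE. */
	static int fence_and_wait(void)
	{
		/* Arm before posting so a fast CQE can't be missed:
		 * complete() records the event even if it runs before
		 * the wait starts.
		 */
		init_completion(&fence_done);

		post_fence_request();

		/* Returns 0 on timeout, remaining jiffies (>= 1) otherwise. */
		if (!wait_for_completion_timeout(&fence_done, 10 * HZ))
			return -ETIMEDOUT;

		return 0;
	}

	/* CQE handler: HW indicates all prior RQ work has drained. */
	static void handle_fence_cqe(void)
	{
		complete(&fence_done);
	}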

Signed-off-by: Dexuan Cui <decui@microsoft.com>
Reviewed-by: Haiyang Zhang <haiyangz@microsoft.com>
Link: https://lore.kernel.org/r/20211216001748.8751-1-decui@microsoft.com


Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 431b9b4d
drivers/net/ethernet/microsoft/mana/mana.h  +2 −0
@@ -289,6 +289,8 @@ struct mana_rxq {
 
 	struct mana_cq rx_cq;
 
+	struct completion fence_event;
+
 	struct net_device *ndev;
 
 	/* Total number of receive buffers to be allocated */
drivers/net/ethernet/microsoft/mana/mana_en.c  +64 −5
@@ -750,6 +750,61 @@ static int mana_create_eq(struct mana_context *ac)
 	return err;
 }
 
+static int mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq)
+{
+	struct mana_fence_rq_resp resp = {};
+	struct mana_fence_rq_req req = {};
+	int err;
+
+	init_completion(&rxq->fence_event);
+
+	mana_gd_init_req_hdr(&req.hdr, MANA_FENCE_RQ,
+			     sizeof(req), sizeof(resp));
+	req.wq_obj_handle =  rxq->rxobj;
+
+	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+				sizeof(resp));
+	if (err) {
+		netdev_err(apc->ndev, "Failed to fence RQ %u: %d\n",
+			   rxq->rxq_idx, err);
+		return err;
+	}
+
+	err = mana_verify_resp_hdr(&resp.hdr, MANA_FENCE_RQ, sizeof(resp));
+	if (err || resp.hdr.status) {
+		netdev_err(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n",
+			   rxq->rxq_idx, err, resp.hdr.status);
+		if (!err)
+			err = -EPROTO;
+
+		return err;
+	}
+
+	if (wait_for_completion_timeout(&rxq->fence_event, 10 * HZ) == 0) {
+		netdev_err(apc->ndev, "Failed to fence RQ %u: timed out\n",
+			   rxq->rxq_idx);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static void mana_fence_rqs(struct mana_port_context *apc)
+{
+	unsigned int rxq_idx;
+	struct mana_rxq *rxq;
+	int err;
+
+	for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
+		rxq = apc->rxqs[rxq_idx];
+		err = mana_fence_rq(apc, rxq);
+
+		/* In case of any error, use sleep instead. */
+		if (err)
+			msleep(100);
+	}
+}
+
 static int mana_move_wq_tail(struct gdma_queue *wq, u32 num_units)
 {
 	u32 used_space_old;
@@ -1023,7 +1078,7 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
 		return;
 
 	case CQE_RX_OBJECT_FENCE:
-		netdev_err(ndev, "RX Fencing is unsupported\n");
+		complete(&rxq->fence_event);
 		return;
 
 	default:
@@ -1617,6 +1672,7 @@ int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
 		    bool update_hash, bool update_tab)
 {
 	u32 queue_idx;
+	int err;
 	int i;
 
 	if (update_tab) {
@@ -1626,7 +1682,13 @@ int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
 		}
 	}
 
-	return mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
+	err = mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
+	if (err)
+		return err;
+
+	mana_fence_rqs(apc);
+
+	return 0;
 }
 
 static int mana_init_port(struct net_device *ndev)
@@ -1773,9 +1835,6 @@ static int mana_dealloc_queues(struct net_device *ndev)
 		return err;
 	}
 
-	/* TODO: Implement RX fencing */
-	ssleep(1);
-
 	mana_destroy_vport(apc);
 
 	return 0;
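
Two details of the patch above are worth noting. First, mana_fence_rqs()
falls back to msleep(100) per queue when a fence attempt fails, keeping a
best-effort delay in place of the old blanket "ssleep(1)". Second, the
init-then-post ordering in mana_fence_rq() makes the wait race-free:
complete() increments the completion's internal count, which a later wait
consumes, so a fence CQE arriving before the requester reaches
wait_for_completion_timeout() is not lost. In outline (comments added
here for illustration):

	init_completion(&rxq->fence_event);	/* 1: arm the event */

	/* 2: post MANA_FENCE_RQ via mana_send_request() */

	/* 3: returns immediately if the fence CQE already fired
	 *    between steps 2 and 3, since that complete() call
	 *    was recorded in the completion's count
	 */
	wait_for_completion_timeout(&rxq->fence_event, 10 * HZ);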