Commit b601135e authored by David S. Miller
Browse files

Merge branch 'ENETC-mqprio-taprio-cleanup'

Vladimir Oltean says:

====================
net: ENETC mqprio/taprio cleanup

Please excuse the increased patch set size compared to v4's 15 patches,
but Claudiu stirred up the pot :) when he pointed out that the mqprio
TXQ validation procedure is still incorrect, so I had to fix that, and
then do some consolidation work so that taprio doesn't duplicate
mqprio's bugs. Compared to v4, 3 patches are new and 1 was dropped for now
("net/sched: taprio: mask off bits in gate mask that exceed number of TCs"),
since there's not really much to gain from it. Since the previous patch
set has largely been reviewed, I hope that a delta overview will help
and make up for the large size.

v4->v5:
- new patches:
  "[08/17] net/sched: mqprio: allow reverse TC:TXQ mappings"
  "[11/17] net/sched: taprio: centralize mqprio qopt validation"
  "[12/17] net/sched: refactor mqprio qopt reconstruction to a library function"
- changed patches worth revisiting:
  "[09/17] net/sched: mqprio: allow offloading drivers to request queue
  count validation"
v4 at:
https://patchwork.kernel.org/project/netdevbpf/cover/20230130173145.475943-1-vladimir.oltean@nxp.com/

v3->v4:
- adjusted patch 07/15 to not remove "#include <net/pkt_sched.h>" from
  ti cpsw
https://patchwork.kernel.org/project/netdevbpf/cover/20230127001516.592984-1-vladimir.oltean@nxp.com/

v2->v3:
- move min_num_stack_tx_queues definition so it doesn't conflict with
  the ethtool mm patches I haven't submitted yet for enetc (and also to
  make use of a 4 byte hole)
- warn and mask off excess TCs in gate mask instead of failing
- finally CC qdisc maintainers
v2 at:
https://patchwork.kernel.org/project/netdevbpf/patch/20230126125308.1199404-16-vladimir.oltean@nxp.com/

v1->v2:
- patches 1->4 are new
- update some header inclusions in drivers
- fix typo (said "taprio" instead of "mqprio")
- better enetc mqprio error handling
- dynamically reconstruct mqprio configuration in taprio offload
- also let stmmac and tsnep use per-TXQ gate_mask
v1 (RFC) at:
https://patchwork.kernel.org/project/netdevbpf/cover/20230120141537.1350744-1-vladimir.oltean@nxp.com/



The main goal of this patch set is to make taprio pass the mqprio queue
configuration structure down to ndo_setup_tc() - patch 13/17. But mqprio
itself is not in the best shape currently, so there are some
consolidation patches on that as well.

Next, there are some consolidation patches in the enetc driver's
handling of TX queues and their traffic class assignment. Then, there is
a consolidation between the TX queue configuration for mqprio and
taprio.

Finally, there is a change in the meaning of the gate_mask passed by
taprio through ndo_setup_tc(). We introduce a capability through which
drivers can request the gate mask to be per TXQ. The default is changed
so that it is per TC.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents 84371145 06b1c911
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -21,6 +21,7 @@
#include <linux/ip.h>
#include <linux/udp.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <linux/filter.h>

MODULE_LICENSE("GPL v2");
+1 −1
Original line number Diff line number Diff line
@@ -4,7 +4,7 @@
#ifndef __CXGB4_TC_MQPRIO_H__
#define __CXGB4_TC_MQPRIO_H__

#include <net/pkt_cls.h>
#include <net/pkt_sched.h>

#define CXGB4_EOSW_TXQ_DEFAULT_DESC_NUM 128

+21 −0
Original line number Diff line number Diff line
@@ -403,12 +403,33 @@ static int tsnep_taprio(struct tsnep_adapter *adapter,
	return 0;
}

/* Report qdisc offload capabilities for the TC_QUERY_CAPS hook.
 *
 * Called from tsnep_tc_setup() with the tc_query_caps_base container;
 * base->type identifies which qdisc is querying and base->caps points to
 * that qdisc's capability structure.
 *
 * Returns 0 on success, -EOPNOTSUPP for qdiscs this driver cannot offload
 * or when the hardware lacks gate control.
 */
static int tsnep_tc_query_caps(struct tsnep_adapter *adapter,
			       struct tc_query_caps_base *base)
{
	switch (base->type) {
	case TC_SETUP_QDISC_TAPRIO: {
		struct tc_taprio_caps *caps = base->caps;

		/* taprio offload requires gate-control hardware support */
		if (!adapter->gate_control)
			return -EOPNOTSUPP;

		/* Request the taprio gate mask in per-TXQ form rather than
		 * the per-TC default (see the cover letter of this series).
		 */
		caps->gate_mask_per_txq = true;

		return 0;
	}
	default:
		return -EOPNOTSUPP;
	}
}

int tsnep_tc_setup(struct net_device *netdev, enum tc_setup_type type,
		   void *type_data)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);

	switch (type) {
	case TC_QUERY_CAPS:
		return tsnep_tc_query_caps(adapter, type_data);
	case TC_SETUP_QDISC_TAPRIO:
		return tsnep_taprio(adapter, type_data);
	default:
+69 −37
Original line number Diff line number Diff line
@@ -2609,21 +2609,25 @@ static int enetc_reconfigure(struct enetc_ndev_priv *priv, bool extended,
	return err;
}

int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
/* Dump the priority currently assigned to each TX BD ring via netdev_dbg().
 * Debug aid only; no hardware access and no side effects on state.
 */
static void enetc_debug_tx_ring_prios(struct enetc_ndev_priv *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_rings; i++)
		netdev_dbg(priv->ndev, "TX ring %d prio %d\n", i,
			   priv->tx_ring[i]->prio);
}

static void enetc_reset_tc_mqprio(struct net_device *ndev)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct tc_mqprio_qopt *mqprio = type_data;
	struct enetc_hw *hw = &priv->si->hw;
	struct enetc_bdr *tx_ring;
	int num_stack_tx_queues;
	u8 num_tc;
	int i;

	num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
	num_tc = mqprio->num_tc;

	if (!num_tc) {
	netdev_reset_tc(ndev);
	netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
	priv->min_num_stack_tx_queues = num_possible_cpus();
@@ -2635,37 +2639,65 @@ int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
		enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
	}

		return 0;
	enetc_debug_tx_ring_prios(priv);
}

	/* Check if we have enough BD rings available to accommodate all TCs */
	if (num_tc > num_stack_tx_queues) {
		netdev_err(ndev, "Max %d traffic classes supported\n",
			   priv->num_tx_rings);
		return -EINVAL;
int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct tc_mqprio_qopt *mqprio = type_data;
	struct enetc_hw *hw = &priv->si->hw;
	int num_stack_tx_queues = 0;
	u8 num_tc = mqprio->num_tc;
	struct enetc_bdr *tx_ring;
	int offset, count;
	int err, tc, q;

	if (!num_tc) {
		enetc_reset_tc_mqprio(ndev);
		return 0;
	}

	/* For the moment, we use only one BD ring per TC.
	 *
	 * Configure num_tc BD rings with increasing priorities.
	err = netdev_set_num_tc(ndev, num_tc);
	if (err)
		return err;

	for (tc = 0; tc < num_tc; tc++) {
		offset = mqprio->offset[tc];
		count = mqprio->count[tc];
		num_stack_tx_queues += count;

		err = netdev_set_tc_queue(ndev, tc, count, offset);
		if (err)
			goto err_reset_tc;

		for (q = offset; q < offset + count; q++) {
			tx_ring = priv->tx_ring[q];
			/* The prio_tc_map is skb_tx_hash()'s way of selecting
			 * between TX queues based on skb->priority. As such,
			 * there's nothing to offload based on it.
			 * Make the mqprio "traffic class" be the priority of
			 * this ring group, and leave the Tx IPV to traffic
			 * class mapping as its default mapping value of 1:1.
			 */
	for (i = 0; i < num_tc; i++) {
		tx_ring = priv->tx_ring[i];
		tx_ring->prio = i;
			tx_ring->prio = tc;
			enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
		}
	}

	/* Reset the number of netdev queues based on the TC count */
	netif_set_real_num_tx_queues(ndev, num_tc);
	priv->min_num_stack_tx_queues = num_tc;
	err = netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
	if (err)
		goto err_reset_tc;

	netdev_set_num_tc(ndev, num_tc);
	priv->min_num_stack_tx_queues = num_stack_tx_queues;

	/* Each TC is associated with one netdev queue */
	for (i = 0; i < num_tc; i++)
		netdev_set_tc_queue(ndev, i, 1, i);
	enetc_debug_tx_ring_prios(priv);

	return 0;

err_reset_tc:
	enetc_reset_tc_mqprio(ndev);
	return err;
}
EXPORT_SYMBOL_GPL(enetc_setup_tc_mqprio);

+13 −14
Original line number Diff line number Diff line
@@ -136,29 +136,21 @@ int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data)
{
	struct tc_taprio_qopt_offload *taprio = type_data;
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_hw *hw = &priv->si->hw;
	struct enetc_bdr *tx_ring;
	int err;
	int i;
	int err, i;

	/* TSD and Qbv are mutually exclusive in hardware */
	for (i = 0; i < priv->num_tx_rings; i++)
		if (priv->tx_ring[i]->tsd_enable)
			return -EBUSY;

	for (i = 0; i < priv->num_tx_rings; i++) {
		tx_ring = priv->tx_ring[i];
		tx_ring->prio = taprio->enable ? i : 0;
		enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
	}
	err = enetc_setup_tc_mqprio(ndev, &taprio->mqprio);
	if (err)
		return err;

	err = enetc_setup_taprio(ndev, taprio);
	if (err) {
		for (i = 0; i < priv->num_tx_rings; i++) {
			tx_ring = priv->tx_ring[i];
			tx_ring->prio = taprio->enable ? 0 : i;
			enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
		}
		taprio->mqprio.qopt.num_tc = 0;
		enetc_setup_tc_mqprio(ndev, &taprio->mqprio);
	}

	return err;
@@ -1611,6 +1603,13 @@ int enetc_qos_query_caps(struct net_device *ndev, void *type_data)
	struct enetc_si *si = priv->si;

	switch (base->type) {
	case TC_SETUP_QDISC_MQPRIO: {
		struct tc_mqprio_caps *caps = base->caps;

		caps->validate_queue_counts = true;

		return 0;
	}
	case TC_SETUP_QDISC_TAPRIO: {
		struct tc_taprio_caps *caps = base->caps;

Loading