Commit b10d10a7 authored by Jakub Kicinski
Browse files

Merge tag 'mlx5-updates-2023-07-24' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2023-07-24

1) Generalize devcom implementation to be independent of number of ports
   or device's GUID.

2) Save memory on command interface statistics.

3) General code cleanups

* tag 'mlx5-updates-2023-07-24' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5: Give esw_offloads_load/unload_rep() "mlx5_" prefix
  net/mlx5: Make mlx5_eswitch_load/unload_vport() static
  net/mlx5: Make mlx5_esw_offloads_rep_load/unload() static
  net/mlx5: Remove pointless devlink_rate checks
  net/mlx5: Don't check vport->enabled in port ops
  net/mlx5e: Make flow classification filters static
  net/mlx5e: Remove duplicate code for user flow
  net/mlx5: Allocate command stats with xarray
  net/mlx5: split mlx5_cmd_init() to probe and reload routines
  net/mlx5: Remove redundant cmdif revision check
  net/mlx5: Re-organize mlx5_cmd struct
  net/mlx5e: E-Switch, Allow devcom initialization on more vports
  net/mlx5e: E-Switch, Register devcom device with switch id key
  net/mlx5: Devcom, Infrastructure changes
  net/mlx5: Use shared code for checking lag is supported
====================

Link: https://lore.kernel.org/r/20230727183914.69229-1-saeed@kernel.org


Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 97d0dca7 9eca8bb8
Loading
Loading
Loading
Loading
+114 −109
Original line number Diff line number Diff line
@@ -162,18 +162,18 @@ static int cmd_alloc_index(struct mlx5_cmd *cmd)
	int ret;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
	if (ret < cmd->max_reg_cmds)
		clear_bit(ret, &cmd->bitmask);
	ret = find_first_bit(&cmd->vars.bitmask, cmd->vars.max_reg_cmds);
	if (ret < cmd->vars.max_reg_cmds)
		clear_bit(ret, &cmd->vars.bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);

	return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
	return ret < cmd->vars.max_reg_cmds ? ret : -ENOMEM;
}

static void cmd_free_index(struct mlx5_cmd *cmd, int idx)
{
	lockdep_assert_held(&cmd->alloc_lock);
	set_bit(idx, &cmd->bitmask);
	set_bit(idx, &cmd->vars.bitmask);
}

static void cmd_ent_get(struct mlx5_cmd_work_ent *ent)
@@ -192,7 +192,7 @@ static void cmd_ent_put(struct mlx5_cmd_work_ent *ent)

	if (ent->idx >= 0) {
		cmd_free_index(cmd, ent->idx);
		up(ent->page_queue ? &cmd->pages_sem : &cmd->sem);
		up(ent->page_queue ? &cmd->vars.pages_sem : &cmd->vars.sem);
	}

	cmd_free_ent(ent);
@@ -202,7 +202,7 @@ static void cmd_ent_put(struct mlx5_cmd_work_ent *ent)

static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
{
	return cmd->cmd_buf + (idx << cmd->log_stride);
	return cmd->cmd_buf + (idx << cmd->vars.log_stride);
}

static int mlx5_calc_cmd_blocks(struct mlx5_cmd_msg *msg)
@@ -974,7 +974,7 @@ static void cmd_work_handler(struct work_struct *work)
	cb_timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD));

	complete(&ent->handling);
	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
	sem = ent->page_queue ? &cmd->vars.pages_sem : &cmd->vars.sem;
	down(sem);
	if (!ent->page_queue) {
		alloc_ret = cmd_alloc_index(cmd);
@@ -994,9 +994,9 @@ static void cmd_work_handler(struct work_struct *work)
		}
		ent->idx = alloc_ret;
	} else {
		ent->idx = cmd->max_reg_cmds;
		ent->idx = cmd->vars.max_reg_cmds;
		spin_lock_irqsave(&cmd->alloc_lock, flags);
		clear_bit(ent->idx, &cmd->bitmask);
		clear_bit(ent->idx, &cmd->vars.bitmask);
		spin_unlock_irqrestore(&cmd->alloc_lock, flags);
	}

@@ -1225,8 +1225,8 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
		goto out_free;

	ds = ent->ts2 - ent->ts1;
	if (ent->op < MLX5_CMD_OP_MAX) {
		stats = &cmd->stats[ent->op];
	stats = xa_load(&cmd->stats, ent->op);
	if (stats) {
		spin_lock_irq(&stats->lock);
		stats->sum += ds;
		++stats->n;
@@ -1548,7 +1548,6 @@ static void clean_debug_files(struct mlx5_core_dev *dev)
	if (!mlx5_debugfs_root)
		return;

	mlx5_cmdif_debugfs_cleanup(dev);
	debugfs_remove_recursive(dbg->dbg_root);
}

@@ -1563,8 +1562,6 @@ static void create_debugfs_files(struct mlx5_core_dev *dev)
	debugfs_create_file("out_len", 0600, dbg->dbg_root, dev, &olfops);
	debugfs_create_u8("status", 0600, dbg->dbg_root, &dbg->status);
	debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);

	mlx5_cmdif_debugfs_init(dev);
}

void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode)
@@ -1572,15 +1569,15 @@ void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode)
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->max_reg_cmds; i++)
		down(&cmd->sem);
	down(&cmd->pages_sem);
	for (i = 0; i < cmd->vars.max_reg_cmds; i++)
		down(&cmd->vars.sem);
	down(&cmd->vars.pages_sem);

	cmd->allowed_opcode = opcode;

	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
	up(&cmd->vars.pages_sem);
	for (i = 0; i < cmd->vars.max_reg_cmds; i++)
		up(&cmd->vars.sem);
}

static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
@@ -1588,15 +1585,15 @@ static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->max_reg_cmds; i++)
		down(&cmd->sem);
	down(&cmd->pages_sem);
	for (i = 0; i < cmd->vars.max_reg_cmds; i++)
		down(&cmd->vars.sem);
	down(&cmd->vars.pages_sem);

	cmd->mode = mode;

	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
	up(&cmd->vars.pages_sem);
	for (i = 0; i < cmd->vars.max_reg_cmds; i++)
		up(&cmd->vars.sem);
}

static int cmd_comp_notifier(struct notifier_block *nb,
@@ -1655,7 +1652,7 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force

	/* there can be at most 32 command queues */
	vector = vec & 0xffffffff;
	for (i = 0; i < (1 << cmd->log_sz); i++) {
	for (i = 0; i < (1 << cmd->vars.log_sz); i++) {
		if (test_bit(i, &vector)) {
			ent = cmd->ent_arr[i];

@@ -1698,8 +1695,8 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force

			if (ent->callback) {
				ds = ent->ts2 - ent->ts1;
				if (ent->op < MLX5_CMD_OP_MAX) {
					stats = &cmd->stats[ent->op];
				stats = xa_load(&cmd->stats, ent->op);
				if (stats) {
					spin_lock_irqsave(&stats->lock, flags);
					stats->sum += ds;
					++stats->n;
@@ -1744,7 +1741,7 @@ static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
	/* wait for pending handlers to complete */
	mlx5_eq_synchronize_cmd_irq(dev);
	spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
	vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
	vector = ~dev->cmd.vars.bitmask & ((1ul << (1 << dev->cmd.vars.log_sz)) - 1);
	if (!vector)
		goto no_trig;

@@ -1753,14 +1750,14 @@ static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
	 * to guarantee pending commands will not get freed in the meanwhile.
	 * For that reason, it also has to be done inside the alloc_lock.
	 */
	for_each_set_bit(i, &bitmask, (1 << cmd->log_sz))
	for_each_set_bit(i, &bitmask, (1 << cmd->vars.log_sz))
		cmd_ent_get(cmd->ent_arr[i]);
	vector |= MLX5_TRIGGERED_CMD_COMP;
	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);

	mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
	mlx5_cmd_comp_handler(dev, vector, true);
	for_each_set_bit(i, &bitmask, (1 << cmd->log_sz))
	for_each_set_bit(i, &bitmask, (1 << cmd->vars.log_sz))
		cmd_ent_put(cmd->ent_arr[i]);
	return;

@@ -1773,22 +1770,22 @@ void mlx5_cmd_flush(struct mlx5_core_dev *dev)
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->max_reg_cmds; i++) {
		while (down_trylock(&cmd->sem)) {
	for (i = 0; i < cmd->vars.max_reg_cmds; i++) {
		while (down_trylock(&cmd->vars.sem)) {
			mlx5_cmd_trigger_completions(dev);
			cond_resched();
		}
	}

	while (down_trylock(&cmd->pages_sem)) {
	while (down_trylock(&cmd->vars.pages_sem)) {
		mlx5_cmd_trigger_completions(dev);
		cond_resched();
	}

	/* Unlock cmdif */
	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
	up(&cmd->vars.pages_sem);
	for (i = 0; i < cmd->vars.max_reg_cmds; i++)
		up(&cmd->vars.sem);
}

static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
@@ -1858,7 +1855,7 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		/* atomic context may not sleep */
		if (callback)
			return -EINVAL;
		down(&dev->cmd.throttle_sem);
		down(&dev->cmd.vars.throttle_sem);
	}

	pages_queue = is_manage_pages(in);
@@ -1903,7 +1900,7 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
	free_msg(dev, inb);
out_up:
	if (throttle_op)
		up(&dev->cmd.throttle_sem);
		up(&dev->cmd.vars.throttle_sem);
	return err;
}

@@ -1926,7 +1923,9 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
	if (!err || !(strcmp(namep, "unknown command opcode")))
		return;

	stats = &dev->cmd.stats[opcode];
	stats = xa_load(&dev->cmd.stats, opcode);
	if (!stats)
		return;
	spin_lock_irq(&stats->lock);
	stats->failed++;
	if (err < 0)
@@ -2190,19 +2189,8 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
	int size = sizeof(struct mlx5_cmd_prot_block);
	int align = roundup_pow_of_two(size);
	struct mlx5_cmd *cmd = &dev->cmd;
	u32 cmd_h, cmd_l;
	u16 cmd_if_rev;
	u32 cmd_l;
	int err;
	int i;

	memset(cmd, 0, sizeof(*cmd));
	cmd_if_rev = cmdif_rev(dev);
	if (cmd_if_rev != CMD_IF_REV) {
		mlx5_core_err(dev,
			      "Driver cmdif rev(%d) differs from firmware's(%d)\n",
			      CMD_IF_REV, cmd_if_rev);
		return -EINVAL;
	}

	cmd->pool = dma_pool_create("mlx5_cmd", mlx5_core_dma_dev(dev), size, align, 0);
	if (!cmd->pool)
@@ -2212,62 +2200,16 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
	if (err)
		goto err_free_pool;

	cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
	cmd->log_sz = cmd_l >> 4 & 0xf;
	cmd->log_stride = cmd_l & 0xf;
	if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
		mlx5_core_err(dev, "firmware reports too many outstanding commands %d\n",
			      1 << cmd->log_sz);
		err = -EINVAL;
		goto err_free_page;
	}

	if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
		mlx5_core_err(dev, "command queue size overflow\n");
		err = -EINVAL;
		goto err_free_page;
	}

	cmd->state = MLX5_CMDIF_STATE_DOWN;
	cmd->checksum_disabled = 1;
	cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
	cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1;

	cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
	if (cmd->cmdif_rev > CMD_IF_REV) {
		mlx5_core_err(dev, "driver does not support command interface version. driver %d, firmware %d\n",
			      CMD_IF_REV, cmd->cmdif_rev);
		err = -EOPNOTSUPP;
		goto err_free_page;
	}

	spin_lock_init(&cmd->alloc_lock);
	spin_lock_init(&cmd->token_lock);
	for (i = 0; i < MLX5_CMD_OP_MAX; i++)
		spin_lock_init(&cmd->stats[i].lock);

	sema_init(&cmd->sem, cmd->max_reg_cmds);
	sema_init(&cmd->pages_sem, 1);
	sema_init(&cmd->throttle_sem, DIV_ROUND_UP(cmd->max_reg_cmds, 2));

	cmd_h = (u32)((u64)(cmd->dma) >> 32);
	cmd_l = (u32)(cmd->dma);
	if (cmd_l & 0xfff) {
		mlx5_core_err(dev, "invalid command queue address\n");
		err = -ENOMEM;
		goto err_free_page;
		goto err_cmd_page;
	}
	cmd->checksum_disabled = 1;

	iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
	iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);

	/* Make sure firmware sees the complete address before we proceed */
	wmb();

	mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));

	cmd->mode = CMD_MODE_POLLING;
	cmd->allowed_opcode = CMD_ALLOWED_OPCODE_ALL;
	spin_lock_init(&cmd->alloc_lock);
	spin_lock_init(&cmd->token_lock);

	create_msg_cache(dev);

@@ -2279,16 +2221,14 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
		goto err_cache;
	}

	create_debugfs_files(dev);
	mlx5_cmdif_debugfs_init(dev);

	return 0;

err_cache:
	destroy_msg_cache(dev);

err_free_page:
err_cmd_page:
	free_cmd_page(dev, cmd);

err_free_pool:
	dma_pool_destroy(cmd->pool);
	return err;
@@ -2298,13 +2238,78 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	clean_debug_files(dev);
	mlx5_cmdif_debugfs_cleanup(dev);
	destroy_workqueue(cmd->wq);
	destroy_msg_cache(dev);
	free_cmd_page(dev, cmd);
	dma_pool_destroy(cmd->pool);
}

int mlx5_cmd_enable(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	u32 cmd_h, cmd_l;

	memset(&cmd->vars, 0, sizeof(cmd->vars));
	cmd->vars.cmdif_rev = cmdif_rev(dev);
	if (cmd->vars.cmdif_rev != CMD_IF_REV) {
		mlx5_core_err(dev,
			      "Driver cmdif rev(%d) differs from firmware's(%d)\n",
			      CMD_IF_REV, cmd->vars.cmdif_rev);
		return -EINVAL;
	}

	cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
	cmd->vars.log_sz = cmd_l >> 4 & 0xf;
	cmd->vars.log_stride = cmd_l & 0xf;
	if (1 << cmd->vars.log_sz > MLX5_MAX_COMMANDS) {
		mlx5_core_err(dev, "firmware reports too many outstanding commands %d\n",
			      1 << cmd->vars.log_sz);
		return -EINVAL;
	}

	if (cmd->vars.log_sz + cmd->vars.log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
		mlx5_core_err(dev, "command queue size overflow\n");
		return -EINVAL;
	}

	cmd->state = MLX5_CMDIF_STATE_DOWN;
	cmd->vars.max_reg_cmds = (1 << cmd->vars.log_sz) - 1;
	cmd->vars.bitmask = (1UL << cmd->vars.max_reg_cmds) - 1;

	sema_init(&cmd->vars.sem, cmd->vars.max_reg_cmds);
	sema_init(&cmd->vars.pages_sem, 1);
	sema_init(&cmd->vars.throttle_sem, DIV_ROUND_UP(cmd->vars.max_reg_cmds, 2));

	cmd_h = (u32)((u64)(cmd->dma) >> 32);
	cmd_l = (u32)(cmd->dma);
	if (WARN_ON(cmd_l & 0xfff))
		return -EINVAL;

	iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
	iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);

	/* Make sure firmware sees the complete address before we proceed */
	wmb();

	mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));

	cmd->mode = CMD_MODE_POLLING;
	cmd->allowed_opcode = CMD_ALLOWED_OPCODE_ALL;

	create_debugfs_files(dev);

	return 0;
}

void mlx5_cmd_disable(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	clean_debug_files(dev);
	flush_workqueue(cmd->wq);
}

void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
			enum mlx5_cmdif_state cmdif_state)
{
+31 −3
Original line number Diff line number Diff line
@@ -176,8 +176,8 @@ static ssize_t slots_read(struct file *filp, char __user *buf, size_t count,
	int ret;

	cmd = filp->private_data;
	weight = bitmap_weight(&cmd->bitmask, cmd->max_reg_cmds);
	field = cmd->max_reg_cmds - weight;
	weight = bitmap_weight(&cmd->vars.bitmask, cmd->vars.max_reg_cmds);
	field = cmd->vars.max_reg_cmds - weight;
	ret = snprintf(tbuf, sizeof(tbuf), "%d\n", field);
	return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}
@@ -188,6 +188,24 @@ static const struct file_operations slots_fops = {
	.read	= slots_read,
};

static struct mlx5_cmd_stats *
mlx5_cmdif_alloc_stats(struct xarray *stats_xa, int opcode)
{
	struct mlx5_cmd_stats *stats = kzalloc(sizeof(*stats), GFP_KERNEL);
	int err;

	if (!stats)
		return NULL;

	err = xa_insert(stats_xa, opcode, stats, GFP_KERNEL);
	if (err) {
		kfree(stats);
		return NULL;
	}
	spin_lock_init(&stats->lock);
	return stats;
}

void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_stats *stats;
@@ -200,10 +218,14 @@ void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)

	debugfs_create_file("slots_inuse", 0400, *cmd, &dev->cmd, &slots_fops);

	xa_init(&dev->cmd.stats);

	for (i = 0; i < MLX5_CMD_OP_MAX; i++) {
		stats = &dev->cmd.stats[i];
		namep = mlx5_command_str(i);
		if (strcmp(namep, "unknown command opcode")) {
			stats = mlx5_cmdif_alloc_stats(&dev->cmd.stats, i);
			if (!stats)
				continue;
			stats->root = debugfs_create_dir(namep, *cmd);

			debugfs_create_file("average", 0400, stats->root, stats,
@@ -224,7 +246,13 @@ void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)

void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_stats *stats;
	unsigned long i;

	debugfs_remove_recursive(dev->priv.dbg.cmdif_debugfs);
	xa_for_each(&dev->cmd.stats, i, stats)
		kfree(stats);
	xa_destroy(&dev->cmd.stats);
}

void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev)
+2 −4
Original line number Diff line number Diff line
@@ -36,6 +36,7 @@
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"
#include "devlink.h"
#include "lag/lag.h"

/* intf dev list mutex */
static DEFINE_MUTEX(mlx5_intf_mutex);
@@ -587,10 +588,7 @@ static int next_phys_dev_lag(struct device *dev, const void *data)
	if (!mdev)
		return 0;

	if (!MLX5_CAP_GEN(mdev, vport_group_manager) ||
	    !MLX5_CAP_GEN(mdev, lag_master) ||
	    (MLX5_CAP_GEN(mdev, num_lag_ports) > MLX5_MAX_PORTS ||
	     MLX5_CAP_GEN(mdev, num_lag_ports) <= 1))
	if (!mlx5_lag_is_supported(mdev))
		return 0;

	return _next_phys_dev(mdev, data);
+0 −3
Original line number Diff line number Diff line
@@ -1167,9 +1167,6 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc);
int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
		   const u8 hfunc);
int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
		    u32 *rule_locs);
int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd);
u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
+3 −3
Original line number Diff line number Diff line
@@ -2163,7 +2163,7 @@ static u32 mlx5e_get_priv_flags(struct net_device *netdev)
	return priv->channels.params.pflags;
}

int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
static int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
			   u32 *rule_locs)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
@@ -2181,7 +2181,7 @@ int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
	return mlx5e_ethtool_get_rxnfc(priv, info, rule_locs);
}

int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
static int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

Loading