Commit 758ce14a authored by Patrisious Haddad, committed by Leon Romanovsky
Browse files

RDMA/mlx5: Implement MACsec gid addition and deletion



Handle MACsec IP ambiguity issue, since mlx5 hw can't support
programming both the MACsec and the physical gid when they have the same
IP address, because it wouldn't know to whom to steer the traffic.
Hence in such case we delete the physical gid from the hw gid table,
which would then cause all traffic sent over it to fail, and we'll only
be able to send traffic over the MACsec gid.

Signed-off-by: Patrisious Haddad <phaddad@nvidia.com>
Reviewed-by: Raed Salem <raeds@nvidia.com>
Reviewed-by: Mark Zhang <markzhang@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
parent d4ece08f
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -28,3 +28,4 @@ mlx5_ib-$(CONFIG_INFINIBAND_USER_ACCESS) += devx.o \
					    fs.o \
					    qos.o \
					    std_types.o
mlx5_ib-$(CONFIG_MLX5_MACSEC) += macsec.o
+155 −0
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. */

#include "macsec.h"

/*
 * Per-GID-table-slot bookkeeping for the MACsec/physical GID IP-ambiguity
 * workaround.  When a physical GID is removed from the HW GID table because
 * a MACsec GID shares its IP, we record the index of the MACsec GID that
 * displaced it and keep a (referenced) pointer to the removed physical GID
 * attribute so it can be re-programmed into HW once the MACsec GID is gone.
 */
struct mlx5_reserved_gids {
	int macsec_index;	/* GID-table index of the displacing MACsec GID, -1 if slot unused */
	const struct ib_gid_attr *physical_gid;	/* held ref to the removed physical GID; valid only when macsec_index != -1 */
};

/*
 * Allocate the per-port reserved-GID tracking tables used to resolve
 * MACsec/physical GID IP ambiguity.
 *
 * Returns 0 on success — including the benign case where RoCE MACsec is
 * not supported by the device, in which case nothing is allocated —
 * or -ENOMEM if any per-port table allocation fails.
 */
int mlx5r_macsec_alloc_gids(struct mlx5_ib_dev *dev)
{
	int port, slot, table_size;

	if (!mlx5_is_macsec_roce_supported(dev->mdev)) {
		mlx5_ib_dbg(dev, "RoCE MACsec not supported due to capabilities\n");
		return 0;
	}

	table_size = MLX5_CAP_ROCE(dev->mdev, roce_address_table_size);
	for (port = 0; port < dev->num_ports; port++) {
		dev->port[port].reserved_gids =
			kcalloc(table_size,
				sizeof(*dev->port[port].reserved_gids),
				GFP_KERNEL);
		if (!dev->port[port].reserved_gids)
			goto err;

		/* -1 marks "no MACsec GID shadows this slot" */
		for (slot = 0; slot < table_size; slot++)
			dev->port[port].reserved_gids[slot].macsec_index = -1;
	}

	return 0;

err:
	/* Unwind every port up to and including the failed one
	 * (its pointer is NULL, and kfree(NULL) is a no-op).
	 */
	for (; port >= 0; port--)
		kfree(dev->port[port].reserved_gids);
	return -ENOMEM;
}

void mlx5r_macsec_dealloc_gids(struct mlx5_ib_dev *dev)
{
	int i;

	if (!mlx5_is_macsec_roce_supported(dev->mdev))
		mlx5_ib_dbg(dev, "RoCE MACsec not supported due to capabilities\n");

	for (i = 0; i < dev->num_ports; i++)
		kfree(dev->port[i].reserved_gids);
}

/*
 * Hook invoked when a RoCE GID is added.  If the new GID belongs to an
 * offloaded MACsec netdev and a physical GID with the same address/type
 * already exists, the HW cannot steer traffic for both (IP ambiguity),
 * so the physical GID is removed from the HW GID table and remembered —
 * together with a held reference from rdma_find_gid() — in the port's
 * reserved_gids table, to be restored when the MACsec GID is deleted.
 *
 * Returns 0 when no action is needed (non-UDP-encap GID, MACsec not
 * supported, netdev not MACsec/offloaded, or no duplicate GID found),
 * -ENODEV if the GID has no netdev, or the error from set_roce_addr().
 */
int mlx5r_add_gid_macsec_operations(const struct ib_gid_attr *attr)
{
	struct mlx5_ib_dev *dev = to_mdev(attr->device);
	const struct ib_gid_attr *physical_gid;
	struct mlx5_reserved_gids *mgids;
	struct net_device *ndev;
	int ret = 0;

	/* Only RoCEv2 (UDP encap) GIDs participate in the workaround */
	if (attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
		return 0;

	if (!mlx5_is_macsec_roce_supported(dev->mdev)) {
		mlx5_ib_dbg(dev, "RoCE MACsec not supported due to capabilities\n");
		return 0;
	}

	/* attr->ndev is RCU-protected; hold the read lock while inspecting it */
	rcu_read_lock();
	ndev = rcu_dereference(attr->ndev);
	if (!ndev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	/* Nothing to do unless the GID sits on an offload-enabled MACsec netdev */
	if (!netif_is_macsec(ndev) || !macsec_netdev_is_offloaded(ndev)) {
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	/* Look for a pre-existing (physical) GID with the same address;
	 * on success this takes a reference we keep until deletion.
	 */
	physical_gid = rdma_find_gid(attr->device, &attr->gid,
				     attr->gid_type, NULL);
	if (IS_ERR(physical_gid))
		return 0;	/* no duplicate — no ambiguity to resolve */

	/* Remove the physical GID from HW (gid == NULL clears the entry) */
	ret = set_roce_addr(to_mdev(physical_gid->device),
			    physical_gid->port_num,
			    physical_gid->index, NULL,
			    physical_gid);
	if (ret)
		goto gid_err;

	/* Record which MACsec GID displaced this slot, keeping the gid ref */
	mgids = &dev->port[attr->port_num - 1].reserved_gids[physical_gid->index];
	mgids->macsec_index = attr->index;
	mgids->physical_gid = physical_gid;

	return 0;

gid_err:
	/* HW update failed — drop the reference taken by rdma_find_gid() */
	rdma_put_gid_attr(physical_gid);
	return ret;
}

/*
 * Hook invoked when a RoCE GID is deleted.  Undoes the IP-ambiguity
 * bookkeeping set up by mlx5r_add_gid_macsec_operations():
 *
 *  - If the GID being deleted is a *physical* GID that was shadowed by a
 *    MACsec GID (its reserved_gids slot has macsec_index != -1), just
 *    drop the stored reference and clear the slot — the entry was
 *    already removed from HW at MACsec-add time.
 *
 *  - If the GID being deleted is a *MACsec* GID on an offloaded MACsec
 *    netdev, scan the port's table for the slot it displaced and
 *    re-program the saved physical GID back into HW, then release the
 *    reference and clear the slot.
 */
void mlx5r_del_gid_macsec_operations(const struct ib_gid_attr *attr)
{
	struct mlx5_ib_dev *dev = to_mdev(attr->device);
	struct mlx5_reserved_gids *mgids;
	struct net_device *ndev;
	int i, max_gids;

	/* Only RoCEv2 (UDP encap) GIDs participate in the workaround */
	if (attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
		return;

	if (!mlx5_is_macsec_roce_supported(dev->mdev)) {
		mlx5_ib_dbg(dev, "RoCE MACsec not supported due to capabilities\n");
		return;
	}

	mgids = &dev->port[attr->port_num - 1].reserved_gids[attr->index];
	if (mgids->macsec_index != -1) { /* Checking if physical gid has ambiguous IP */
		/* Deleting a shadowed physical GID: release the ref saved at
		 * MACsec-add time and mark the slot free.
		 */
		rdma_put_gid_attr(mgids->physical_gid);
		mgids->macsec_index = -1;
		return;
	}

	/* attr->ndev is RCU-protected; hold the read lock while inspecting it */
	rcu_read_lock();
	ndev = rcu_dereference(attr->ndev);
	if (!ndev) {
		rcu_read_unlock();
		return;
	}

	if (!netif_is_macsec(ndev) || !macsec_netdev_is_offloaded(ndev)) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	max_gids = MLX5_CAP_ROCE(dev->mdev, roce_address_table_size);
	for (i = 0; i < max_gids; i++) { /* Checking if macsec gid has ambiguous IP */
		mgids = &dev->port[attr->port_num - 1].reserved_gids[i];
		if (mgids->macsec_index == attr->index) {
			const struct ib_gid_attr *physical_gid = mgids->physical_gid;

			/* Restore the displaced physical GID into the HW table.
			 * NOTE(review): the return value is ignored here —
			 * presumably best-effort on teardown; confirm.
			 */
			set_roce_addr(to_mdev(physical_gid->device),
				      physical_gid->port_num,
				      physical_gid->index,
				      &physical_gid->gid, physical_gid);

			rdma_put_gid_attr(physical_gid);
			mgids->macsec_index = -1;
			break;
		}
	}
}
+25 −0
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. */

#ifndef __MLX5_MACSEC_H__
#define __MLX5_MACSEC_H__

#include <net/macsec.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>
#include "mlx5_ib.h"

#ifdef CONFIG_MLX5_MACSEC
/* Opaque per-GID-slot bookkeeping; defined in macsec.c */
struct mlx5_reserved_gids;

/* Called from mlx5_ib_add_gid(): resolves MACsec/physical GID IP
 * ambiguity by removing a same-IP physical GID from HW.  Returns 0 or
 * a negative errno.
 */
int mlx5r_add_gid_macsec_operations(const struct ib_gid_attr *attr);
/* Called from mlx5_ib_del_gid(): restores a displaced physical GID
 * and/or releases saved references.
 */
void mlx5r_del_gid_macsec_operations(const struct ib_gid_attr *attr);
/* Allocate per-port reserved-GID tables; 0 on success or -ENOMEM */
int mlx5r_macsec_alloc_gids(struct mlx5_ib_dev *dev);
/* Free the tables allocated by mlx5r_macsec_alloc_gids() */
void mlx5r_macsec_dealloc_gids(struct mlx5_ib_dev *dev);
#else
/* CONFIG_MLX5_MACSEC disabled: all hooks are no-ops that report success */
static inline int mlx5r_add_gid_macsec_operations(const struct ib_gid_attr *attr) { return 0; }
static inline void mlx5r_del_gid_macsec_operations(const struct ib_gid_attr *attr) {}
static inline int mlx5r_macsec_alloc_gids(struct mlx5_ib_dev *dev) { return 0; }
static inline void mlx5r_macsec_dealloc_gids(struct mlx5_ib_dev *dev) {}
#endif
#endif /* __MLX5_MACSEC_H__ */
+28 −9
Original line number Diff line number Diff line
@@ -46,6 +46,7 @@
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include "macsec.h"

#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>
@@ -564,7 +565,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u32 port_num,
	return err;
}

static int set_roce_addr(struct mlx5_ib_dev *dev, u32 port_num,
int set_roce_addr(struct mlx5_ib_dev *dev, u32 port_num,
		  unsigned int index, const union ib_gid *gid,
		  const struct ib_gid_attr *attr)
{
@@ -607,6 +608,12 @@ static int set_roce_addr(struct mlx5_ib_dev *dev, u32 port_num,
static int mlx5_ib_add_gid(const struct ib_gid_attr *attr,
			   __always_unused void **context)
{
	int ret;

	ret = mlx5r_add_gid_macsec_operations(attr);
	if (ret)
		return ret;

	return set_roce_addr(to_mdev(attr->device), attr->port_num,
			     attr->index, &attr->gid, attr);
}
@@ -614,8 +621,15 @@ static int mlx5_ib_add_gid(const struct ib_gid_attr *attr,
static int mlx5_ib_del_gid(const struct ib_gid_attr *attr,
			   __always_unused void **context)
{
	return set_roce_addr(to_mdev(attr->device), attr->port_num,
	int ret;

	ret = set_roce_addr(to_mdev(attr->device), attr->port_num,
			    attr->index, NULL, attr);
	if (ret)
		return ret;

	mlx5r_del_gid_macsec_operations(attr);
	return 0;
}

__be16 mlx5_get_roce_udp_sport_min(const struct mlx5_ib_dev *dev,
@@ -3644,13 +3658,13 @@ static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
	mutex_destroy(&dev->cap_mask_mutex);
	WARN_ON(!xa_empty(&dev->sig_mrs));
	WARN_ON(!bitmap_empty(dev->dm.memic_alloc_pages, MLX5_MAX_MEMIC_PAGES));
	mlx5r_macsec_dealloc_gids(dev);
}

static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	int err;
	int i;
	int err, i;

	dev->ib_dev.node_type = RDMA_NODE_IB_CA;
	dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
@@ -3670,10 +3684,14 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
	if (err)
		return err;

	err = mlx5_ib_init_multiport_master(dev);
	err = mlx5r_macsec_alloc_gids(dev);
	if (err)
		return err;

	err = mlx5_ib_init_multiport_master(dev);
	if (err)
		goto err;

	err = set_has_smi_cap(dev);
	if (err)
		goto err_mp;
@@ -3697,7 +3715,8 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
	spin_lock_init(&dev->dm.lock);
	dev->dm.dev = mdev;
	return 0;

err:
	mlx5r_macsec_dealloc_gids(dev);
err_mp:
	mlx5_ib_cleanup_multiport_master(dev);
	return err;
+7 −0
Original line number Diff line number Diff line
@@ -26,6 +26,7 @@

#include "srq.h"
#include "qp.h"
#include "macsec.h"

#define mlx5_ib_dbg(_dev, format, arg...)                                      \
	dev_dbg(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__,      \
@@ -870,6 +871,9 @@ struct mlx5_ib_port {
	struct mlx5_ib_dbg_cc_params *dbg_cc_params;
	struct mlx5_roce roce;
	struct mlx5_eswitch_rep		*rep;
#ifdef CONFIG_MLX5_MACSEC
	struct mlx5_reserved_gids *reserved_gids;
#endif
};

struct mlx5_ib_dbg_param {
@@ -1648,4 +1652,7 @@ static inline bool mlx5_umem_needs_ats(struct mlx5_ib_dev *dev,
	return access_flags & IB_ACCESS_RELAXED_ORDERING;
}

int set_roce_addr(struct mlx5_ib_dev *dev, u32 port_num,
		  unsigned int index, const union ib_gid *gid,
		  const struct ib_gid_attr *attr);
#endif /* MLX5_IB_H */
Loading