Commit 108ff821 authored by Yevgeny Kliteynik, committed by Saeed Mahameed

net/mlx5: DR, Add modify-header-pattern ICM pool



The device exposes a new ICM area for modify-header patterns, so we need to
handle it as we did for the other ICM types. This patch adds the dedicated
pool with its requirements and management.

Signed-off-by: Muhammad Sammar <muhammads@nvidia.com>
Signed-off-by: Yevgeny Kliteynik <kliteyn@nvidia.com>
Reviewed-by: Alex Vesker <valex@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 1e5cc736
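
Editor's note: before the diff itself, a minimal lifecycle sketch of how a
domain consumes the new pattern-ICM manager. This is not part of the patch;
the example_* function names are illustrative, and error handling is
simplified. It uses only the helpers the diff below introduces.

	/* Sketch only: bring the pattern manager up and down using the
	 * helpers this patch adds. In this patch
	 * mlx5dr_domain_is_support_ptrn_arg() still returns false, so
	 * both paths are no-ops until support is enabled.
	 */
	static int example_init_ptrn_res(struct mlx5dr_domain *dmn)
	{
		if (!mlx5dr_domain_is_support_ptrn_arg(dmn))
			return 0;	/* no pattern ICM support */

		/* Creates the manager and its DR_ICM_TYPE_MODIFY_HDR_PTRN pool */
		dmn->ptrn_mgr = mlx5dr_ptrn_mgr_create(dmn);
		return dmn->ptrn_mgr ? 0 : -ENOMEM;
	}

	static void example_uninit_ptrn_res(struct mlx5dr_domain *dmn)
	{
		/* mlx5dr_ptrn_mgr_destroy() tolerates a NULL manager */
		mlx5dr_ptrn_mgr_destroy(dmn->ptrn_mgr);
	}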
drivers/net/ethernet/mellanox/mlx5/core/Makefile (+1 −1)
@@ -112,7 +112,7 @@ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o
					steering/dr_ste_v2.o \
					steering/dr_cmd.o steering/dr_fw.o \
					steering/dr_action.o steering/fs_dr.o \
-					steering/dr_definer.o \
+					steering/dr_definer.o steering/dr_ptrn.o \
					steering/dr_dbg.o lib/smfs.o
#
# SF device
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c (+6 −0)
@@ -200,6 +200,12 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
	caps->hdr_modify_icm_addr =
		MLX5_CAP64_DEV_MEM(mdev, header_modify_sw_icm_start_address);

+	caps->log_modify_pattern_icm_size =
+		MLX5_CAP_DEV_MEM(mdev, log_header_modify_pattern_sw_icm_size);
+
+	caps->hdr_modify_pattern_icm_addr =
+		MLX5_CAP64_DEV_MEM(mdev, header_modify_pattern_sw_icm_start_address);
+
	caps->roce_min_src_udp = MLX5_CAP_ROCE(mdev, r_roce_min_src_udp_port);

	caps->is_ecpf = mlx5_core_is_ecpf_esw_manager(mdev);
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c (+42 −3)
@@ -10,6 +10,33 @@
	 ((dmn)->info.caps.dmn_type##_sw_owner_v2 &&	\
	  (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_7))

+bool mlx5dr_domain_is_support_ptrn_arg(struct mlx5dr_domain *dmn)
+{
+	return false;
+}
+
+static int dr_domain_init_modify_header_resources(struct mlx5dr_domain *dmn)
+{
+	if (!mlx5dr_domain_is_support_ptrn_arg(dmn))
+		return 0;
+
+	dmn->ptrn_mgr = mlx5dr_ptrn_mgr_create(dmn);
+	if (!dmn->ptrn_mgr) {
+		mlx5dr_err(dmn, "Couldn't create ptrn_mgr\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void dr_domain_destroy_modify_header_resources(struct mlx5dr_domain *dmn)
+{
+	if (!mlx5dr_domain_is_support_ptrn_arg(dmn))
+		return;
+
+	mlx5dr_ptrn_mgr_destroy(dmn->ptrn_mgr);
+}
+
static void dr_domain_init_csum_recalc_fts(struct mlx5dr_domain *dmn)
{
	/* Per vport cached FW FT for checksum recalculation, this
@@ -149,14 +176,22 @@ static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
		goto clean_uar;
	}

+	ret = dr_domain_init_modify_header_resources(dmn);
+	if (ret) {
+		mlx5dr_err(dmn, "Couldn't create modify-header-resources\n");
+		goto clean_mem_resources;
+	}
+
	ret = mlx5dr_send_ring_alloc(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Couldn't create send-ring\n");
-		goto clean_mem_resources;
+		goto clean_modify_hdr;
	}

	return 0;

+clean_modify_hdr:
+	dr_domain_destroy_modify_header_resources(dmn);
clean_mem_resources:
	dr_domain_uninit_mem_resources(dmn);
clean_uar:
@@ -170,6 +205,7 @@ static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
static void dr_domain_uninit_resources(struct mlx5dr_domain *dmn)
{
	mlx5dr_send_ring_free(dmn, dmn->send_ring);
+	dr_domain_destroy_modify_header_resources(dmn);
	dr_domain_uninit_mem_resources(dmn);
	mlx5_put_uars_page(dmn->mdev, dmn->uar);
	mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);
@@ -215,7 +251,7 @@ static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
	return 0;
}

-static int dr_domain_query_esw_mngr(struct mlx5dr_domain *dmn)
+static int dr_domain_query_esw_mgr(struct mlx5dr_domain *dmn)
{
	return dr_domain_query_vport(dmn, 0, false,
				     &dmn->info.caps.vports.esw_manager_caps);
@@ -321,7 +357,7 @@ static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev,
	 * vports (vport 0, VFs and SFs) will be queried dynamically.
	 */

-	ret = dr_domain_query_esw_mngr(dmn);
+	ret = dr_domain_query_esw_mgr(dmn);
	if (ret) {
		mlx5dr_err(dmn, "Failed to query eswitch manager vport caps (err: %d)", ret);
		goto free_vports_caps_xa;
@@ -435,6 +471,9 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
	dmn->info.max_log_action_icm_sz = DR_CHUNK_SIZE_4K;
	dmn->info.max_log_sw_icm_sz = min_t(u32, DR_CHUNK_SIZE_1024K,
					    dmn->info.caps.log_icm_size);
+	dmn->info.max_log_modify_hdr_pattern_icm_sz =
+		min_t(u32, DR_CHUNK_SIZE_4K,
+		      dmn->info.caps.log_modify_pattern_icm_size);

	if (!dmn->info.supp_sw_steering) {
		mlx5dr_err(dmn, "SW steering is not supported\n");
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c (+29 −12)
@@ -107,9 +107,9 @@ static struct mlx5dr_icm_mr *
dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool)
{
	struct mlx5_core_dev *mdev = pool->dmn->mdev;
-	enum mlx5_sw_icm_type dm_type;
+	enum mlx5_sw_icm_type dm_type = 0;
	struct mlx5dr_icm_mr *icm_mr;
-	size_t log_align_base;
+	size_t log_align_base = 0;
	int err;

	icm_mr = kvzalloc(sizeof(*icm_mr), GFP_KERNEL);
@@ -121,14 +121,25 @@ dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool)
	icm_mr->dm.length = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
							       pool->icm_type);

-	if (pool->icm_type == DR_ICM_TYPE_STE) {
+	switch (pool->icm_type) {
+	case DR_ICM_TYPE_STE:
		dm_type = MLX5_SW_ICM_TYPE_STEERING;
		log_align_base = ilog2(icm_mr->dm.length);
-	} else {
+		break;
+	case DR_ICM_TYPE_MODIFY_ACTION:
		dm_type = MLX5_SW_ICM_TYPE_HEADER_MODIFY;
		/* Align base is 64B */
		log_align_base = ilog2(DR_ICM_MODIFY_HDR_ALIGN_BASE);
+		break;
+	case DR_ICM_TYPE_MODIFY_HDR_PTRN:
+		dm_type = MLX5_SW_ICM_TYPE_HEADER_MODIFY_PATTERN;
+		/* Align base is 64B */
+		log_align_base = ilog2(DR_ICM_MODIFY_HDR_ALIGN_BASE);
+		break;
+	default:
+		WARN_ON(pool->icm_type);
	}

	icm_mr->dm.type = dm_type;

	err = mlx5_dm_sw_icm_alloc(mdev, icm_mr->dm.type, icm_mr->dm.length,
@@ -493,27 +504,33 @@ struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn,
					       enum mlx5dr_icm_type icm_type)
{
	u32 num_of_chunks, entry_size, max_hot_size;
-	enum mlx5dr_icm_chunk_size max_log_chunk_sz;
	struct mlx5dr_icm_pool *pool;

-	if (icm_type == DR_ICM_TYPE_STE)
-		max_log_chunk_sz = dmn->info.max_log_sw_icm_sz;
-	else
-		max_log_chunk_sz = dmn->info.max_log_action_icm_sz;
-
	pool = kvzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	pool->dmn = dmn;
	pool->icm_type = icm_type;
-	pool->max_log_chunk_sz = max_log_chunk_sz;
	pool->chunks_kmem_cache = dmn->chunks_kmem_cache;

	INIT_LIST_HEAD(&pool->buddy_mem_list);

	mutex_init(&pool->mutex);

+	switch (icm_type) {
+	case DR_ICM_TYPE_STE:
+		pool->max_log_chunk_sz = dmn->info.max_log_sw_icm_sz;
+		break;
+	case DR_ICM_TYPE_MODIFY_ACTION:
+		pool->max_log_chunk_sz = dmn->info.max_log_action_icm_sz;
+		break;
+	case DR_ICM_TYPE_MODIFY_HDR_PTRN:
+		pool->max_log_chunk_sz = dmn->info.max_log_modify_hdr_pattern_icm_sz;
+		break;
+	default:
+		WARN_ON(icm_type);
+	}

	entry_size = mlx5dr_icm_pool_dm_type_to_entry_size(pool->icm_type);

	max_hot_size = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
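Editor's note on the alignment math in dr_icm_pool_mr_create() above: assuming
DR_ICM_MODIFY_HDR_ALIGN_BASE is 64, as the in-code comment states, then
ilog2(64) = 6, so both modify-header pools request a 2^6 = 64-byte aligned base
from mlx5_dm_sw_icm_alloc(), while STE pools align to the full pool length
(log_align_base = ilog2(icm_mr->dm.length)).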
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c (new file, +43 −0)
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.

#include "dr_types.h"

struct mlx5dr_ptrn_mgr {
	struct mlx5dr_domain *dmn;
	struct mlx5dr_icm_pool *ptrn_icm_pool;
};

struct mlx5dr_ptrn_mgr *mlx5dr_ptrn_mgr_create(struct mlx5dr_domain *dmn)
{
	struct mlx5dr_ptrn_mgr *mgr;

	if (!mlx5dr_domain_is_support_ptrn_arg(dmn))
		return NULL;

	mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
	if (!mgr)
		return NULL;

	mgr->dmn = dmn;
	mgr->ptrn_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_MODIFY_HDR_PTRN);
	if (!mgr->ptrn_icm_pool) {
		mlx5dr_err(dmn, "Couldn't get modify-header-pattern memory\n");
		goto free_mgr;
	}

	return mgr;

free_mgr:
	kfree(mgr);
	return NULL;
}

void mlx5dr_ptrn_mgr_destroy(struct mlx5dr_ptrn_mgr *mgr)
{
	if (!mgr)
		return;

	mlx5dr_icm_pool_destroy(mgr->ptrn_icm_pool);
	kfree(mgr);
}
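
Editor's note: this excerpt omits the dr_types.h side of the change. A rough
reconstruction follows; it is a sketch, not the verbatim hunk, and the exact
placement of fields among existing members is assumed, but every name below
appears in the hunks above.

	/* Reconstructed sketch of the header-side plumbing (dr_types.h) */
	enum mlx5dr_icm_type {
		DR_ICM_TYPE_STE,
		DR_ICM_TYPE_MODIFY_ACTION,
		DR_ICM_TYPE_MODIFY_HDR_PTRN,	/* new: modify-header patterns */
	};

	struct mlx5dr_cmd_caps {
		/* ... existing fields ... */
		u32 log_modify_pattern_icm_size;	/* filled in dr_cmd.c */
		u64 hdr_modify_pattern_icm_addr;
	};

	struct mlx5dr_ptrn_mgr *mlx5dr_ptrn_mgr_create(struct mlx5dr_domain *dmn);
	void mlx5dr_ptrn_mgr_destroy(struct mlx5dr_ptrn_mgr *mgr);
	bool mlx5dr_domain_is_support_ptrn_arg(struct mlx5dr_domain *dmn);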