Commit 34136153 authored by Tariq Toukan, committed by Saeed Mahameed

net/mlx5e: kTLS, Use a single async context object per a callback bulk



A single async context object is sufficient to wait for the completions
of many callbacks. Switch to using one instance per bulk of commands.
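
To illustrate the resulting layout, here is a minimal, compilable userspace sketch of the pattern the patch adopts: one shared async context plus a flexible array of per-command slots, carved out of a single allocation, with every slot pointing back at the shared context. The stub types and the bulk_async_init() helper are illustrative stand-ins for the kernel's mlx5 structs, struct_size(), and kvzalloc(); they are not the driver's actual API.

/* Userspace sketch only; stand-in names, not the mlx5 driver API. */
#include <stdio.h>
#include <stdlib.h>

struct async_ctx { int dev; };		/* stand-in for mlx5_async_ctx */

struct async_work {			/* stand-in for mlx5e_async_ctx */
	struct async_ctx *async_ctx;	/* now a pointer to the shared ctx */
	int err;
};

struct bulk_async_ctx {
	struct async_ctx async_ctx;	/* single shared context */
	struct async_work arr[];	/* flexible array member */
};

static struct bulk_async_ctx *bulk_async_init(int n)
{
	/* one zeroed allocation for header + n slots,
	 * analogous to struct_size() + kvzalloc() in the patch
	 */
	struct bulk_async_ctx *bulk =
		calloc(1, sizeof(*bulk) + n * sizeof(bulk->arr[0]));
	if (!bulk)
		return NULL;
	for (int i = 0; i < n; i++)
		bulk->arr[i].async_ctx = &bulk->async_ctx; /* all share one ctx */
	return bulk;
}

int main(void)
{
	struct bulk_async_ctx *bulk = bulk_async_init(4);
	if (!bulk)
		return 1;
	printf("slots 0 and 3 share one ctx: %d\n",
	       bulk->arr[0].async_ctx == bulk->arr[3].async_ctx);
	free(bulk);	/* one free tears down the whole bulk */
	return 0;
}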

Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Reviewed-by: Gal Pressman <gal@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 4d78a2eb
+25 −25
@@ -125,7 +125,7 @@ mlx5e_get_ktls_tx_priv_ctx(struct tls_context *tls_ctx)
 /* struct for callback API management */
 struct mlx5e_async_ctx {
 	struct mlx5_async_work context;
-	struct mlx5_async_ctx async_ctx;
+	struct mlx5_async_ctx *async_ctx;
 	struct mlx5e_ktls_offload_context_tx *priv_tx;
 	int err;
 	union {
@@ -134,33 +134,33 @@ struct mlx5e_async_ctx {
 	};
 };
 
-static struct mlx5e_async_ctx *mlx5e_bulk_async_init(struct mlx5_core_dev *mdev, int n)
+struct mlx5e_bulk_async_ctx {
+	struct mlx5_async_ctx async_ctx;
+	DECLARE_FLEX_ARRAY(struct mlx5e_async_ctx, arr);
+};
+
+static struct mlx5e_bulk_async_ctx *mlx5e_bulk_async_init(struct mlx5_core_dev *mdev, int n)
 {
-	struct mlx5e_async_ctx *bulk_async;
+	struct mlx5e_bulk_async_ctx *bulk_async;
+	int sz;
 	int i;
 
-	bulk_async = kvcalloc(n, sizeof(struct mlx5e_async_ctx), GFP_KERNEL);
+	sz = struct_size(bulk_async, arr, n);
+	bulk_async = kvzalloc(sz, GFP_KERNEL);
 	if (!bulk_async)
 		return NULL;
 
-	for (i = 0; i < n; i++) {
-		struct mlx5e_async_ctx *async = &bulk_async[i];
-
-		mlx5_cmd_init_async_ctx(mdev, &async->async_ctx);
-	}
+	mlx5_cmd_init_async_ctx(mdev, &bulk_async->async_ctx);
+
+	for (i = 0; i < n; i++)
+		bulk_async->arr[i].async_ctx = &bulk_async->async_ctx;
 
 	return bulk_async;
 }
 
-static void mlx5e_bulk_async_cleanup(struct mlx5e_async_ctx *bulk_async, int n)
+static void mlx5e_bulk_async_cleanup(struct mlx5e_bulk_async_ctx *bulk_async)
 {
-	int i;
-
-	for (i = 0; i < n; i++) {
-		struct mlx5e_async_ctx *async = &bulk_async[i];
-
-		mlx5_cmd_cleanup_async_ctx(&async->async_ctx);
-	}
+	mlx5_cmd_cleanup_async_ctx(&bulk_async->async_ctx);
 	kvfree(bulk_async);
 }

@@ -208,7 +208,7 @@ mlx5e_tls_priv_tx_init(struct mlx5_core_dev *mdev, struct mlx5e_tls_sw_stats *sw
 			goto err_out;
 	} else {
 		async->priv_tx = priv_tx;
-		err = mlx5e_ktls_create_tis_cb(mdev, &async->async_ctx,
+		err = mlx5e_ktls_create_tis_cb(mdev, async->async_ctx,
 					       async->out_create, sizeof(async->out_create),
 					       create_tis_callback, &async->context);
 		if (err)
@@ -231,7 +231,7 @@ static void mlx5e_tls_priv_tx_cleanup(struct mlx5e_ktls_offload_context_tx *priv
 	}
 	async->priv_tx = priv_tx;
 	mlx5e_ktls_destroy_tis_cb(priv_tx->mdev, priv_tx->tisn,
-				  &async->async_ctx,
+				  async->async_ctx,
 				  async->out_destroy, sizeof(async->out_destroy),
 				  destroy_tis_callback, &async->context);
 }
@@ -240,7 +240,7 @@ static void mlx5e_tls_priv_tx_list_cleanup(struct mlx5_core_dev *mdev,
 					   struct list_head *list, int size)
 {
 	struct mlx5e_ktls_offload_context_tx *obj, *n;
-	struct mlx5e_async_ctx *bulk_async;
+	struct mlx5e_bulk_async_ctx *bulk_async;
 	int i;
 
 	bulk_async = mlx5e_bulk_async_init(mdev, size);
@@ -249,11 +249,11 @@ static void mlx5e_tls_priv_tx_list_cleanup(struct mlx5_core_dev *mdev,
 
 	i = 0;
 	list_for_each_entry_safe(obj, n, list, list_node) {
-		mlx5e_tls_priv_tx_cleanup(obj, &bulk_async[i]);
+		mlx5e_tls_priv_tx_cleanup(obj, &bulk_async->arr[i]);
 		i++;
 	}
 
-	mlx5e_bulk_async_cleanup(bulk_async, size);
+	mlx5e_bulk_async_cleanup(bulk_async);
 }
 
 /* Recycling pool API */
@@ -279,7 +279,7 @@ static void create_work(struct work_struct *work)
 	struct mlx5e_tls_tx_pool *pool =
 		container_of(work, struct mlx5e_tls_tx_pool, create_work);
 	struct mlx5e_ktls_offload_context_tx *obj;
-	struct mlx5e_async_ctx *bulk_async;
+	struct mlx5e_bulk_async_ctx *bulk_async;
 	LIST_HEAD(local_list);
 	int i, j, err = 0;
 
@@ -288,7 +288,7 @@ static void create_work(struct work_struct *work)
 		return;
 
 	for (i = 0; i < MLX5E_TLS_TX_POOL_BULK; i++) {
-		obj = mlx5e_tls_priv_tx_init(pool->mdev, pool->sw_stats, &bulk_async[i]);
+		obj = mlx5e_tls_priv_tx_init(pool->mdev, pool->sw_stats, &bulk_async->arr[i]);
 		if (IS_ERR(obj)) {
 			err = PTR_ERR(obj);
 			break;
@@ -297,13 +297,13 @@ static void create_work(struct work_struct *work)
 	}
 
 	for (j = 0; j < i; j++) {
-		struct mlx5e_async_ctx *async = &bulk_async[j];
+		struct mlx5e_async_ctx *async = &bulk_async->arr[j];
 
 		if (!err && async->err)
			err = async->err;
 	}
 	atomic64_add(i, &pool->sw_stats->tx_tls_pool_alloc);
-	mlx5e_bulk_async_cleanup(bulk_async, MLX5E_TLS_TX_POOL_BULK);
+	mlx5e_bulk_async_cleanup(bulk_async);
 	if (err)
 		goto err_out;