Commit 03d25cf7 authored by Jakub Kicinski
Browse files

Merge branch 'support-256-bit-tls-keys-with-device-offload'

Gal Pressman says:

====================
Support 256 bit TLS keys with device offload

This series adds support for 256 bit TLS keys with device offload, and a
cleanup patch to remove repeating code:
- Patches #1-2 add cipher sizes descriptors which allow reducing the
  amount of code duplication.
- Patch #3 allows 256 bit keys to be TX offloaded in the tls module (RX
  already supported).
- Patch #4 adds 256 bit keys support to the mlx5 driver.
====================

Link: https://lore.kernel.org/r/20220920130150.3546-1-gal@nvidia.com


Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 8db3d514 4960c414
Loading
Loading
Loading
Loading
+6 −1
Original line number Diff line number Diff line
@@ -25,7 +25,8 @@ static inline bool mlx5e_is_ktls_device(struct mlx5_core_dev *mdev)
	if (!MLX5_CAP_GEN(mdev, log_max_dek))
		return false;

	return MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128);
	return (MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_128) ||
		MLX5_CAP_TLS(mdev, tls_1_2_aes_gcm_256));
}

static inline bool mlx5e_ktls_type_check(struct mlx5_core_dev *mdev,
@@ -36,6 +37,10 @@ static inline bool mlx5e_ktls_type_check(struct mlx5_core_dev *mdev,
		if (crypto_info->version == TLS_1_2_VERSION)
			return MLX5_CAP_TLS(mdev,  tls_1_2_aes_gcm_128);
		break;
	case TLS_CIPHER_AES_GCM_256:
		if (crypto_info->version == TLS_1_2_VERSION)
			return MLX5_CAP_TLS(mdev,  tls_1_2_aes_gcm_256);
		break;
	}

	return false;
+40 −5
Original line number Diff line number Diff line
@@ -43,7 +43,7 @@ struct mlx5e_ktls_rx_resync_ctx {
};

struct mlx5e_ktls_offload_context_rx {
	struct tls12_crypto_info_aes_gcm_128 crypto_info;
	union mlx5e_crypto_info crypto_info;
	struct accel_rule rule;
	struct sock *sk;
	struct mlx5e_rq_stats *rq_stats;
@@ -362,7 +362,6 @@ static void resync_init(struct mlx5e_ktls_rx_resync_ctx *resync,
static void resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx,
				    struct mlx5e_channel *c)
{
	struct tls12_crypto_info_aes_gcm_128 *info = &priv_rx->crypto_info;
	struct mlx5e_ktls_resync_resp *ktls_resync;
	struct mlx5e_icosq *sq;
	bool trigger_poll;
@@ -373,7 +372,31 @@ static void resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_r

	spin_lock_bh(&ktls_resync->lock);
	spin_lock_bh(&priv_rx->lock);
	memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be, sizeof(info->rec_seq));
	switch (priv_rx->crypto_info.crypto_info.cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		struct tls12_crypto_info_aes_gcm_128 *info =
			&priv_rx->crypto_info.crypto_info_128;

		memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be,
		       sizeof(info->rec_seq));
		break;
	}
	case TLS_CIPHER_AES_GCM_256: {
		struct tls12_crypto_info_aes_gcm_256 *info =
			&priv_rx->crypto_info.crypto_info_256;

		memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be,
		       sizeof(info->rec_seq));
		break;
	}
	default:
		WARN_ONCE(1, "Unsupported cipher type %u\n",
			  priv_rx->crypto_info.crypto_info.cipher_type);
		spin_unlock_bh(&priv_rx->lock);
		spin_unlock_bh(&ktls_resync->lock);
		return;
	}

	if (list_empty(&priv_rx->list)) {
		list_add_tail(&priv_rx->list, &ktls_resync->list);
		trigger_poll = !test_and_set_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state);
@@ -604,8 +627,20 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,

	INIT_LIST_HEAD(&priv_rx->list);
	spin_lock_init(&priv_rx->lock);
	priv_rx->crypto_info  =
	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		priv_rx->crypto_info.crypto_info_128 =
			*(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
		break;
	case TLS_CIPHER_AES_GCM_256:
		priv_rx->crypto_info.crypto_info_256 =
			*(struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
		break;
	default:
		WARN_ONCE(1, "Unsupported cipher type %u\n",
			  crypto_info->cipher_type);
		return -EOPNOTSUPP;
	}

	rxq = mlx5e_ktls_sk_get_rxq(sk);
	priv_rx->rxq = rxq;
+35 −6
Original line number Diff line number Diff line
@@ -93,7 +93,7 @@ struct mlx5e_ktls_offload_context_tx {
	bool ctx_post_pending;
	/* control / resync */
	struct list_head list_node; /* member of the pool */
	struct tls12_crypto_info_aes_gcm_128 crypto_info;
	union mlx5e_crypto_info crypto_info;
	struct tls_offload_context_tx *tx_ctx;
	struct mlx5_core_dev *mdev;
	struct mlx5e_tls_sw_stats *sw_stats;
@@ -485,8 +485,20 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
		goto err_create_key;

	priv_tx->expected_seq = start_offload_tcp_sn;
	priv_tx->crypto_info  =
	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		priv_tx->crypto_info.crypto_info_128 =
			*(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
		break;
	case TLS_CIPHER_AES_GCM_256:
		priv_tx->crypto_info.crypto_info_256 =
			*(struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
		break;
	default:
		WARN_ONCE(1, "Unsupported cipher type %u\n",
			  crypto_info->cipher_type);
		return -EOPNOTSUPP;
	}
	priv_tx->tx_ctx = tls_offload_ctx_tx(tls_ctx);

	mlx5e_set_ktls_tx_priv_ctx(tls_ctx, priv_tx);
@@ -671,14 +683,31 @@ tx_post_resync_params(struct mlx5e_txqsq *sq,
		      struct mlx5e_ktls_offload_context_tx *priv_tx,
		      u64 rcd_sn)
{
	struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info;
	__be64 rn_be = cpu_to_be64(rcd_sn);
	bool skip_static_post;
	u16 rec_seq_sz;
	char *rec_seq;

	switch (priv_tx->crypto_info.crypto_info.cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info.crypto_info_128;

		rec_seq = info->rec_seq;
		rec_seq_sz = sizeof(info->rec_seq);
		break;
	}
	case TLS_CIPHER_AES_GCM_256: {
		struct tls12_crypto_info_aes_gcm_256 *info = &priv_tx->crypto_info.crypto_info_256;

		rec_seq = info->rec_seq;
		rec_seq_sz = sizeof(info->rec_seq);
		break;
	}
	default:
		WARN_ONCE(1, "Unsupported cipher type %u\n",
			  priv_tx->crypto_info.crypto_info.cipher_type);
		return;
	}

	skip_static_post = !memcmp(rec_seq, &rn_be, rec_seq_sz);
	if (!skip_static_post)
+23 −4
Original line number Diff line number Diff line
@@ -21,7 +21,7 @@ enum {

static void
fill_static_params(struct mlx5_wqe_tls_static_params_seg *params,
		   struct tls12_crypto_info_aes_gcm_128 *info,
		   union mlx5e_crypto_info *crypto_info,
		   u32 key_id, u32 resync_tcp_sn)
{
	char *initial_rn, *gcm_iv;
@@ -32,7 +32,26 @@ fill_static_params(struct mlx5_wqe_tls_static_params_seg *params,

	ctx = params->ctx;

	switch (crypto_info->crypto_info.cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		struct tls12_crypto_info_aes_gcm_128 *info =
			&crypto_info->crypto_info_128;

		EXTRACT_INFO_FIELDS;
		break;
	}
	case TLS_CIPHER_AES_GCM_256: {
		struct tls12_crypto_info_aes_gcm_256 *info =
			&crypto_info->crypto_info_256;

		EXTRACT_INFO_FIELDS;
		break;
	}
	default:
		WARN_ONCE(1, "Unsupported cipher type %u\n",
			  crypto_info->crypto_info.cipher_type);
		return;
	}

	gcm_iv      = MLX5_ADDR_OF(tls_static_params, ctx, gcm_iv);
	initial_rn  = MLX5_ADDR_OF(tls_static_params, ctx, initial_record_number);
@@ -54,7 +73,7 @@ fill_static_params(struct mlx5_wqe_tls_static_params_seg *params,
void
mlx5e_ktls_build_static_params(struct mlx5e_set_tls_static_params_wqe *wqe,
			       u16 pc, u32 sqn,
			       struct tls12_crypto_info_aes_gcm_128 *info,
			       union mlx5e_crypto_info *crypto_info,
			       u32 tis_tir_num, u32 key_id, u32 resync_tcp_sn,
			       bool fence, enum tls_offload_ctx_dir direction)
{
@@ -75,7 +94,7 @@ mlx5e_ktls_build_static_params(struct mlx5e_set_tls_static_params_wqe *wqe,
	ucseg->flags = MLX5_UMR_INLINE;
	ucseg->bsf_octowords = cpu_to_be16(MLX5_ST_SZ_BYTES(tls_static_params) / 16);

	fill_static_params(&wqe->params, info, key_id, resync_tcp_sn);
	fill_static_params(&wqe->params, crypto_info, key_id, resync_tcp_sn);
}

static void
+7 −1
Original line number Diff line number Diff line
@@ -27,6 +27,12 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx);
void mlx5e_ktls_rx_resync(struct net_device *netdev, struct sock *sk, u32 seq, u8 *rcd_sn);

/* Holds the per-connection TLS crypto parameters for either supported
 * AES-GCM key size. The generic tls_crypto_info member overlays the common
 * header of both tls12 variants, so callers can read
 * crypto_info.cipher_type first and then access crypto_info_128 or
 * crypto_info_256 accordingly (see the cipher_type switches in the ktls
 * rx/tx paths).
 */
union mlx5e_crypto_info {
	struct tls_crypto_info crypto_info;             /* common header: version + cipher_type */
	struct tls12_crypto_info_aes_gcm_128 crypto_info_128; /* TLS_CIPHER_AES_GCM_128 */
	struct tls12_crypto_info_aes_gcm_256 crypto_info_256; /* TLS_CIPHER_AES_GCM_256 */
};

struct mlx5e_set_tls_static_params_wqe {
	struct mlx5_wqe_ctrl_seg ctrl;
	struct mlx5_wqe_umr_ctrl_seg uctrl;
@@ -72,7 +78,7 @@ struct mlx5e_get_tls_progress_params_wqe {
void
mlx5e_ktls_build_static_params(struct mlx5e_set_tls_static_params_wqe *wqe,
			       u16 pc, u32 sqn,
			       struct tls12_crypto_info_aes_gcm_128 *info,
			       union mlx5e_crypto_info *crypto_info,
			       u32 tis_tir_num, u32 key_id, u32 resync_tcp_sn,
			       bool fence, enum tls_offload_ctx_dir direction);
void
Loading