Commit 5c312574 authored by Paolo Abeni
Browse files

Merge tag 'mlx5-fixes-2023-01-18' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================

This series provides bug fixes to mlx5 driver.

* tag 'mlx5-fixes-2023-01-18' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net: mlx5: eliminate anonymous module_init & module_exit
  net/mlx5: E-switch, Fix switchdev mode after devlink reload
  net/mlx5e: Protect global IPsec ASO
  net/mlx5e: Remove optimization which prevented update of ESN state
  net/mlx5e: Set decap action based on attr for sample
  net/mlx5e: QoS, Fix wrongfully setting parent_element_id on MODIFY_SCHEDULING_ELEMENT
  net/mlx5: E-switch, Fix setting of reserved fields on MODIFY_SCHEDULING_ELEMENT
  net/mlx5e: Remove redundant xsk pointer check in mlx5e_mpwrq_validate_xsk
  net/mlx5e: Avoid false lock dependency warning on tc_ht even more
  net/mlx5: fix missing mutex_unlock in mlx5_fw_fatal_reporter_err_work()
====================

Link: https://lore.kernel.org/r/


Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parents 55ba18dc 2c1e1b94
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -637,7 +637,7 @@ mlx5e_htb_update_children(struct mlx5e_htb *htb, struct mlx5e_qos_node *node,
		if (child->bw_share == old_bw_share)
			continue;

		err_one = mlx5_qos_update_node(htb->mdev, child->hw_id, child->bw_share,
		err_one = mlx5_qos_update_node(htb->mdev, child->bw_share,
					       child->max_average_bw, child->hw_id);
		if (!err && err_one) {
			err = err_one;
@@ -671,7 +671,7 @@ mlx5e_htb_node_modify(struct mlx5e_htb *htb, u16 classid, u64 rate, u64 ceil,
	mlx5e_htb_convert_rate(htb, rate, node->parent, &bw_share);
	mlx5e_htb_convert_ceil(htb, ceil, &max_average_bw);

	err = mlx5_qos_update_node(htb->mdev, node->parent->hw_id, bw_share,
	err = mlx5_qos_update_node(htb->mdev, bw_share,
				   max_average_bw, node->hw_id);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Firmware error when modifying a node.");
+1 −2
Original line number Diff line number Diff line
@@ -578,7 +578,6 @@ int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *pa
{
	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
	bool unaligned = xsk ? xsk->unaligned : false;
	u16 max_mtu_pkts;

	if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode))
@@ -591,7 +590,7 @@ int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *pa
	 * needed number of WQEs exceeds the maximum.
	 */
	max_mtu_pkts = min_t(u8, MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE,
			     mlx5e_mpwrq_max_log_rq_pkts(mdev, page_shift, unaligned));
			     mlx5e_mpwrq_max_log_rq_pkts(mdev, page_shift, xsk->unaligned));
	if (params->log_rq_mtu_frames > max_mtu_pkts) {
		mlx5_core_err(mdev, "Current RQ length %d is too big for XSK with given frame size %u\n",
			      1 << params->log_rq_mtu_frames, xsk->chunk_size);
+2 −3
Original line number Diff line number Diff line
@@ -477,7 +477,6 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
	struct mlx5e_sample_flow *sample_flow;
	struct mlx5e_sample_attr *sample_attr;
	struct mlx5_flow_attr *pre_attr;
	u32 tunnel_id = attr->tunnel_id;
	struct mlx5_eswitch *esw;
	u32 default_tbl_id;
	u32 obj_id;
@@ -522,7 +521,7 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
	restore_obj.sample.group_id = sample_attr->group_num;
	restore_obj.sample.rate = sample_attr->rate;
	restore_obj.sample.trunc_size = sample_attr->trunc_size;
	restore_obj.sample.tunnel_id = tunnel_id;
	restore_obj.sample.tunnel_id = attr->tunnel_id;
	err = mapping_add(esw->offloads.reg_c0_obj_pool, &restore_obj, &obj_id);
	if (err)
		goto err_obj_id;
@@ -548,7 +547,7 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
	/* For decap action, do decap in the original flow table instead of the
	 * default flow table.
	 */
	if (tunnel_id)
	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
		pre_attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
	pre_attr->modify_hdr = sample_flow->restore->modify_hdr;
	pre_attr->flags = MLX5_ATTR_FLAG_SAMPLE;
+2 −5
Original line number Diff line number Diff line
@@ -122,11 +122,8 @@ struct mlx5e_ipsec_aso {
	u8 ctx[MLX5_ST_SZ_BYTES(ipsec_aso)];
	dma_addr_t dma_addr;
	struct mlx5_aso *aso;
	/* IPsec ASO caches data on every query call,
	 * so in nested calls, we can use this boolean to save
	 * recursive calls to mlx5e_ipsec_aso_query()
	 */
	u8 use_cache : 1;
	/* Protect ASO WQ access, as it is global to whole IPsec */
	spinlock_t lock;
};

struct mlx5e_ipsec {
+6 −6
Original line number Diff line number Diff line
@@ -320,7 +320,6 @@ static void mlx5e_ipsec_handle_event(struct work_struct *_work)
	if (ret)
		goto unlock;

	aso->use_cache = true;
	if (attrs->esn_trigger &&
	    !MLX5_GET(ipsec_aso, aso->ctx, esn_event_arm)) {
		u32 mode_param = MLX5_GET(ipsec_aso, aso->ctx, mode_parameter);
@@ -333,7 +332,6 @@ static void mlx5e_ipsec_handle_event(struct work_struct *_work)
		    !MLX5_GET(ipsec_aso, aso->ctx, hard_lft_arm) ||
		    !MLX5_GET(ipsec_aso, aso->ctx, remove_flow_enable))
			xfrm_state_check_expire(sa_entry->x);
	aso->use_cache = false;

unlock:
	spin_unlock(&sa_entry->x->lock);
@@ -398,6 +396,7 @@ int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec)
		goto err_aso_create;
	}

	spin_lock_init(&aso->lock);
	ipsec->nb.notifier_call = mlx5e_ipsec_event;
	mlx5_notifier_register(mdev, &ipsec->nb);

@@ -456,13 +455,12 @@ int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry,
	struct mlx5e_hw_objs *res;
	struct mlx5_aso_wqe *wqe;
	u8 ds_cnt;
	int ret;

	lockdep_assert_held(&sa_entry->x->lock);
	if (aso->use_cache)
		return 0;

	res = &mdev->mlx5e_res.hw_objs;

	spin_lock_bh(&aso->lock);
	memset(aso->ctx, 0, sizeof(aso->ctx));
	wqe = mlx5_aso_get_wqe(aso->aso);
	ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
@@ -477,7 +475,9 @@ int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry,
	mlx5e_ipsec_aso_copy(ctrl, data);

	mlx5_aso_post_wqe(aso->aso, false, &wqe->ctrl);
	return mlx5_aso_poll_cq(aso->aso, false);
	ret = mlx5_aso_poll_cq(aso->aso, false);
	spin_unlock_bh(&aso->lock);
	return ret;
}

void mlx5e_ipsec_aso_update_curlft(struct mlx5e_ipsec_sa_entry *sa_entry,
Loading