Commit a1aed456 authored by Guangbin Huang's avatar Guangbin Huang Committed by David S. Miller
Browse files

net: hns3: add query vf ring and vector map relation



This patch adds a new mailbox opcode to query the map relation between
a VF ring and its vector.

Signed-off-by: Guangbin Huang <huangguangbin2@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 416eedb6
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -46,6 +46,7 @@ enum HCLGE_MBX_OPCODE {
	HCLGE_MBX_PUSH_PROMISC_INFO,	/* (PF -> VF) push vf promisc info */
	HCLGE_MBX_VF_UNINIT,            /* (VF -> PF) vf is unintializing */
	HCLGE_MBX_HANDLE_VF_TBL,	/* (VF -> PF) store/clear hw table */
	HCLGE_MBX_GET_RING_VECTOR_MAP,	/* (VF -> PF) get ring-to-vector map */

	HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf flr status */
	HCLGE_MBX_PUSH_LINK_STATUS,	/* (M7 -> PF) get port link status */
+83 −0
Original line number Diff line number Diff line
@@ -251,6 +251,81 @@ static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
	return ret;
}

/* Ask firmware for the vector currently bound to one ring of @vport.
 * Reuses the HCLGE_OPC_ADD_RING_TO_VECTOR command in read mode; the
 * answer is returned in @desc for the caller to decode.
 */
static int hclge_query_ring_vector_map(struct hclge_vport *vport,
				       struct hnae3_ring_chain_node *ring_chain,
				       struct hclge_desc *desc)
{
	struct hclge_ctrl_vector_chain_cmd *req =
		(struct hclge_ctrl_vector_chain_cmd *)desc->data;
	struct hclge_dev *hdev = vport->back;
	u16 type_and_id;
	int ret;

	hclge_cmd_setup_basic_desc(desc, HCLGE_OPC_ADD_RING_TO_VECTOR, true);

	/* Identify the queried ring by type (TX/RX) and TQP index */
	type_and_id = le16_to_cpu(req->tqp_type_and_id[0]);
	hnae3_set_field(type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S,
			hnae3_get_bit(ring_chain->flag, HNAE3_RING_TYPE_B));
	hnae3_set_field(type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S,
			ring_chain->tqp_index);
	req->tqp_type_and_id[0] = cpu_to_le16(type_and_id);
	req->vfid = vport->vport_id;

	ret = hclge_cmd_send(&hdev->hw, desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Get VF ring vector map info fail, status is %d.\n",
			ret);

	return ret;
}

/* Handle the HCLGE_MBX_GET_RING_VECTOR_MAP mailbox request: look up the
 * vector mapped to the single ring described in @req and fill @resp with
 * ring type, TQP index, GL index and (low byte of) vector id.
 */
static int hclge_get_vf_ring_vector_map(struct hclge_vport *vport,
					struct hclge_mbx_vf_to_pf_cmd *req,
					struct hclge_respond_to_vf_msg *resp)
{
#define HCLGE_LIMIT_RING_NUM			1
#define HCLGE_RING_TYPE_OFFSET			0
#define HCLGE_TQP_INDEX_OFFSET			1
#define HCLGE_INT_GL_INDEX_OFFSET		2
#define HCLGE_VECTOR_ID_OFFSET			3
#define HCLGE_RING_VECTOR_MAP_INFO_LEN		4
	struct hnae3_ring_chain_node ring_chain;
	struct hclge_desc desc;
	struct hclge_ctrl_vector_chain_cmd *data =
		(struct hclge_ctrl_vector_chain_cmd *)desc.data;
	u16 type_and_id;
	u8 gl_idx;
	int ret;

	/* Only a single ring may be queried per mailbox request */
	req->msg.ring_num = HCLGE_LIMIT_RING_NUM;

	memset(&ring_chain, 0, sizeof(ring_chain));
	ret = hclge_get_ring_chain_from_mbx(req, &ring_chain, vport);
	if (ret)
		return ret;

	ret = hclge_query_ring_vector_map(vport, &ring_chain, &desc);
	if (ret)
		goto free_ring_chain;

	/* Decode the GL index from the firmware reply */
	type_and_id = le16_to_cpu(data->tqp_type_and_id[0]);
	gl_idx = hnae3_get_field(type_and_id,
				 HCLGE_INT_GL_IDX_M, HCLGE_INT_GL_IDX_S);

	resp->data[HCLGE_RING_TYPE_OFFSET] = req->msg.param[0].ring_type;
	resp->data[HCLGE_TQP_INDEX_OFFSET] = req->msg.param[0].tqp_index;
	resp->data[HCLGE_INT_GL_INDEX_OFFSET] = gl_idx;
	resp->data[HCLGE_VECTOR_ID_OFFSET] = data->int_vector_id_l;
	resp->len = HCLGE_RING_VECTOR_MAP_INFO_LEN;

free_ring_chain:
	hclge_free_vector_ring_chain(&ring_chain);
	return ret;
}

static void hclge_set_vf_promisc_mode(struct hclge_vport *vport,
				      struct hclge_mbx_vf_to_pf_cmd *req)
{
@@ -755,6 +830,14 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
			ret = hclge_map_unmap_ring_to_vf_vector(vport, false,
								req);
			break;
		case HCLGE_MBX_GET_RING_VECTOR_MAP:
			ret = hclge_get_vf_ring_vector_map(vport, req,
							   &resp_msg);
			if (ret)
				dev_err(&hdev->pdev->dev,
					"PF fail(%d) to get VF ring vector map\n",
					ret);
			break;
		case HCLGE_MBX_SET_PROMISC_MODE:
			hclge_set_vf_promisc_mode(vport, req);
			break;