Commit 1566e7d6 authored by Dexuan Cui, committed by Paolo Abeni
Browse files

net: mana: Add the Linux MANA PF driver



This minimal PF driver runs on bare metal.
Currently Ethernet TX/RX works. SR-IOV management is not supported yet.

Signed-off-by: Dexuan Cui <decui@microsoft.com>
Co-developed-by: Haiyang Zhang <haiyangz@microsoft.com>
Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parent 8bca4589
Loading
Loading
Loading
Loading
+10 −0
Original line number Diff line number Diff line
@@ -348,6 +348,7 @@ struct gdma_context {
	struct completion	eq_test_event;
	u32			test_event_eq_id;

	bool			is_pf;
	void __iomem		*bar0_va;
	void __iomem		*shm_base;
	void __iomem		*db_page_base;
@@ -469,6 +470,15 @@ struct gdma_eqe {
#define GDMA_REG_DB_PAGE_SIZE	0x10
#define GDMA_REG_SHM_OFFSET	0x18

#define GDMA_PF_REG_DB_PAGE_SIZE	0xD0
#define GDMA_PF_REG_DB_PAGE_OFF		0xC8
#define GDMA_PF_REG_SHM_OFF		0x70

#define GDMA_SRIOV_REG_CFG_BASE_OFF	0x108

#define MANA_PF_DEVICE_ID 0x00B9
#define MANA_VF_DEVICE_ID 0x00BA

/* Bookkeeping handed back when a WQE is posted to a work queue.
 * wqe_size_in_bu: size of the posted WQE expressed in the queue's basic
 * units (BUs) — presumably the WQ allocation granularity; confirm against
 * the GDMA spec.
 */
struct gdma_posted_wqe_info {
	u32 wqe_size_in_bu;
};
+36 −3
Original line number Diff line number Diff line
@@ -18,7 +18,24 @@ static u64 mana_gd_r64(struct gdma_context *g, u64 offset)
	return readq(g->bar0_va + offset);
}

static void mana_gd_init_registers(struct pci_dev *pdev)
/* Discover the register layout used in PF (bare-metal) mode.
 *
 * Unlike the VF, the PF does not use the fixed GDMA_REG_* offsets: the
 * doorbell page size/base come from PF-specific registers, and the
 * shared-memory base is relative to an SR-IOV config region whose base
 * is itself read from GDMA_SRIOV_REG_CFG_BASE_OFF.
 *
 * Note: mana_gd_r64()/mana_gd_r32() read relative to bar0_va, so the SHM
 * offset register is located at bar0 + sriov_base_off + GDMA_PF_REG_SHM_OFF,
 * while the resulting shm_base is relative to the SR-IOV region.
 */
static void mana_gd_init_pf_regs(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);
	void __iomem *sriov_base_va;
	u64 sriov_base_off;

	/* Only the low 16 bits carry the doorbell page size. */
	gc->db_page_size = mana_gd_r32(gc, GDMA_PF_REG_DB_PAGE_SIZE) & 0xFFFF;
	gc->db_page_base = gc->bar0_va +
				mana_gd_r64(gc, GDMA_PF_REG_DB_PAGE_OFF);

	sriov_base_off = mana_gd_r64(gc, GDMA_SRIOV_REG_CFG_BASE_OFF);

	sriov_base_va = gc->bar0_va + sriov_base_off;
	gc->shm_base = sriov_base_va +
			mana_gd_r64(gc, sriov_base_off + GDMA_PF_REG_SHM_OFF);
}

static void mana_gd_init_vf_regs(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);

@@ -30,6 +47,16 @@ static void mana_gd_init_registers(struct pci_dev *pdev)
	gc->shm_base = gc->bar0_va + mana_gd_r64(gc, GDMA_REG_SHM_OFFSET);
}

/* Program the doorbell/shared-memory register layout matching the mode
 * (PF vs. VF) recorded in gc->is_pf at probe time.
 */
static void mana_gd_init_registers(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);

	if (!gc->is_pf) {
		mana_gd_init_vf_regs(pdev);
		return;
	}

	mana_gd_init_pf_regs(pdev);
}

static int mana_gd_query_max_resources(struct pci_dev *pdev)
{
	struct gdma_context *gc = pci_get_drvdata(pdev);
@@ -1304,6 +1331,11 @@ static void mana_gd_cleanup(struct pci_dev *pdev)
	mana_gd_remove_irqs(pdev);
}

/* A device operates in PF mode iff it enumerates with the PF PCI ID. */
static bool mana_is_pf(unsigned short dev_id)
{
	switch (dev_id) {
	case MANA_PF_DEVICE_ID:
		return true;
	default:
		return false;
	}
}

static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct gdma_context *gc;
@@ -1340,10 +1372,10 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	if (!bar0_va)
		goto free_gc;

	gc->is_pf = mana_is_pf(pdev->device);
	gc->bar0_va = bar0_va;
	gc->dev = &pdev->dev;


	err = mana_gd_setup(pdev);
	if (err)
		goto unmap_bar;
@@ -1438,7 +1470,8 @@ static void mana_gd_shutdown(struct pci_dev *pdev)
#endif

/* PCI IDs claimed by this driver: Microsoft MANA PF and VF devices.
 * NOTE(review): the literal 0x00BA entry duplicates MANA_VF_DEVICE_ID
 * below — it looks like a stale pre-image line left by the diff
 * rendering; confirm only the two symbolic entries should remain.
 */
static const struct pci_device_id mana_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, 0x00BA) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, MANA_PF_DEVICE_ID) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, MANA_VF_DEVICE_ID) },
	{ }
};

+17 −1
Original line number Diff line number Diff line
@@ -158,6 +158,14 @@ static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
			hwc->rxq->msg_buf->gpa_mkey = val;
			hwc->txq->msg_buf->gpa_mkey = val;
			break;

		case HWC_INIT_DATA_PF_DEST_RQ_ID:
			hwc->pf_dest_vrq_id = val;
			break;

		case HWC_INIT_DATA_PF_DEST_CQ_ID:
			hwc->pf_dest_vrcq_id = val;
			break;
		}

		break;
@@ -773,10 +781,13 @@ void mana_hwc_destroy_channel(struct gdma_context *gc)
int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
			  const void *req, u32 resp_len, void *resp)
{
	struct gdma_context *gc = hwc->gdma_dev->gdma_context;
	struct hwc_work_request *tx_wr;
	struct hwc_wq *txq = hwc->txq;
	struct gdma_req_hdr *req_msg;
	struct hwc_caller_ctx *ctx;
	u32 dest_vrcq = 0;
	u32 dest_vrq = 0;
	u16 msg_id;
	int err;

@@ -803,7 +814,12 @@ int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,

	tx_wr->msg_size = req_len;

	err = mana_hwc_post_tx_wqe(txq, tx_wr, 0, 0, false);
	if (gc->is_pf) {
		dest_vrq = hwc->pf_dest_vrq_id;
		dest_vrcq = hwc->pf_dest_vrcq_id;
	}

	err = mana_hwc_post_tx_wqe(txq, tx_wr, dest_vrq, dest_vrcq, false);
	if (err) {
		dev_err(hwc->dev, "HWC: Failed to post send WQE: %d\n", err);
		goto out;
+5 −0
Original line number Diff line number Diff line
@@ -20,6 +20,8 @@
#define HWC_INIT_DATA_MAX_NUM_CQS	7
#define HWC_INIT_DATA_PDID		8
#define HWC_INIT_DATA_GPA_MKEY		9
#define HWC_INIT_DATA_PF_DEST_RQ_ID	10
#define HWC_INIT_DATA_PF_DEST_CQ_ID	11

/* Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
@@ -178,6 +180,9 @@ struct hw_channel_context {
	struct semaphore sema;
	struct gdma_resource inflight_msg_res;

	u32 pf_dest_vrq_id;
	u32 pf_dest_vrcq_id;

	struct hwc_caller_ctx *caller_ctx;
};

+64 −0
Original line number Diff line number Diff line
@@ -374,6 +374,7 @@ struct mana_port_context {
	unsigned int num_queues;

	mana_handle_t port_handle;
	mana_handle_t pf_filter_handle;

	u16 port_idx;

@@ -420,6 +421,12 @@ enum mana_command_code {
	MANA_FENCE_RQ		= 0x20006,
	MANA_CONFIG_VPORT_RX	= 0x20007,
	MANA_QUERY_VPORT_CONFIG	= 0x20008,

	/* Privileged commands for the PF mode */
	MANA_REGISTER_FILTER	= 0x28000,
	MANA_DEREGISTER_FILTER	= 0x28001,
	MANA_REGISTER_HW_PORT	= 0x28003,
	MANA_DEREGISTER_HW_PORT	= 0x28004,
};

/* Query Device Configuration */
@@ -547,6 +554,63 @@ struct mana_cfg_rx_steer_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Register HW vPort (PF-mode privileged command MANA_REGISTER_HW_PORT).
 * Layout is exchanged with hardware verbatim — do not reorder fields.
 */
struct mana_register_hw_vport_req {
	struct gdma_req_hdr hdr;
	u16 attached_gfid;		/* presumably the GFID to attach — confirm */
	u8 is_pf_default_vport;		/* non-zero: register as PF default vport */
	u8 reserved1;
	u8 allow_all_ether_types;	/* non-zero: accept every EtherType */
	u8 reserved2;
	u8 reserved3;
	u8 reserved4;
}; /* HW DATA */

/* Response: carries the handle used by later deregister/filter requests. */
struct mana_register_hw_vport_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t hw_vport_handle;
}; /* HW DATA */

/* Deregister HW vPort (MANA_DEREGISTER_HW_PORT): releases a handle
 * previously returned by mana_register_hw_vport_resp.
 */
struct mana_deregister_hw_vport_req {
	struct gdma_req_hdr hdr;
	mana_handle_t hw_vport_handle;	/* handle from the register response */
}; /* HW DATA */

/* Response carries only the common status header. */
struct mana_deregister_hw_vport_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Register filter (PF-mode privileged command MANA_REGISTER_FILTER).
 * Installs a MAC-address filter on the given vport — presumably steering
 * frames destined to mac_addr to that vport; confirm against the PF
 * command specification. Layout is exchanged with hardware verbatim.
 */
struct mana_register_filter_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;	/* vport the filter is attached to */
	u8 mac_addr[6];		/* destination MAC to match */
	u8 reserved1;
	u8 reserved2;
	u8 reserved3;
	u8 reserved4;
	u16 reserved5;
	u32 reserved6;
	u32 reserved7;
	u32 reserved8;
}; /* HW DATA */

/* Response: handle used later by mana_deregister_filter_req. */
struct mana_register_filter_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t filter_handle;
}; /* HW DATA */

/* Deregister filter (MANA_DEREGISTER_FILTER): removes a filter previously
 * installed via mana_register_filter_req.
 */
struct mana_deregister_filter_req {
	struct gdma_req_hdr hdr;
	mana_handle_t filter_handle;	/* handle from the register response */
}; /* HW DATA */

/* Response carries only the common status header. */
struct mana_deregister_filter_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

#define MANA_MAX_NUM_QUEUES 64

#define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1)
Loading