Commit 79b0872b authored by Jakub Kicinski
Browse files
Long Li says:

====================
Introduce Microsoft Azure Network Adapter (MANA) RDMA driver [netdev prep]

The first 11 patches modify the MANA Ethernet driver to support the
RDMA driver.

* 'mana-shared-6.2' of https://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  net: mana: Define data structures for protection domain and memory registration
  net: mana: Define data structures for allocating doorbell page from GDMA
  net: mana: Define and process GDMA response code GDMA_STATUS_MORE_ENTRIES
  net: mana: Define max values for SGL entries
  net: mana: Move header files to a common location
  net: mana: Record port number in netdev
  net: mana: Export Work Queue functions for use by RDMA driver
  net: mana: Set the DMA device max segment size
  net: mana: Handle vport sharing between devices
  net: mana: Record the physical address for doorbell page region
  net: mana: Add support for auxiliary device
====================

Link: https://lore.kernel.org/all/1667502990-2559-1-git-send-email-longli@linuxonhyperv.com/


Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents edaf5df2 28c66cfa
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -9543,6 +9543,7 @@ F: include/asm-generic/hyperv-tlfs.h
F:	include/asm-generic/mshyperv.h
F:	include/clocksource/hyperv_timer.h
F:	include/linux/hyperv.h
F:	include/net/mana
F:	include/uapi/linux/hyperv.h
F:	net/vmw_vsock/hyperv_transport.c
F:	tools/hv/
+1 −0
Original line number Diff line number Diff line
@@ -19,6 +19,7 @@ config MICROSOFT_MANA
	tristate "Microsoft Azure Network Adapter (MANA) support"
	depends on PCI_MSI && X86_64
	depends on PCI_HYPERV
	select AUXILIARY_BUS
	help
	  This driver supports Microsoft Azure Network Adapter (MANA).
	  So far, the driver is only supported on X86_64.
+29 −11
Original line number Diff line number Diff line
@@ -6,7 +6,7 @@
#include <linux/utsname.h>
#include <linux/version.h>

#include "mana.h"
#include <net/mana/mana.h>

static u32 mana_gd_r32(struct gdma_context *g, u64 offset)
{
@@ -44,6 +44,9 @@ static void mana_gd_init_vf_regs(struct pci_dev *pdev)
	gc->db_page_base = gc->bar0_va +
				mana_gd_r64(gc, GDMA_REG_DB_PAGE_OFFSET);

	gc->phys_db_page_base = gc->bar0_pa +
				mana_gd_r64(gc, GDMA_REG_DB_PAGE_OFFSET);

	gc->shm_base = gc->bar0_va + mana_gd_r64(gc, GDMA_REG_SHM_OFFSET);
}

@@ -149,6 +152,7 @@ int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req,

	return mana_hwc_send_request(hwc, req_len, req, resp_len, resp);
}
EXPORT_SYMBOL_NS(mana_gd_send_request, NET_MANA);

int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
			 struct gdma_mem_info *gmi)
@@ -194,7 +198,7 @@ static int mana_gd_create_hw_eq(struct gdma_context *gc,
	req.type = queue->type;
	req.pdid = queue->gdma_dev->pdid;
	req.doolbell_id = queue->gdma_dev->doorbell;
	req.gdma_region = queue->mem_info.gdma_region;
	req.gdma_region = queue->mem_info.dma_region_handle;
	req.queue_size = queue->queue_size;
	req.log2_throttle_limit = queue->eq.log2_throttle_limit;
	req.eq_pci_msix_index = queue->eq.msix_index;
@@ -208,7 +212,7 @@ static int mana_gd_create_hw_eq(struct gdma_context *gc,

	queue->id = resp.queue_index;
	queue->eq.disable_needed = true;
	queue->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
	queue->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
	return 0;
}

@@ -667,24 +671,30 @@ int mana_gd_create_hwc_queue(struct gdma_dev *gd,
	return err;
}

static void mana_gd_destroy_dma_region(struct gdma_context *gc, u64 gdma_region)
int mana_gd_destroy_dma_region(struct gdma_context *gc,
			       gdma_obj_handle_t dma_region_handle)
{
	struct gdma_destroy_dma_region_req req = {};
	struct gdma_general_resp resp = {};
	int err;

	if (gdma_region == GDMA_INVALID_DMA_REGION)
		return;
	if (dma_region_handle == GDMA_INVALID_DMA_REGION)
		return 0;

	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_DMA_REGION, sizeof(req),
			     sizeof(resp));
	req.gdma_region = gdma_region;
	req.dma_region_handle = dma_region_handle;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status)
	if (err || resp.hdr.status) {
		dev_err(gc->dev, "Failed to destroy DMA region: %d, 0x%x\n",
			err, resp.hdr.status);
		return -EPROTO;
	}

	return 0;
}
EXPORT_SYMBOL_NS(mana_gd_destroy_dma_region, NET_MANA);

static int mana_gd_create_dma_region(struct gdma_dev *gd,
				     struct gdma_mem_info *gmi)
@@ -729,14 +739,15 @@ static int mana_gd_create_dma_region(struct gdma_dev *gd,
	if (err)
		goto out;

	if (resp.hdr.status || resp.gdma_region == GDMA_INVALID_DMA_REGION) {
	if (resp.hdr.status ||
	    resp.dma_region_handle == GDMA_INVALID_DMA_REGION) {
		dev_err(gc->dev, "Failed to create DMA region: 0x%x\n",
			resp.hdr.status);
		err = -EPROTO;
		goto out;
	}

	gmi->gdma_region = resp.gdma_region;
	gmi->dma_region_handle = resp.dma_region_handle;
out:
	kfree(req);
	return err;
@@ -859,7 +870,7 @@ void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue)
		return;
	}

	mana_gd_destroy_dma_region(gc, gmi->gdma_region);
	mana_gd_destroy_dma_region(gc, gmi->dma_region_handle);
	mana_gd_free_memory(gmi);
	kfree(queue);
}
@@ -1393,6 +1404,12 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	if (err)
		goto release_region;

	err = dma_set_max_seg_size(&pdev->dev, UINT_MAX);
	if (err) {
		dev_err(&pdev->dev, "Failed to set dma device segment size\n");
		goto release_region;
	}

	err = -ENOMEM;
	gc = vzalloc(sizeof(*gc));
	if (!gc)
@@ -1400,6 +1417,7 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)

	mutex_init(&gc->eq_test_event_mutex);
	pci_set_drvdata(pdev, gc);
	gc->bar0_pa = pci_resource_start(pdev, 0);

	bar0_va = pci_iomap(pdev, bar, 0);
	if (!bar0_va)
+3 −3
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */

#include "gdma.h"
#include "hw_channel.h"
#include <net/mana/gdma.h>
#include <net/mana/hw_channel.h>

static int mana_hwc_get_msg_index(struct hw_channel_context *hwc, u16 *msg_id)
{
@@ -836,7 +836,7 @@ int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
		goto out;
	}

	if (ctx->status_code) {
	if (ctx->status_code && ctx->status_code != GDMA_STATUS_MORE_ENTRIES) {
		dev_err(hwc->dev, "HWC: Failed hw_channel req: 0x%x\n",
			ctx->status_code);
		err = -EPROTO;
+1 −1
Original line number Diff line number Diff line
@@ -8,7 +8,7 @@
#include <linux/bpf_trace.h>
#include <net/xdp.h>

#include "mana.h"
#include <net/mana/mana.h>

void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev)
{
Loading