Commit 88d03307 authored by Gal Pressman, committed by Jason Gunthorpe
Browse files

RDMA/efa: Unified getters/setters for device structs bitmask access

Use unified macros for device structs access instead of open coding the
shifts and masks over and over again.

Link: https://lore.kernel.org/r/20200225114010.21790-2-galpress@amazon.com


Reviewed-by: Firas JahJah <firasj@amazon.com>
Reviewed-by: Yossi Leybovich <sleybo@amazon.com>
Signed-off-by: Gal Pressman <galpress@amazon.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent cfec045b
Loading
Loading
Loading
Loading
+1 −6
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
 * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
 * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#ifndef _EFA_ADMIN_CMDS_H_
@@ -801,21 +801,16 @@ struct efa_admin_mmio_req_read_less_resp {

/* create_qp_cmd */
#define EFA_ADMIN_CREATE_QP_CMD_SQ_VIRT_MASK                BIT(0)
#define EFA_ADMIN_CREATE_QP_CMD_RQ_VIRT_SHIFT               1
#define EFA_ADMIN_CREATE_QP_CMD_RQ_VIRT_MASK                BIT(1)

/* reg_mr_cmd */
#define EFA_ADMIN_REG_MR_CMD_PHYS_PAGE_SIZE_SHIFT_MASK      GENMASK(4, 0)
#define EFA_ADMIN_REG_MR_CMD_MEM_ADDR_PHY_MODE_EN_SHIFT     7
#define EFA_ADMIN_REG_MR_CMD_MEM_ADDR_PHY_MODE_EN_MASK      BIT(7)
#define EFA_ADMIN_REG_MR_CMD_LOCAL_WRITE_ENABLE_MASK        BIT(0)
#define EFA_ADMIN_REG_MR_CMD_REMOTE_READ_ENABLE_SHIFT       2
#define EFA_ADMIN_REG_MR_CMD_REMOTE_READ_ENABLE_MASK        BIT(2)

/* create_cq_cmd */
#define EFA_ADMIN_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_SHIFT 5
#define EFA_ADMIN_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK BIT(5)
#define EFA_ADMIN_CREATE_CQ_CMD_VIRT_SHIFT                  6
#define EFA_ADMIN_CREATE_CQ_CMD_VIRT_MASK                   BIT(6)
#define EFA_ADMIN_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK    GENMASK(4, 0)

+1 −3
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
 * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
 * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#ifndef _EFA_ADMIN_H_
@@ -121,9 +121,7 @@ struct efa_admin_aenq_entry {
/* aq_common_desc */
#define EFA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK            GENMASK(11, 0)
#define EFA_ADMIN_AQ_COMMON_DESC_PHASE_MASK                 BIT(0)
#define EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_SHIFT            1
#define EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK             BIT(1)
#define EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_SHIFT   2
#define EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK    BIT(2)

/* acq_common_desc */
+71 −83
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
 * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
 * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#include "efa_com.h"
@@ -16,21 +16,10 @@
#define EFA_ASYNC_QUEUE_DEPTH 16
#define EFA_ADMIN_QUEUE_DEPTH 32

#define MIN_EFA_VER\
	((EFA_ADMIN_API_VERSION_MAJOR << EFA_REGS_VERSION_MAJOR_VERSION_SHIFT) | \
	 (EFA_ADMIN_API_VERSION_MINOR & EFA_REGS_VERSION_MINOR_VERSION_MASK))

#define EFA_CTRL_MAJOR          0
#define EFA_CTRL_MINOR          0
#define EFA_CTRL_SUB_MINOR      1

#define MIN_EFA_CTRL_VER \
	(((EFA_CTRL_MAJOR) << \
	(EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
	((EFA_CTRL_MINOR) << \
	(EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
	(EFA_CTRL_SUB_MINOR))

/* Split a (possibly 64-bit) DMA address into its low/high 32-bit halves,
 * e.g. for programming the device's paired *_BASE_LO/*_BASE_HI registers.
 */
#define EFA_DMA_ADDR_TO_UINT32_LOW(x)   ((u32)((u64)(x)))
#define EFA_DMA_ADDR_TO_UINT32_HIGH(x)  ((u32)(((u64)(x)) >> 32))

@@ -84,7 +73,7 @@ static u32 efa_com_reg_read32(struct efa_com_dev *edev, u16 offset)
	struct efa_com_mmio_read *mmio_read = &edev->mmio_read;
	struct efa_admin_mmio_req_read_less_resp *read_resp;
	unsigned long exp_time;
	u32 mmio_read_reg;
	u32 mmio_read_reg = 0;
	u32 err;

	read_resp = mmio_read->read_resp;
@@ -94,10 +83,9 @@ static u32 efa_com_reg_read32(struct efa_com_dev *edev, u16 offset)

	/* trash DMA req_id to identify when hardware is done */
	read_resp->req_id = mmio_read->seq_num + 0x9aL;
	mmio_read_reg = (offset << EFA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
			EFA_REGS_MMIO_REG_READ_REG_OFF_MASK;
	mmio_read_reg |= mmio_read->seq_num &
			 EFA_REGS_MMIO_REG_READ_REQ_ID_MASK;
	EFA_SET(&mmio_read_reg, EFA_REGS_MMIO_REG_READ_REG_OFF, offset);
	EFA_SET(&mmio_read_reg, EFA_REGS_MMIO_REG_READ_REQ_ID,
		mmio_read->seq_num);

	writel(mmio_read_reg, edev->reg_bar + EFA_REGS_MMIO_REG_READ_OFF);

@@ -137,9 +125,9 @@ static int efa_com_admin_init_sq(struct efa_com_dev *edev)
	struct efa_com_admin_queue *aq = &edev->aq;
	struct efa_com_admin_sq *sq = &aq->sq;
	u16 size = aq->depth * sizeof(*sq->entries);
	u32 aq_caps = 0;
	u32 addr_high;
	u32 addr_low;
	u32 aq_caps;

	sq->entries =
		dma_alloc_coherent(aq->dmadev, size, &sq->dma_addr, GFP_KERNEL);
@@ -160,10 +148,9 @@ static int efa_com_admin_init_sq(struct efa_com_dev *edev)
	writel(addr_low, edev->reg_bar + EFA_REGS_AQ_BASE_LO_OFF);
	writel(addr_high, edev->reg_bar + EFA_REGS_AQ_BASE_HI_OFF);

	aq_caps = aq->depth & EFA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
	aq_caps |= (sizeof(struct efa_admin_aq_entry) <<
			EFA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
			EFA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;
	EFA_SET(&aq_caps, EFA_REGS_AQ_CAPS_AQ_DEPTH, aq->depth);
	EFA_SET(&aq_caps, EFA_REGS_AQ_CAPS_AQ_ENTRY_SIZE,
		sizeof(struct efa_admin_aq_entry));

	writel(aq_caps, edev->reg_bar + EFA_REGS_AQ_CAPS_OFF);

@@ -175,9 +162,9 @@ static int efa_com_admin_init_cq(struct efa_com_dev *edev)
	struct efa_com_admin_queue *aq = &edev->aq;
	struct efa_com_admin_cq *cq = &aq->cq;
	u16 size = aq->depth * sizeof(*cq->entries);
	u32 acq_caps = 0;
	u32 addr_high;
	u32 addr_low;
	u32 acq_caps;

	cq->entries =
		dma_alloc_coherent(aq->dmadev, size, &cq->dma_addr, GFP_KERNEL);
@@ -195,13 +182,11 @@ static int efa_com_admin_init_cq(struct efa_com_dev *edev)
	writel(addr_low, edev->reg_bar + EFA_REGS_ACQ_BASE_LO_OFF);
	writel(addr_high, edev->reg_bar + EFA_REGS_ACQ_BASE_HI_OFF);

	acq_caps = aq->depth & EFA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
	acq_caps |= (sizeof(struct efa_admin_acq_entry) <<
			EFA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
			EFA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;
	acq_caps |= (aq->msix_vector_idx <<
			EFA_REGS_ACQ_CAPS_ACQ_MSIX_VECTOR_SHIFT) &
			EFA_REGS_ACQ_CAPS_ACQ_MSIX_VECTOR_MASK;
	EFA_SET(&acq_caps, EFA_REGS_ACQ_CAPS_ACQ_DEPTH, aq->depth);
	EFA_SET(&acq_caps, EFA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE,
		sizeof(struct efa_admin_acq_entry));
	EFA_SET(&acq_caps, EFA_REGS_ACQ_CAPS_ACQ_MSIX_VECTOR,
		aq->msix_vector_idx);

	writel(acq_caps, edev->reg_bar + EFA_REGS_ACQ_CAPS_OFF);

@@ -212,7 +197,8 @@ static int efa_com_admin_init_aenq(struct efa_com_dev *edev,
				   struct efa_aenq_handlers *aenq_handlers)
{
	struct efa_com_aenq *aenq = &edev->aenq;
	u32 addr_low, addr_high, aenq_caps;
	u32 addr_low, addr_high;
	u32 aenq_caps = 0;
	u16 size;

	if (!aenq_handlers) {
@@ -237,13 +223,11 @@ static int efa_com_admin_init_aenq(struct efa_com_dev *edev,
	writel(addr_low, edev->reg_bar + EFA_REGS_AENQ_BASE_LO_OFF);
	writel(addr_high, edev->reg_bar + EFA_REGS_AENQ_BASE_HI_OFF);

	aenq_caps = aenq->depth & EFA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
	aenq_caps |= (sizeof(struct efa_admin_aenq_entry) <<
		EFA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
		EFA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
	aenq_caps |= (aenq->msix_vector_idx
		      << EFA_REGS_AENQ_CAPS_AENQ_MSIX_VECTOR_SHIFT) &
		     EFA_REGS_AENQ_CAPS_AENQ_MSIX_VECTOR_MASK;
	EFA_SET(&aenq_caps, EFA_REGS_AENQ_CAPS_AENQ_DEPTH, aenq->depth);
	EFA_SET(&aenq_caps, EFA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE,
		sizeof(struct efa_admin_aenq_entry));
	EFA_SET(&aenq_caps, EFA_REGS_AENQ_CAPS_AENQ_MSIX_VECTOR,
		aenq->msix_vector_idx);
	writel(aenq_caps, edev->reg_bar + EFA_REGS_AENQ_CAPS_OFF);

	/*
@@ -280,8 +264,8 @@ static void efa_com_dealloc_ctx_id(struct efa_com_admin_queue *aq,
static inline void efa_com_put_comp_ctx(struct efa_com_admin_queue *aq,
					struct efa_comp_ctx *comp_ctx)
{
	u16 cmd_id = comp_ctx->user_cqe->acq_common_descriptor.command &
		     EFA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
	u16 cmd_id = EFA_GET(&comp_ctx->user_cqe->acq_common_descriptor.command,
			     EFA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID);
	u16 ctx_id = cmd_id & (aq->depth - 1);

	ibdev_dbg(aq->efa_dev, "Put completion command_id %#x\n", cmd_id);
@@ -335,8 +319,8 @@ static struct efa_comp_ctx *__efa_com_submit_admin_cmd(struct efa_com_admin_queu
	cmd_id &= EFA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;

	cmd->aq_common_descriptor.command_id = cmd_id;
	cmd->aq_common_descriptor.flags |= aq->sq.phase &
		EFA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
	EFA_SET(&cmd->aq_common_descriptor.flags,
		EFA_ADMIN_AQ_COMMON_DESC_PHASE, aq->sq.phase);

	comp_ctx = efa_com_get_comp_ctx(aq, cmd_id, true);
	if (!comp_ctx) {
@@ -427,8 +411,8 @@ static void efa_com_handle_single_admin_completion(struct efa_com_admin_queue *a
	struct efa_comp_ctx *comp_ctx;
	u16 cmd_id;

	cmd_id = cqe->acq_common_descriptor.command &
		 EFA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
	cmd_id = EFA_GET(&cqe->acq_common_descriptor.command,
			 EFA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID);

	comp_ctx = efa_com_get_comp_ctx(aq, cmd_id, false);
	if (!comp_ctx) {
@@ -743,7 +727,7 @@ int efa_com_admin_init(struct efa_com_dev *edev,
	int err;

	dev_sts = efa_com_reg_read32(edev, EFA_REGS_DEV_STS_OFF);
	if (!(dev_sts & EFA_REGS_DEV_STS_READY_MASK)) {
	if (!EFA_GET(&dev_sts, EFA_REGS_DEV_STS_READY)) {
		ibdev_err(edev->efa_dev,
			  "Device isn't ready, abort com init %#x\n", dev_sts);
		return -ENODEV;
@@ -778,8 +762,7 @@ int efa_com_admin_init(struct efa_com_dev *edev,
		goto err_destroy_cq;

	cap = efa_com_reg_read32(edev, EFA_REGS_CAPS_OFF);
	timeout = (cap & EFA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
		  EFA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
	timeout = EFA_GET(&cap, EFA_REGS_CAPS_ADMIN_CMD_TO);
	if (timeout)
		/* the resolution of timeout reg is 100ms */
		aq->completion_timeout = timeout * 100000;
@@ -940,7 +923,9 @@ void efa_com_mmio_reg_read_destroy(struct efa_com_dev *edev)

int efa_com_validate_version(struct efa_com_dev *edev)
{
	u32 min_ctrl_ver = 0;
	u32 ctrl_ver_masked;
	u32 min_ver = 0;
	u32 ctrl_ver;
	u32 ver;

@@ -953,33 +938,42 @@ int efa_com_validate_version(struct efa_com_dev *edev)
				      EFA_REGS_CONTROLLER_VERSION_OFF);

	ibdev_dbg(edev->efa_dev, "efa device version: %d.%d\n",
		  (ver & EFA_REGS_VERSION_MAJOR_VERSION_MASK) >>
			  EFA_REGS_VERSION_MAJOR_VERSION_SHIFT,
		  ver & EFA_REGS_VERSION_MINOR_VERSION_MASK);

	if (ver < MIN_EFA_VER) {
		  EFA_GET(&ver, EFA_REGS_VERSION_MAJOR_VERSION),
		  EFA_GET(&ver, EFA_REGS_VERSION_MINOR_VERSION));

	EFA_SET(&min_ver, EFA_REGS_VERSION_MAJOR_VERSION,
		EFA_ADMIN_API_VERSION_MAJOR);
	EFA_SET(&min_ver, EFA_REGS_VERSION_MINOR_VERSION,
		EFA_ADMIN_API_VERSION_MINOR);
	if (ver < min_ver) {
		ibdev_err(edev->efa_dev,
			  "EFA version is lower than the minimal version the driver supports\n");
		return -EOPNOTSUPP;
	}

	ibdev_dbg(edev->efa_dev,
	ibdev_dbg(
		edev->efa_dev,
		"efa controller version: %d.%d.%d implementation version %d\n",
		  (ctrl_ver & EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
			  EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
		  (ctrl_ver & EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
			  EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
		  (ctrl_ver & EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
		  (ctrl_ver & EFA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
			  EFA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);
		EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION),
		EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION),
		EFA_GET(&ctrl_ver,
			EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION),
		EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_IMPL_ID));

	ctrl_ver_masked =
		(ctrl_ver & EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
		(ctrl_ver & EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
		(ctrl_ver & EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);

		EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION) |
		EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION) |
		EFA_GET(&ctrl_ver,
			EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION);

	EFA_SET(&min_ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION,
		EFA_CTRL_MAJOR);
	EFA_SET(&min_ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION,
		EFA_CTRL_MINOR);
	EFA_SET(&min_ctrl_ver, EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION,
		EFA_CTRL_SUB_MINOR);
	/* Validate the ctrl version without the implementation ID */
	if (ctrl_ver_masked < MIN_EFA_CTRL_VER) {
	if (ctrl_ver_masked < min_ctrl_ver) {
		ibdev_err(edev->efa_dev,
			  "EFA ctrl version is lower than the minimal ctrl version the driver supports\n");
		return -EOPNOTSUPP;
@@ -1002,8 +996,7 @@ int efa_com_get_dma_width(struct efa_com_dev *edev)
	u32 caps = efa_com_reg_read32(edev, EFA_REGS_CAPS_OFF);
	int width;

	width = (caps & EFA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
		EFA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;
	width = EFA_GET(&caps, EFA_REGS_CAPS_DMA_ADDR_WIDTH);

	ibdev_dbg(edev->efa_dev, "DMA width: %d\n", width);

@@ -1017,16 +1010,14 @@ int efa_com_get_dma_width(struct efa_com_dev *edev)
	return width;
}

static int wait_for_reset_state(struct efa_com_dev *edev, u32 timeout,
				u16 exp_state)
static int wait_for_reset_state(struct efa_com_dev *edev, u32 timeout, int on)
{
	u32 val, i;

	for (i = 0; i < timeout; i++) {
		val = efa_com_reg_read32(edev, EFA_REGS_DEV_STS_OFF);

		if ((val & EFA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
		    exp_state)
		if (EFA_GET(&val, EFA_REGS_DEV_STS_RESET_IN_PROGRESS) == on)
			return 0;

		ibdev_dbg(edev->efa_dev, "Reset indication val %d\n", val);
@@ -1046,36 +1037,34 @@ static int wait_for_reset_state(struct efa_com_dev *edev, u32 timeout,
int efa_com_dev_reset(struct efa_com_dev *edev,
		      enum efa_regs_reset_reason_types reset_reason)
{
	u32 stat, timeout, cap, reset_val;
	u32 stat, timeout, cap;
	u32 reset_val = 0;
	int err;

	stat = efa_com_reg_read32(edev, EFA_REGS_DEV_STS_OFF);
	cap = efa_com_reg_read32(edev, EFA_REGS_CAPS_OFF);

	if (!(stat & EFA_REGS_DEV_STS_READY_MASK)) {
	if (!EFA_GET(&stat, EFA_REGS_DEV_STS_READY)) {
		ibdev_err(edev->efa_dev,
			  "Device isn't ready, can't reset device\n");
		return -EINVAL;
	}

	timeout = (cap & EFA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
		  EFA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
	timeout = EFA_GET(&cap, EFA_REGS_CAPS_RESET_TIMEOUT);
	if (!timeout) {
		ibdev_err(edev->efa_dev, "Invalid timeout value\n");
		return -EINVAL;
	}

	/* start reset */
	reset_val = EFA_REGS_DEV_CTL_DEV_RESET_MASK;
	reset_val |= (reset_reason << EFA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
		     EFA_REGS_DEV_CTL_RESET_REASON_MASK;
	EFA_SET(&reset_val, EFA_REGS_DEV_CTL_DEV_RESET, 1);
	EFA_SET(&reset_val, EFA_REGS_DEV_CTL_RESET_REASON, reset_reason);
	writel(reset_val, edev->reg_bar + EFA_REGS_DEV_CTL_OFF);

	/* reset clears the mmio readless address, restore it */
	efa_com_mmio_reg_read_resp_addr_init(edev);

	err = wait_for_reset_state(edev, timeout,
				   EFA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
	err = wait_for_reset_state(edev, timeout, 1);
	if (err) {
		ibdev_err(edev->efa_dev, "Reset indication didn't turn on\n");
		return err;
@@ -1089,8 +1078,7 @@ int efa_com_dev_reset(struct efa_com_dev *edev,
		return err;
	}

	timeout = (cap & EFA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
		  EFA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
	timeout = EFA_GET(&cap, EFA_REGS_CAPS_ADMIN_CMD_TO);
	if (timeout)
		/* the resolution of timeout reg is 100ms */
		edev->aq.completion_timeout = timeout * 100000;
+15 −14
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
 * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
 * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#include "efa_com.h"
@@ -161,8 +161,9 @@ int efa_com_create_cq(struct efa_com_dev *edev,
	int err;

	create_cmd.aq_common_desc.opcode = EFA_ADMIN_CREATE_CQ;
	create_cmd.cq_caps_2 = (params->entry_size_in_bytes / 4) &
				EFA_ADMIN_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
	EFA_SET(&create_cmd.cq_caps_2,
		EFA_ADMIN_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS,
		params->entry_size_in_bytes / 4);
	create_cmd.cq_depth = params->cq_depth;
	create_cmd.num_sub_cqs = params->num_sub_cqs;
	create_cmd.uar = params->uarn;
@@ -227,8 +228,8 @@ int efa_com_register_mr(struct efa_com_dev *edev,
	mr_cmd.aq_common_desc.opcode = EFA_ADMIN_REG_MR;
	mr_cmd.pd = params->pd;
	mr_cmd.mr_length = params->mr_length_in_bytes;
	mr_cmd.flags |= params->page_shift &
		EFA_ADMIN_REG_MR_CMD_PHYS_PAGE_SIZE_SHIFT_MASK;
	EFA_SET(&mr_cmd.flags, EFA_ADMIN_REG_MR_CMD_PHYS_PAGE_SIZE_SHIFT,
		params->page_shift);
	mr_cmd.iova = params->iova;
	mr_cmd.permissions = params->permissions;

@@ -242,11 +243,11 @@ int efa_com_register_mr(struct efa_com_dev *edev,
			params->pbl.pbl.address.mem_addr_low;
		mr_cmd.pbl.pbl.address.mem_addr_high =
			params->pbl.pbl.address.mem_addr_high;
		mr_cmd.aq_common_desc.flags |=
			EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_MASK;
		EFA_SET(&mr_cmd.aq_common_desc.flags,
			EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA, 1);
		if (params->indirect)
			mr_cmd.aq_common_desc.flags |=
				EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
			EFA_SET(&mr_cmd.aq_common_desc.flags,
				EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT, 1);
	}

	err = efa_com_cmd_exec(aq,
@@ -386,9 +387,8 @@ static int efa_com_get_feature_ex(struct efa_com_dev *edev,
	get_cmd.aq_common_descriptor.opcode = EFA_ADMIN_GET_FEATURE;

	if (control_buff_size)
		get_cmd.aq_common_descriptor.flags =
			EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;

		EFA_SET(&get_cmd.aq_common_descriptor.flags,
			EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT, 1);

	efa_com_set_dma_addr(control_buf_dma_addr,
			     &get_cmd.control_buffer.address.mem_addr_high,
@@ -538,8 +538,9 @@ static int efa_com_set_feature_ex(struct efa_com_dev *edev,

	set_cmd->aq_common_descriptor.opcode = EFA_ADMIN_SET_FEATURE;
	if (control_buff_size) {
		set_cmd->aq_common_descriptor.flags =
			EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
		set_cmd->aq_common_descriptor.flags = 0;
		EFA_SET(&set_cmd->aq_common_descriptor.flags,
			EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT, 1);
		efa_com_set_dma_addr(control_buf_dma_addr,
				     &set_cmd->control_buffer.address.mem_addr_high,
				     &set_cmd->control_buffer.address.mem_addr_low);
+12 −1
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
 * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
 * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#ifndef _EFA_COMMON_H_
#define _EFA_COMMON_H_

#include <linux/bitfield.h>

#define EFA_COMMON_SPEC_VERSION_MAJOR        2
#define EFA_COMMON_SPEC_VERSION_MINOR        0

/*
 * Unified bitfield accessors for device structs/registers.
 * "mask" is the field's symbolic name WITHOUT the _MASK suffix; the
 * macros token-paste "_MASK" and let FIELD_GET()/FIELD_PREP() derive
 * the shift from the mask, replacing open-coded shift-and-mask code.
 *
 * EFA_GET(ptr, field): extract the field value from *ptr.
 */
#define EFA_GET(ptr, mask) FIELD_GET(mask##_MASK, *(ptr))

/*
 * EFA_SET(ptr, field, value): read-modify-write the field in *ptr —
 * clear the field's bits, then OR in "value" shifted into position.
 * Uses a statement expression with a local _ptr so "ptr" is evaluated
 * exactly once.
 */
#define EFA_SET(ptr, mask, value)                                              \
	({                                                                     \
		typeof(ptr) _ptr = ptr;                                        \
		*_ptr = (*_ptr & ~(mask##_MASK)) |                             \
			FIELD_PREP(mask##_MASK, value);                        \
	})

struct efa_common_mem_addr {
	u32 mem_addr_low;

Loading