Commit 2c4b14ea authored by Shiraz Saleem, committed by Jason Gunthorpe
Browse files

RDMA/irdma: Remove enum irdma_status_code

Replace use of custom irdma_status_code with linux error codes.

Remove enum irdma_status_code and the header in which it is defined.

Link: https://lore.kernel.org/r/20220217151851.1518-2-shiraz.saleem@intel.com


Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 4eaa29b4
Loading
Loading
Loading
Loading
+21 −23
Original line number Diff line number Diff line
@@ -1501,15 +1501,14 @@ irdma_find_listener(struct irdma_cm_core *cm_core, u32 *dst_addr, u16 dst_port,
 * @cm_info: CM info for parent listen node
 * @cm_parent_listen_node: The parent listen node
 */
static enum irdma_status_code
irdma_del_multiple_qhash(struct irdma_device *iwdev,
static int irdma_del_multiple_qhash(struct irdma_device *iwdev,
				    struct irdma_cm_info *cm_info,
				    struct irdma_cm_listener *cm_parent_listen_node)
{
	struct irdma_cm_listener *child_listen_node;
	enum irdma_status_code ret = IRDMA_ERR_CFG;
	struct list_head *pos, *tpos;
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
	list_for_each_safe (pos, tpos,
@@ -1618,16 +1617,16 @@ u16 irdma_get_vlan_ipv4(u32 *addr)
 * Adds a qhash and a child listen node for every IPv6 address
 * on the adapter and adds the associated qhash filter
 */
static enum irdma_status_code
irdma_add_mqh_6(struct irdma_device *iwdev, struct irdma_cm_info *cm_info,
static int irdma_add_mqh_6(struct irdma_device *iwdev,
			   struct irdma_cm_info *cm_info,
			   struct irdma_cm_listener *cm_parent_listen_node)
{
	struct net_device *ip_dev;
	struct inet6_dev *idev;
	struct inet6_ifaddr *ifp, *tmp;
	enum irdma_status_code ret = 0;
	struct irdma_cm_listener *child_listen_node;
	unsigned long flags;
	int ret = 0;

	rtnl_lock();
	for_each_netdev(&init_net, ip_dev) {
@@ -1653,7 +1652,7 @@ irdma_add_mqh_6(struct irdma_device *iwdev, struct irdma_cm_info *cm_info,
				  child_listen_node);
			if (!child_listen_node) {
				ibdev_dbg(&iwdev->ibdev, "CM: listener memory allocation\n");
				ret = IRDMA_ERR_NO_MEMORY;
				ret = -ENOMEM;
				goto exit;
			}

@@ -1700,16 +1699,16 @@ irdma_add_mqh_6(struct irdma_device *iwdev, struct irdma_cm_info *cm_info,
 * Adds a qhash and a child listen node for every IPv4 address
 * on the adapter and adds the associated qhash filter
 */
static enum irdma_status_code
irdma_add_mqh_4(struct irdma_device *iwdev, struct irdma_cm_info *cm_info,
static int irdma_add_mqh_4(struct irdma_device *iwdev,
			   struct irdma_cm_info *cm_info,
			   struct irdma_cm_listener *cm_parent_listen_node)
{
	struct net_device *ip_dev;
	struct in_device *idev;
	struct irdma_cm_listener *child_listen_node;
	enum irdma_status_code ret = 0;
	unsigned long flags;
	const struct in_ifaddr *ifa;
	int ret = 0;

	rtnl_lock();
	for_each_netdev(&init_net, ip_dev) {
@@ -1734,7 +1733,7 @@ irdma_add_mqh_4(struct irdma_device *iwdev, struct irdma_cm_info *cm_info,
			if (!child_listen_node) {
				ibdev_dbg(&iwdev->ibdev, "CM: listener memory allocation\n");
				in_dev_put(idev);
				ret = IRDMA_ERR_NO_MEMORY;
				ret = -ENOMEM;
				goto exit;
			}

@@ -1781,8 +1780,8 @@ irdma_add_mqh_4(struct irdma_device *iwdev, struct irdma_cm_info *cm_info,
 * @cm_info: CM info for parent listen node
 * @cm_listen_node: The parent listen node
 */
static enum irdma_status_code
irdma_add_mqh(struct irdma_device *iwdev, struct irdma_cm_info *cm_info,
static int irdma_add_mqh(struct irdma_device *iwdev,
			 struct irdma_cm_info *cm_info,
			 struct irdma_cm_listener *cm_listen_node)
{
	if (cm_info->ipv4)
@@ -3205,8 +3204,7 @@ static void irdma_cm_free_ah_nop(struct irdma_cm_node *cm_node)
 * @iwdev: iwarp device structure
 * @rdma_ver: HW version
 */
enum irdma_status_code irdma_setup_cm_core(struct irdma_device *iwdev,
					   u8 rdma_ver)
int irdma_setup_cm_core(struct irdma_device *iwdev, u8 rdma_ver)
{
	struct irdma_cm_core *cm_core = &iwdev->cm_core;

@@ -3216,7 +3214,7 @@ enum irdma_status_code irdma_setup_cm_core(struct irdma_device *iwdev,
	/* Handles CM event work items send to Iwarp core */
	cm_core->event_wq = alloc_ordered_workqueue("iwarp-event-wq", 0);
	if (!cm_core->event_wq)
		return IRDMA_ERR_NO_MEMORY;
		return -ENOMEM;

	INIT_LIST_HEAD(&cm_core->listen_list);

@@ -3923,10 +3921,10 @@ int irdma_create_listen(struct iw_cm_id *cm_id, int backlog)
	struct irdma_device *iwdev;
	struct irdma_cm_listener *cm_listen_node;
	struct irdma_cm_info cm_info = {};
	enum irdma_status_code err;
	struct sockaddr_in *laddr;
	struct sockaddr_in6 *laddr6;
	bool wildcard = false;
	int err;

	iwdev = to_iwdev(cm_id->device);
	if (!iwdev)
@@ -4337,11 +4335,11 @@ static void irdma_qhash_ctrl(struct irdma_device *iwdev,
	struct list_head *child_listen_list = &parent_listen_node->child_listen_list;
	struct irdma_cm_listener *child_listen_node;
	struct list_head *pos, *tpos;
	enum irdma_status_code err;
	bool node_allocated = false;
	enum irdma_quad_hash_manage_type op = ifup ?
					      IRDMA_QHASH_MANAGE_TYPE_ADD :
					      IRDMA_QHASH_MANAGE_TYPE_DELETE;
	int err;

	list_for_each_safe (pos, tpos, child_listen_list) {
		child_listen_node = list_entry(pos, struct irdma_cm_listener,
+255 −298

File changed.

Preview size limit exceeded, changes collapsed.

+4 −4
Original line number Diff line number Diff line
@@ -964,7 +964,7 @@ enum irdma_cqp_op_type {
			(_ring).head = ((_ring).head + 1) % size; \
			(_retcode) = 0; \
		} else { \
			(_retcode) = IRDMA_ERR_RING_FULL; \
			(_retcode) = -ENOMEM; \
		} \
	}
#define IRDMA_RING_MOVE_HEAD_BY_COUNT(_ring, _count, _retcode) \
@@ -975,7 +975,7 @@ enum irdma_cqp_op_type {
			(_ring).head = ((_ring).head + (_count)) % size; \
			(_retcode) = 0; \
		} else { \
			(_retcode) = IRDMA_ERR_RING_FULL; \
			(_retcode) = -ENOMEM; \
		} \
	}
#define IRDMA_SQ_RING_MOVE_HEAD(_ring, _retcode) \
@@ -986,7 +986,7 @@ enum irdma_cqp_op_type {
			(_ring).head = ((_ring).head + 1) % size; \
			(_retcode) = 0; \
		} else { \
			(_retcode) = IRDMA_ERR_RING_FULL; \
			(_retcode) = -ENOMEM; \
		} \
	}
#define IRDMA_SQ_RING_MOVE_HEAD_BY_COUNT(_ring, _count, _retcode) \
@@ -997,7 +997,7 @@ enum irdma_cqp_op_type {
			(_ring).head = ((_ring).head + (_count)) % size; \
			(_retcode) = 0; \
		} else { \
			(_retcode) = IRDMA_ERR_RING_FULL; \
			(_retcode) = -ENOMEM; \
		} \
	}
#define IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(_ring, _count) \
+46 −59
Original line number Diff line number Diff line
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "osdep.h"
#include "status.h"
#include "hmc.h"
#include "defs.h"
#include "type.h"
@@ -121,10 +120,8 @@ static inline void irdma_invalidate_pf_hmc_pd(struct irdma_sc_dev *dev, u32 sd_i
 * @type: paged or direct sd
 * @setsd: flag to set or clear sd
 */
enum irdma_status_code irdma_hmc_sd_one(struct irdma_sc_dev *dev, u8 hmc_fn_id,
					u64 pa, u32 sd_idx,
					enum irdma_sd_entry_type type,
					bool setsd)
int irdma_hmc_sd_one(struct irdma_sc_dev *dev, u8 hmc_fn_id, u64 pa, u32 sd_idx,
		     enum irdma_sd_entry_type type, bool setsd)
{
	struct irdma_update_sds_info sdinfo;

@@ -145,16 +142,15 @@ enum irdma_status_code irdma_hmc_sd_one(struct irdma_sc_dev *dev, u8 hmc_fn_id,
 * @sd_cnt: number of sd entries
 * @setsd: flag to set or clear sd
 */
static enum irdma_status_code irdma_hmc_sd_grp(struct irdma_sc_dev *dev,
					       struct irdma_hmc_info *hmc_info,
					       u32 sd_index, u32 sd_cnt,
					       bool setsd)
static int irdma_hmc_sd_grp(struct irdma_sc_dev *dev,
			    struct irdma_hmc_info *hmc_info, u32 sd_index,
			    u32 sd_cnt, bool setsd)
{
	struct irdma_hmc_sd_entry *sd_entry;
	struct irdma_update_sds_info sdinfo = {};
	u64 pa;
	u32 i;
	enum irdma_status_code ret_code = 0;
	int ret_code = 0;

	sdinfo.hmc_fn_id = hmc_info->hmc_fn_id;
	for (i = sd_index; i < sd_index + sd_cnt; i++) {
@@ -196,16 +192,15 @@ static enum irdma_status_code irdma_hmc_sd_grp(struct irdma_sc_dev *dev,
 * @dev: pointer to the device structure
 * @info: create obj info
 */
static enum irdma_status_code
irdma_hmc_finish_add_sd_reg(struct irdma_sc_dev *dev,
static int irdma_hmc_finish_add_sd_reg(struct irdma_sc_dev *dev,
				       struct irdma_hmc_create_obj_info *info)
{
	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
		return IRDMA_ERR_INVALID_HMC_OBJ_INDEX;
		return -EINVAL;

	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt)
		return IRDMA_ERR_INVALID_HMC_OBJ_COUNT;
		return -EINVAL;

	if (!info->add_sd_cnt)
		return 0;
@@ -222,8 +217,7 @@ irdma_hmc_finish_add_sd_reg(struct irdma_sc_dev *dev,
 * This will allocate memory for PDs and backing pages and populate
 * the sd and pd entries.
 */
enum irdma_status_code
irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev,
int irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev,
			    struct irdma_hmc_create_obj_info *info)
{
	struct irdma_hmc_sd_entry *sd_entry;
@@ -232,10 +226,10 @@ irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev,
	u32 pd_idx1 = 0, pd_lmt1 = 0;
	u32 i, j;
	bool pd_error = false;
	enum irdma_status_code ret_code = 0;
	int ret_code = 0;

	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
		return IRDMA_ERR_INVALID_HMC_OBJ_INDEX;
		return -EINVAL;

	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
@@ -243,7 +237,7 @@ irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev,
			  "HMC: error type %u, start = %u, req cnt %u, cnt = %u\n",
			  info->rsrc_type, info->start_idx, info->count,
			  info->hmc_info->hmc_obj[info->rsrc_type].cnt);
		return IRDMA_ERR_INVALID_HMC_OBJ_COUNT;
		return -EINVAL;
	}

	irdma_find_sd_index_limit(info->hmc_info, info->rsrc_type,
@@ -251,7 +245,7 @@ irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev,
				  &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		return IRDMA_ERR_INVALID_SD_INDEX;
		return -EINVAL;
	}

	irdma_find_pd_index_limit(info->hmc_info, info->rsrc_type,
@@ -312,7 +306,7 @@ irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev,
			irdma_prep_remove_pd_page(info->hmc_info, (j - 1));
			break;
		default:
			ret_code = IRDMA_ERR_INVALID_SD_TYPE;
			ret_code = -EINVAL;
			break;
		}
		j--;
@@ -327,12 +321,12 @@ irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev,
 * @info: dele obj info
 * @reset: true if called before reset
 */
static enum irdma_status_code
irdma_finish_del_sd_reg(struct irdma_sc_dev *dev,
			struct irdma_hmc_del_obj_info *info, bool reset)
static int irdma_finish_del_sd_reg(struct irdma_sc_dev *dev,
				   struct irdma_hmc_del_obj_info *info,
				   bool reset)
{
	struct irdma_hmc_sd_entry *sd_entry;
	enum irdma_status_code ret_code = 0;
	int ret_code = 0;
	u32 i, sd_idx;
	struct irdma_dma_mem *mem;

@@ -373,22 +367,21 @@ irdma_finish_del_sd_reg(struct irdma_sc_dev *dev,
 * caller should deallocate memory allocated previously for
 * book-keeping information about PDs and backing storage.
 */
enum irdma_status_code irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev,
					    struct irdma_hmc_del_obj_info *info,
					    bool reset)
int irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev,
			 struct irdma_hmc_del_obj_info *info, bool reset)
{
	struct irdma_hmc_pd_table *pd_table;
	u32 sd_idx, sd_lmt;
	u32 pd_idx, pd_lmt, rel_pd_idx;
	u32 i, j;
	enum irdma_status_code ret_code = 0;
	int ret_code = 0;

	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		ibdev_dbg(to_ibdev(dev),
			  "HMC: error start_idx[%04d]  >= [type %04d].cnt[%04d]\n",
			  info->start_idx, info->rsrc_type,
			  info->hmc_info->hmc_obj[info->rsrc_type].cnt);
		return IRDMA_ERR_INVALID_HMC_OBJ_INDEX;
		return -EINVAL;
	}

	if ((info->start_idx + info->count) >
@@ -397,7 +390,7 @@ enum irdma_status_code irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev,
			  "HMC: error start_idx[%04d] + count %04d  >= [type %04d].cnt[%04d]\n",
			  info->start_idx, info->count, info->rsrc_type,
			  info->hmc_info->hmc_obj[info->rsrc_type].cnt);
		return IRDMA_ERR_INVALID_HMC_OBJ_COUNT;
		return -EINVAL;
	}

	irdma_find_pd_index_limit(info->hmc_info, info->rsrc_type,
@@ -433,7 +426,7 @@ enum irdma_status_code irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev,
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		ibdev_dbg(to_ibdev(dev), "HMC: invalid sd_idx\n");
		return IRDMA_ERR_INVALID_SD_INDEX;
		return -EINVAL;
	}

	for (i = sd_idx; i < sd_lmt; i++) {
@@ -477,11 +470,9 @@ enum irdma_status_code irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev,
 * @type: what type of segment descriptor we're manipulating
 * @direct_mode_sz: size to alloc in direct mode
 */
enum irdma_status_code irdma_add_sd_table_entry(struct irdma_hw *hw,
						struct irdma_hmc_info *hmc_info,
						u32 sd_index,
						enum irdma_sd_entry_type type,
						u64 direct_mode_sz)
int irdma_add_sd_table_entry(struct irdma_hw *hw,
			     struct irdma_hmc_info *hmc_info, u32 sd_index,
			     enum irdma_sd_entry_type type, u64 direct_mode_sz)
{
	struct irdma_hmc_sd_entry *sd_entry;
	struct irdma_dma_mem dma_mem;
@@ -499,7 +490,7 @@ enum irdma_status_code irdma_add_sd_table_entry(struct irdma_hw *hw,
		dma_mem.va = dma_alloc_coherent(hw->device, dma_mem.size,
						&dma_mem.pa, GFP_KERNEL);
		if (!dma_mem.va)
			return IRDMA_ERR_NO_MEMORY;
			return -ENOMEM;
		if (type == IRDMA_SD_TYPE_PAGED) {
			struct irdma_virt_mem *vmem =
				&sd_entry->u.pd_table.pd_entry_virt_mem;
@@ -510,7 +501,7 @@ enum irdma_status_code irdma_add_sd_table_entry(struct irdma_hw *hw,
				dma_free_coherent(hw->device, dma_mem.size,
						  dma_mem.va, dma_mem.pa);
				dma_mem.va = NULL;
				return IRDMA_ERR_NO_MEMORY;
				return -ENOMEM;
			}
			sd_entry->u.pd_table.pd_entry = vmem->va;

@@ -549,9 +540,8 @@ enum irdma_status_code irdma_add_sd_table_entry(struct irdma_hw *hw,
 *	   aligned on 4K boundary and zeroed memory.
 *	2. It should be 4K in size.
 */
enum irdma_status_code irdma_add_pd_table_entry(struct irdma_sc_dev *dev,
						struct irdma_hmc_info *hmc_info,
						u32 pd_index,
int irdma_add_pd_table_entry(struct irdma_sc_dev *dev,
			     struct irdma_hmc_info *hmc_info, u32 pd_index,
			     struct irdma_dma_mem *rsrc_pg)
{
	struct irdma_hmc_pd_table *pd_table;
@@ -563,7 +553,7 @@ enum irdma_status_code irdma_add_pd_table_entry(struct irdma_sc_dev *dev,
	u64 page_desc;

	if (pd_index / IRDMA_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt)
		return IRDMA_ERR_INVALID_PAGE_DESC_INDEX;
		return -EINVAL;

	sd_idx = (pd_index / IRDMA_HMC_PD_CNT_IN_SD);
	if (hmc_info->sd_table.sd_entry[sd_idx].entry_type !=
@@ -584,7 +574,7 @@ enum irdma_status_code irdma_add_pd_table_entry(struct irdma_sc_dev *dev,
						      page->size, &page->pa,
						      GFP_KERNEL);
			if (!page->va)
				return IRDMA_ERR_NO_MEMORY;
				return -ENOMEM;

			pd_entry->rsrc_pg = false;
		}
@@ -621,9 +611,8 @@ enum irdma_status_code irdma_add_pd_table_entry(struct irdma_sc_dev *dev,
 *	1. Caller can deallocate the memory used by backing storage after this
 *	   function returns.
 */
enum irdma_status_code irdma_remove_pd_bp(struct irdma_sc_dev *dev,
					  struct irdma_hmc_info *hmc_info,
					  u32 idx)
int irdma_remove_pd_bp(struct irdma_sc_dev *dev,
		       struct irdma_hmc_info *hmc_info, u32 idx)
{
	struct irdma_hmc_pd_entry *pd_entry;
	struct irdma_hmc_pd_table *pd_table;
@@ -635,11 +624,11 @@ enum irdma_status_code irdma_remove_pd_bp(struct irdma_sc_dev *dev,
	sd_idx = idx / IRDMA_HMC_PD_CNT_IN_SD;
	rel_pd_idx = idx % IRDMA_HMC_PD_CNT_IN_SD;
	if (sd_idx >= hmc_info->sd_table.sd_cnt)
		return IRDMA_ERR_INVALID_PAGE_DESC_INDEX;
		return -EINVAL;

	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
	if (sd_entry->entry_type != IRDMA_SD_TYPE_PAGED)
		return IRDMA_ERR_INVALID_SD_TYPE;
		return -EINVAL;

	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
@@ -656,7 +645,7 @@ enum irdma_status_code irdma_remove_pd_bp(struct irdma_sc_dev *dev,
	if (!pd_entry->rsrc_pg) {
		mem = &pd_entry->bp.addr;
		if (!mem || !mem->va)
			return IRDMA_ERR_PARAM;
			return -EINVAL;

		dma_free_coherent(dev->hw->device, mem->size, mem->va,
				  mem->pa);
@@ -673,14 +662,13 @@ enum irdma_status_code irdma_remove_pd_bp(struct irdma_sc_dev *dev,
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 */
enum irdma_status_code irdma_prep_remove_sd_bp(struct irdma_hmc_info *hmc_info,
					       u32 idx)
int irdma_prep_remove_sd_bp(struct irdma_hmc_info *hmc_info, u32 idx)
{
	struct irdma_hmc_sd_entry *sd_entry;

	sd_entry = &hmc_info->sd_table.sd_entry[idx];
	if (--sd_entry->u.bp.use_cnt)
		return IRDMA_ERR_NOT_READY;
		return -EBUSY;

	hmc_info->sd_table.use_cnt--;
	sd_entry->valid = false;
@@ -693,15 +681,14 @@ enum irdma_status_code irdma_prep_remove_sd_bp(struct irdma_hmc_info *hmc_info,
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: segment descriptor index to find the relevant page descriptor
 */
enum irdma_status_code
irdma_prep_remove_pd_page(struct irdma_hmc_info *hmc_info, u32 idx)
int irdma_prep_remove_pd_page(struct irdma_hmc_info *hmc_info, u32 idx)
{
	struct irdma_hmc_sd_entry *sd_entry;

	sd_entry = &hmc_info->sd_table.sd_entry[idx];

	if (sd_entry->u.pd_table.use_cnt)
		return IRDMA_ERR_NOT_READY;
		return -EBUSY;

	sd_entry->valid = false;
	hmc_info->sd_table.use_cnt--;
+21 −32
Original line number Diff line number Diff line
@@ -141,40 +141,29 @@ struct irdma_hmc_del_obj_info {
	bool privileged;
};

enum irdma_status_code irdma_copy_dma_mem(struct irdma_hw *hw, void *dest_buf,
					  struct irdma_dma_mem *src_mem,
					  u64 src_offset, u64 size);
enum irdma_status_code
irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev,
int irdma_copy_dma_mem(struct irdma_hw *hw, void *dest_buf,
		       struct irdma_dma_mem *src_mem, u64 src_offset, u64 size);
int irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev,
			    struct irdma_hmc_create_obj_info *info);
enum irdma_status_code irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev,
					    struct irdma_hmc_del_obj_info *info,
					    bool reset);
enum irdma_status_code irdma_hmc_sd_one(struct irdma_sc_dev *dev, u8 hmc_fn_id,
					u64 pa, u32 sd_idx,
int irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev,
			 struct irdma_hmc_del_obj_info *info, bool reset);
int irdma_hmc_sd_one(struct irdma_sc_dev *dev, u8 hmc_fn_id, u64 pa, u32 sd_idx,
		     enum irdma_sd_entry_type type,
		     bool setsd);
enum irdma_status_code
irdma_update_sds_noccq(struct irdma_sc_dev *dev,
int irdma_update_sds_noccq(struct irdma_sc_dev *dev,
			   struct irdma_update_sds_info *info);
struct irdma_vfdev *irdma_vfdev_from_fpm(struct irdma_sc_dev *dev,
					 u8 hmc_fn_id);
struct irdma_hmc_info *irdma_vf_hmcinfo_from_fpm(struct irdma_sc_dev *dev,
						 u8 hmc_fn_id);
enum irdma_status_code irdma_add_sd_table_entry(struct irdma_hw *hw,
						struct irdma_hmc_info *hmc_info,
						u32 sd_index,
						enum irdma_sd_entry_type type,
						u64 direct_mode_sz);
enum irdma_status_code irdma_add_pd_table_entry(struct irdma_sc_dev *dev,
						struct irdma_hmc_info *hmc_info,
						u32 pd_index,
int irdma_add_sd_table_entry(struct irdma_hw *hw,
			     struct irdma_hmc_info *hmc_info, u32 sd_index,
			     enum irdma_sd_entry_type type, u64 direct_mode_sz);
int irdma_add_pd_table_entry(struct irdma_sc_dev *dev,
			     struct irdma_hmc_info *hmc_info, u32 pd_index,
			     struct irdma_dma_mem *rsrc_pg);
enum irdma_status_code irdma_remove_pd_bp(struct irdma_sc_dev *dev,
					  struct irdma_hmc_info *hmc_info,
					  u32 idx);
enum irdma_status_code irdma_prep_remove_sd_bp(struct irdma_hmc_info *hmc_info,
					       u32 idx);
enum irdma_status_code
irdma_prep_remove_pd_page(struct irdma_hmc_info *hmc_info, u32 idx);
int irdma_remove_pd_bp(struct irdma_sc_dev *dev,
		       struct irdma_hmc_info *hmc_info, u32 idx);
int irdma_prep_remove_sd_bp(struct irdma_hmc_info *hmc_info, u32 idx);
int irdma_prep_remove_pd_page(struct irdma_hmc_info *hmc_info, u32 idx);
#endif /* IRDMA_HMC_H */
Loading