Commit bf2746e8 authored by Shay Agroskin, committed by David S. Miller
Browse files

net: ena: Capitalize all log strings and improve code readability



Capitalize all log strings printed by the ena driver to make their
format uniform across it.

Also fix indentation, spelling mistakes and comments to improve code
readability. This also includes adding comments to macros/enums whose
purpose might be difficult to understand.
Separate some code into functions to make it easier to understand the
purpose of these lines.

Signed-off-by: Amit Bernstein <amitbern@amazon.com>
Signed-off-by: Shay Agroskin <shayagr@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent f0525298
Loading
Loading
Loading
Loading
+27 −26
Original line number Diff line number Diff line
@@ -28,6 +28,7 @@ enum ena_admin_aq_completion_status {
	ENA_ADMIN_RESOURCE_BUSY                     = 7,
};

/* subcommands for the set/get feature admin commands */
enum ena_admin_aq_feature_id {
	ENA_ADMIN_DEVICE_ATTRIBUTES                 = 1,
	ENA_ADMIN_MAX_QUEUES_NUM                    = 2,
@@ -213,8 +214,8 @@ struct ena_admin_aq_create_sq_cmd {
	 */
	u8 sq_caps_3;

	/* associated completion queue id. This CQ must be created prior to
	 *    SQ creation
	/* associated completion queue id. This CQ must be created prior to SQ
	 * creation
	 */
	u16 cq_idx;

@@ -448,7 +449,9 @@ struct ena_admin_device_attr_feature_desc {

	u32 device_version;

	/* bitmap of ena_admin_aq_feature_id */
	/* bitmap of ena_admin_aq_feature_id, which represents supported
	 * subcommands for the set/get feature admin commands.
	 */
	u32 supported_features;

	u32 reserved3;
@@ -534,32 +537,30 @@ struct ena_admin_feature_llq_desc {

	u32 max_llq_depth;

	/*  specify the header locations the device supports. bitfield of
	 *    enum ena_admin_llq_header_location.
	/* specify the header locations the device supports. bitfield of enum
	 * ena_admin_llq_header_location.
	 */
	u16 header_location_ctrl_supported;

	/* the header location the driver selected to use. */
	u16 header_location_ctrl_enabled;

	/* if inline header is specified - this is the size of descriptor
	 *    list entry. If header in a separate ring is specified - this is
	 *    the size of header ring entry. bitfield of enum
	 *    ena_admin_llq_ring_entry_size. specify the entry sizes the device
	 *    supports
	/* if inline header is specified - this is the size of descriptor list
	 * entry. If header in a separate ring is specified - this is the size
	 * of header ring entry. bitfield of enum ena_admin_llq_ring_entry_size.
	 * specify the entry sizes the device supports
	 */
	u16 entry_size_ctrl_supported;

	/* the entry size the driver selected to use. */
	u16 entry_size_ctrl_enabled;

	/* valid only if inline header is specified. First entry associated
	 *    with the packet includes descriptors and header. Rest of the
	 *    entries occupied by descriptors. This parameter defines the max
	 *    number of descriptors precedding the header in the first entry.
	 *    The field is bitfield of enum
	 *    ena_admin_llq_num_descs_before_header and specify the values the
	 *    device supports
	/* valid only if inline header is specified. First entry associated with
	 * the packet includes descriptors and header. Rest of the entries
	 * occupied by descriptors. This parameter defines the max number of
	 * descriptors preceding the header in the first entry. The field is
	 * bitfield of enum ena_admin_llq_num_descs_before_header and specify
	 * the values the device supports
	 */
	u16 desc_num_before_header_supported;

@@ -602,8 +603,8 @@ struct ena_admin_queue_ext_feature_fields {

	u32 max_tx_header_size;

	/* Maximum Descriptors number, including meta descriptor, allowed for
	 * a single Tx packet
	/* Maximum Descriptors number, including meta descriptor, allowed for a
	 * single Tx packet
	 */
	u16 max_per_packet_tx_descs;

@@ -626,8 +627,8 @@ struct ena_admin_queue_feature_desc {

	u32 max_header_size;

	/* Maximum Descriptors number, including meta descriptor, allowed for
	 *    a single Tx packet
	/* Maximum Descriptors number, including meta descriptor, allowed for a
	 * single Tx packet
	 */
	u16 max_packet_tx_descs;

@@ -1015,7 +1016,7 @@ struct ena_admin_set_feat_resp {
struct ena_admin_aenq_common_desc {
	u16 group;

	u16 syndrom;
	u16 syndrome;

	/* 0 : phase
	 * 7:1 : reserved - MBZ
@@ -1039,7 +1040,7 @@ enum ena_admin_aenq_group {
	ENA_ADMIN_AENQ_GROUPS_NUM                   = 5,
};

enum ena_admin_aenq_notification_syndrom {
enum ena_admin_aenq_notification_syndrome {
	ENA_ADMIN_SUSPEND                           = 0,
	ENA_ADMIN_RESUME                            = 1,
	ENA_ADMIN_UPDATE_HINTS                      = 2,
+93 −78
Original line number Diff line number Diff line
@@ -71,7 +71,7 @@ static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
				       dma_addr_t addr)
{
	if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
		pr_err("dma address has more bits that the device supports\n");
		pr_err("DMA address has more bits that the device supports\n");
		return -EINVAL;
	}

@@ -81,16 +81,16 @@ static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
	return 0;
}

static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue)
{
	struct ena_com_admin_sq *sq = &queue->sq;
	u16 size = ADMIN_SQ_SIZE(queue->q_depth);
	struct ena_com_admin_sq *sq = &admin_queue->sq;
	u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth);

	sq->entries = dma_alloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
					 GFP_KERNEL);
	sq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size,
					 &sq->dma_addr, GFP_KERNEL);

	if (!sq->entries) {
		pr_err("memory allocation failed\n");
		pr_err("Memory allocation failed\n");
		return -ENOMEM;
	}

@@ -103,16 +103,16 @@ static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
	return 0;
}

static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue)
{
	struct ena_com_admin_cq *cq = &queue->cq;
	u16 size = ADMIN_CQ_SIZE(queue->q_depth);
	struct ena_com_admin_cq *cq = &admin_queue->cq;
	u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth);

	cq->entries = dma_alloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
					 GFP_KERNEL);
	cq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size,
					 &cq->dma_addr, GFP_KERNEL);

	if (!cq->entries) {
		pr_err("memory allocation failed\n");
		pr_err("Memory allocation failed\n");
		return -ENOMEM;
	}

@@ -122,20 +122,20 @@ static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
	return 0;
}

static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
				   struct ena_aenq_handlers *aenq_handlers)
{
	struct ena_com_aenq *aenq = &dev->aenq;
	struct ena_com_aenq *aenq = &ena_dev->aenq;
	u32 addr_low, addr_high, aenq_caps;
	u16 size;

	dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
	ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
	size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
	aenq->entries = dma_alloc_coherent(dev->dmadev, size, &aenq->dma_addr,
					   GFP_KERNEL);
	aenq->entries = dma_alloc_coherent(ena_dev->dmadev, size,
					   &aenq->dma_addr, GFP_KERNEL);

	if (!aenq->entries) {
		pr_err("memory allocation failed\n");
		pr_err("Memory allocation failed\n");
		return -ENOMEM;
	}

@@ -145,18 +145,18 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);

	writel(addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
	writel(addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
	writel(addr_low, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);

	aenq_caps = 0;
	aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
	aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
	aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
		      << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
		     ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
	writel(aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
	writel(aenq_caps, ena_dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);

	if (unlikely(!aenq_handlers)) {
		pr_err("aenq handlers pointer is NULL\n");
		pr_err("AENQ handlers pointer is NULL\n");
		return -EINVAL;
	}

@@ -172,31 +172,31 @@ static void comp_ctxt_release(struct ena_com_admin_queue *queue,
	atomic_dec(&queue->outstanding_cmds);
}

static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *admin_queue,
					  u16 command_id, bool capture)
{
	if (unlikely(command_id >= queue->q_depth)) {
		pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
		       command_id, queue->q_depth);
	if (unlikely(command_id >= admin_queue->q_depth)) {
		pr_err("Command id is larger than the queue size. cmd_id: %u queue size %d\n",
		       command_id, admin_queue->q_depth);
		return NULL;
	}

	if (unlikely(!queue->comp_ctx)) {
	if (unlikely(!admin_queue->comp_ctx)) {
		pr_err("Completion context is NULL\n");
		return NULL;
	}

	if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
	if (unlikely(admin_queue->comp_ctx[command_id].occupied && capture)) {
		pr_err("Completion context is occupied\n");
		return NULL;
	}

	if (capture) {
		atomic_inc(&queue->outstanding_cmds);
		queue->comp_ctx[command_id].occupied = true;
		atomic_inc(&admin_queue->outstanding_cmds);
		admin_queue->comp_ctx[command_id].occupied = true;
	}

	return &queue->comp_ctx[command_id];
	return &admin_queue->comp_ctx[command_id];
}

static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
@@ -217,7 +217,7 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
	/* In case of queue FULL */
	cnt = (u16)atomic_read(&admin_queue->outstanding_cmds);
	if (cnt >= admin_queue->q_depth) {
		pr_debug("admin queue is full.\n");
		pr_debug("Admin queue is full.\n");
		admin_queue->stats.out_of_space++;
		return ERR_PTR(-ENOSPC);
	}
@@ -257,20 +257,21 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
	return comp_ctx;
}

static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *admin_queue)
{
	size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
	size_t size = admin_queue->q_depth * sizeof(struct ena_comp_ctx);
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	queue->comp_ctx = devm_kzalloc(queue->q_dmadev, size, GFP_KERNEL);
	if (unlikely(!queue->comp_ctx)) {
		pr_err("memory allocation failed\n");
	admin_queue->comp_ctx =
		devm_kzalloc(admin_queue->q_dmadev, size, GFP_KERNEL);
	if (unlikely(!admin_queue->comp_ctx)) {
		pr_err("Memory allocation failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(queue, i, false);
	for (i = 0; i < admin_queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(admin_queue, i, false);
		if (comp_ctx)
			init_completion(&comp_ctx->wait_event);
	}
@@ -336,7 +337,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
		}

		if (!io_sq->desc_addr.virt_addr) {
			pr_err("memory allocation failed\n");
			pr_err("Memory allocation failed\n");
			return -ENOMEM;
		}
	}
@@ -362,7 +363,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
				devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);

		if (!io_sq->bounce_buf_ctrl.base_buffer) {
			pr_err("bounce buffer memory allocation failed\n");
			pr_err("Bounce buffer memory allocation failed\n");
			return -ENOMEM;
		}

@@ -422,7 +423,7 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
	}

	if (!io_cq->cdesc_addr.virt_addr) {
		pr_err("memory allocation failed\n");
		pr_err("Memory allocation failed\n");
		return -ENOMEM;
	}

@@ -498,7 +499,7 @@ static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_qu
static int ena_com_comp_status_to_errno(u8 comp_status)
{
	if (unlikely(comp_status != 0))
		pr_err("admin command failed[%u]\n", comp_status);
		pr_err("Admin command failed[%u]\n", comp_status);

	switch (comp_status) {
	case ENA_ADMIN_SUCCESS:
@@ -690,7 +691,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
		/* The desc list entry size should be whole multiply of 8
		 * This requirement comes from __iowrite64_copy()
		 */
		pr_err("illegal entry size %d\n", llq_info->desc_list_entry_size);
		pr_err("Illegal entry size %d\n", llq_info->desc_list_entry_size);
		return -EINVAL;
	}

@@ -831,7 +832,7 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
	}

	if (unlikely(i == timeout)) {
		pr_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
		pr_err("Reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
		       mmio_read->seq_num, offset, read_resp->req_id,
		       read_resp->reg_off);
		ret = ENA_MMIO_READ_TIMEOUT;
@@ -898,7 +899,7 @@ static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != -ENODEV)))
		pr_err("failed to destroy io sq error: %d\n", ret);
		pr_err("Failed to destroy io sq error: %d\n", ret);

	return ret;
}
@@ -1007,7 +1008,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
				   &get_cmd.control_buffer.address,
				   control_buf_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		pr_err("Memory address set failed\n");
		return ret;
	}

@@ -1128,7 +1129,7 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,

	if ((get_resp.u.ind_table.min_size > log_size) ||
	    (get_resp.u.ind_table.max_size < log_size)) {
		pr_err("indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
		pr_err("Indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
		       1 << log_size, 1 << get_resp.u.ind_table.min_size,
		       1 << get_resp.u.ind_table.max_size);
		return -EINVAL;
@@ -1221,7 +1222,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
					   &create_cmd.sq_ba,
					   io_sq->desc_addr.phys_addr);
		if (unlikely(ret)) {
			pr_err("memory address set failed\n");
			pr_err("Memory address set failed\n");
			return ret;
		}
	}
@@ -1250,7 +1251,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
			cmd_completion.llq_descriptors_offset);
	}

	pr_debug("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
	pr_debug("Created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);

	return ret;
}
@@ -1363,7 +1364,7 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
				   &create_cmd.cq_ba,
				   io_cq->cdesc_addr.phys_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		pr_err("Memory address set failed\n");
		return ret;
	}

@@ -1392,7 +1393,7 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.numa_node_register_offset);

	pr_debug("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
	pr_debug("Created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);

	return ret;
}
@@ -1585,12 +1586,12 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev)
		return -ETIME;
	}

	pr_info("ena device version: %d.%d\n",
	pr_info("ENA device version: %d.%d\n",
		(ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
			ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
		ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);

	pr_info("ena controller version: %d.%d.%d implementation version %d\n",
	pr_info("ENA controller version: %d.%d.%d implementation version %d\n",
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
			ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
@@ -1613,6 +1614,19 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev)
	return 0;
}

static void
ena_com_free_ena_admin_queue_comp_ctx(struct ena_com_dev *ena_dev,
				      struct ena_com_admin_queue *admin_queue)

{
	if (!admin_queue->comp_ctx)
		return;

	devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);

	admin_queue->comp_ctx = NULL;
}

void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
@@ -1621,9 +1635,8 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
	struct ena_com_aenq *aenq = &ena_dev->aenq;
	u16 size;

	if (admin_queue->comp_ctx)
		devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
	admin_queue->comp_ctx = NULL;
	ena_com_free_ena_admin_queue_comp_ctx(ena_dev, admin_queue);

	size = ADMIN_SQ_SIZE(admin_queue->q_depth);
	if (sq->entries)
		dma_free_coherent(ena_dev->dmadev, size, sq->entries,
@@ -1901,6 +1914,7 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,

	memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
	       sizeof(get_resp.u.dev_attr));

	ena_dev->supported_features = get_resp.u.dev_attr.supported_features;

	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
@@ -1979,10 +1993,10 @@ void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
/* ena_handle_specific_aenq_event:
 * return the handler that is relevant to the specific event group
 */
static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *ena_dev,
						     u16 group)
{
	struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;
	struct ena_aenq_handlers *aenq_handlers = ena_dev->aenq.aenq_handlers;

	if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
		return aenq_handlers->handlers[group];
@@ -1994,11 +2008,11 @@ static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
 * handles the aenq incoming events.
 * pop events from the queue and apply the specific handler
 */
void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
{
	struct ena_admin_aenq_entry *aenq_e;
	struct ena_admin_aenq_common_desc *aenq_common;
	struct ena_com_aenq *aenq  = &dev->aenq;
	struct ena_com_aenq *aenq  = &ena_dev->aenq;
	u64 timestamp;
	ena_aenq_handler handler_cb;
	u16 masked_head, processed = 0;
@@ -2019,11 +2033,12 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)

		timestamp = (u64)aenq_common->timestamp_low |
			((u64)aenq_common->timestamp_high << 32);
		pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
			 aenq_common->group, aenq_common->syndrom, timestamp);

		pr_debug("AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
			 aenq_common->group, aenq_common->syndrome, timestamp);

		/* Handle specific event*/
		handler_cb = ena_com_get_specific_aenq_cb(dev,
		handler_cb = ena_com_get_specific_aenq_cb(ena_dev,
							  aenq_common->group);
		handler_cb(data, aenq_e); /* call the actual event handler*/

@@ -2048,7 +2063,8 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)

	/* write the aenq doorbell after all AENQ descriptors were read */
	mb();
	writel_relaxed((u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
	writel_relaxed((u32)aenq->head,
		       ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}

int ena_com_dev_reset(struct ena_com_dev *ena_dev,
@@ -2261,7 +2277,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
				   &cmd.control_buffer.address,
				   rss->hash_key_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		pr_err("Memory address set failed\n");
		return ret;
	}

@@ -2430,7 +2446,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
				   &cmd.control_buffer.address,
				   rss->hash_ctrl_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		pr_err("Memory address set failed\n");
		return ret;
	}
	cmd.control_buffer.length = sizeof(*hash_ctrl);
@@ -2491,7 +2507,7 @@ int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
		available_fields = hash_ctrl->selected_fields[i].fields &
				hash_ctrl->supported_fields[i].fields;
		if (available_fields != hash_ctrl->selected_fields[i].fields) {
			pr_err("hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n",
			pr_err("Hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n",
			       i, hash_ctrl->supported_fields[i].fields,
			       hash_ctrl->selected_fields[i].fields);
			return -EOPNOTSUPP;
@@ -2529,7 +2545,7 @@ int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
	/* Make sure all the fields are supported */
	supported_fields = hash_ctrl->supported_fields[proto].fields;
	if ((hash_fields & supported_fields) != hash_fields) {
		pr_err("proto %d doesn't support the required fields %x. supports only: %x\n",
		pr_err("Proto %d doesn't support the required fields %x. supports only: %x\n",
		       proto, hash_fields, supported_fields);
	}

@@ -2594,7 +2610,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
				   &cmd.control_buffer.address,
				   rss->rss_ind_tbl_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		pr_err("Memory address set failed\n");
		return ret;
	}

@@ -2707,8 +2723,7 @@ int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,

	host_attr->debug_area_virt_addr =
		dma_alloc_coherent(ena_dev->dmadev, debug_area_size,
				   &host_attr->debug_area_dma_addr,
				   GFP_KERNEL);
				   &host_attr->debug_area_dma_addr, GFP_KERNEL);
	if (unlikely(!host_attr->debug_area_virt_addr)) {
		host_attr->debug_area_size = 0;
		return -ENOMEM;
@@ -2765,7 +2780,7 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
				   &cmd.u.host_attr.debug_ba,
				   host_attr->debug_area_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		pr_err("Memory address set failed\n");
		return ret;
	}

@@ -2773,7 +2788,7 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
				   &cmd.u.host_attr.os_info_ba,
				   host_attr->host_info_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		pr_err("Memory address set failed\n");
		return ret;
	}

@@ -2892,7 +2907,7 @@ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
		(llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));

	if (unlikely(ena_dev->tx_max_header_size == 0)) {
		pr_err("the size of the LLQ entry is smaller than needed\n");
		pr_err("The size of the LLQ entry is smaller than needed\n");
		return -EINVAL;
	}

+1 −1
Original line number Diff line number Diff line
@@ -509,7 +509,7 @@ void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev);
 * This method goes over the async event notification queue and calls the proper
 * aenq handler.
 */
void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data);
void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data);

/* ena_com_abort_admin_commands - Abort all the outstanding admin commands.
 * @ena_dev: ENA communication layer struct
+19 −17
Original line number Diff line number Diff line
@@ -18,7 +18,8 @@ static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
	cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
			+ (head_masked * io_cq->cdesc_entry_size_in_bytes));

	desc_phase = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
	desc_phase = (READ_ONCE(cdesc->status) &
		      ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
		     ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;

	if (desc_phase != expected_phase)
@@ -62,7 +63,7 @@ static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
		}

		io_sq->entries_in_tx_burst_left--;
		pr_debug("decreasing entries_in_tx_burst_left of queue %d to %d\n",
		pr_debug("Decreasing entries_in_tx_burst_left of queue %d to %d\n",
			 io_sq->qid, io_sq->entries_in_tx_burst_left);
	}

@@ -101,12 +102,12 @@ static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,

	if (unlikely((header_offset + header_len) >
		     llq_info->desc_list_entry_size)) {
		pr_err("trying to write header larger than llq entry can accommodate\n");
		pr_err("Trying to write header larger than llq entry can accommodate\n");
		return -EFAULT;
	}

	if (unlikely(!bounce_buffer)) {
		pr_err("bounce buffer is NULL\n");
		pr_err("Bounce buffer is NULL\n");
		return -EFAULT;
	}

@@ -124,7 +125,7 @@ static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
	bounce_buffer = pkt_ctrl->curr_bounce_buf;

	if (unlikely(!bounce_buffer)) {
		pr_err("bounce buffer is NULL\n");
		pr_err("Bounce buffer is NULL\n");
		return NULL;
	}

@@ -235,7 +236,8 @@ static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,

		ena_com_cq_inc_head(io_cq);
		count++;
		last = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
		last = (READ_ONCE(cdesc->status) &
			ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
		       ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
	} while (!last);

@@ -248,7 +250,7 @@ static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
		io_cq->cur_rx_pkt_cdesc_count = 0;
		io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;

		pr_debug("ena q_id: %d packets were completed. first desc idx %u descs# %d\n",
		pr_debug("ENA q_id: %d packets were completed. first desc idx %u descs# %d\n",
			 io_cq->qid, *first_cdesc_idx, count);
	} else {
		io_cq->cur_rx_pkt_cdesc_count += count;
@@ -352,7 +354,7 @@ static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
		ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;

	pr_debug("ena_rx_ctx->l3_proto %d ena_rx_ctx->l4_proto %d\nena_rx_ctx->l3_csum_err %d ena_rx_ctx->l4_csum_err %d\nhash frag %d frag: %d cdesc_status: %x\n",
	pr_debug("l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n",
		 ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto,
		 ena_rx_ctx->l3_csum_err, ena_rx_ctx->l4_csum_err,
		 ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
@@ -385,7 +387,7 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
	}

	if (unlikely(header_len > io_sq->tx_max_header_size)) {
		pr_err("header size is too large %d max header: %d\n",
		pr_err("Header size is too large %d max header: %d\n",
		       header_len, io_sq->tx_max_header_size);
		return -EINVAL;
	}
@@ -400,7 +402,7 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,

	rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx, &have_meta);
	if (unlikely(rc)) {
		pr_err("failed to create and store tx meta desc\n");
		pr_err("Failed to create and store tx meta desc\n");
		return rc;
	}

@@ -523,7 +525,7 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
		return 0;
	}

	pr_debug("fetch rx packet: queue %d completed desc: %d\n", io_cq->qid,
	pr_debug("Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid,
		 nb_hw_desc);

	if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
+3 −3
Original line number Diff line number Diff line
@@ -140,7 +140,7 @@ static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
						   llq_info->descs_per_entry);
	}

	pr_debug("queue: %d num_descs: %d num_entries_needed: %d\n", io_sq->qid,
	pr_debug("Queue: %d num_descs: %d num_entries_needed: %d\n", io_sq->qid,
		 num_descs, num_entries_needed);

	return num_entries_needed > io_sq->entries_in_tx_burst_left;
@@ -151,13 +151,13 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
	u16 max_entries_in_tx_burst = io_sq->llq_info.max_entries_in_tx_burst;
	u16 tail = io_sq->tail;

	pr_debug("write submission queue doorbell for queue: %d tail: %d\n",
	pr_debug("Write submission queue doorbell for queue: %d tail: %d\n",
		 io_sq->qid, tail);

	writel(tail, io_sq->db_addr);

	if (is_llq_max_tx_burst_exists(io_sq)) {
		pr_debug("reset available entries in tx burst for queue %d to %d\n",
		pr_debug("Reset available entries in tx burst for queue %d to %d\n",
			 io_sq->qid, max_entries_in_tx_burst);
		io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst;
	}
Loading