Commit 2739bd76 authored by David S. Miller
Browse files

Merge branch 'ipa-kill-validation'

Alex Elder says:

====================
net: ipa: kill IPA_VALIDATION

A few months ago I proposed cleaning up some code that validates
certain things conditionally, arguing that doing so once is enough,
thus doing so always should not be necessary.
  https://lore.kernel.org/netdev/20210320141729.1956732-1-elder@linaro.org/


Leon Romanovsky felt strongly that this was a mistake, and in the
end I agreed to change my plans.

This series finally completes what I said I would do about this,
ultimately eliminating the IPA_VALIDATION symbol and conditional
code entirely.

The first patch both extends and simplifies some validation done for
IPA immediate commands, and performs those tests unconditionally.

The second patch fixes a bug that wasn't normally exposed because of
the conditional compilation (evidence that Leon was right about this).
It makes filter and routing table validation occur unconditionally.

The third eliminates the remaining conditionally-defined code and
removes the line in the Makefile used to enable validation.

And the fourth removes all comments containing ipa_assert()
statements, replacing most of them with WARN_ON() calls.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
parents beeee08c 5bc55884
Loading
Loading
Loading
Loading
+0 −3
Original line number Diff line number Diff line
# Un-comment the next line if you want to validate configuration data
#ccflags-y		+=	-DIPA_VALIDATE

obj-$(CONFIG_QCOM_IPA)	+=	ipa.o

ipa-y			:=	ipa_main.o ipa_clock.o ipa_reg.o ipa_mem.o \
+0 −2
Original line number Diff line number Diff line
@@ -1964,7 +1964,6 @@ static void gsi_evt_ring_init(struct gsi *gsi)
static bool gsi_channel_data_valid(struct gsi *gsi,
				   const struct ipa_gsi_endpoint_data *data)
{
#ifdef IPA_VALIDATION
	u32 channel_id = data->channel_id;
	struct device *dev = gsi->dev;

@@ -2010,7 +2009,6 @@ static bool gsi_channel_data_valid(struct gsi *gsi,
			channel_id, data->channel.event_count);
		return false;
	}
#endif /* IPA_VALIDATION */

	return true;
}
+18 −16
Original line number Diff line number Diff line
@@ -90,14 +90,12 @@ int gsi_trans_pool_init(struct gsi_trans_pool *pool, size_t size, u32 count,
{
	void *virt;

#ifdef IPA_VALIDATE
	if (!size)
		return -EINVAL;
	if (count < max_alloc)
		return -EINVAL;
	if (!max_alloc)
		return -EINVAL;
#endif /* IPA_VALIDATE */

	/* By allocating a few extra entries in our pool (one less
	 * than the maximum number that will be requested in a
@@ -140,14 +138,12 @@ int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
	dma_addr_t addr;
	void *virt;

#ifdef IPA_VALIDATE
	if (!size)
		return -EINVAL;
	if (count < max_alloc)
		return -EINVAL;
	if (!max_alloc)
		return -EINVAL;
#endif /* IPA_VALIDATE */

	/* Don't let allocations cross a power-of-two boundary */
	size = __roundup_pow_of_two(size);
@@ -188,8 +184,8 @@ static u32 gsi_trans_pool_alloc_common(struct gsi_trans_pool *pool, u32 count)
{
	u32 offset;

	/* assert(count > 0); */
	/* assert(count <= pool->max_alloc); */
	WARN_ON(!count);
	WARN_ON(count > pool->max_alloc);

	/* Allocate from beginning if wrap would occur */
	if (count > pool->count - pool->free)
@@ -225,9 +221,10 @@ void *gsi_trans_pool_next(struct gsi_trans_pool *pool, void *element)
{
	void *end = pool->base + pool->count * pool->size;

	/* assert(element >= pool->base); */
	/* assert(element < end); */
	/* assert(pool->max_alloc == 1); */
	WARN_ON(element < pool->base);
	WARN_ON(element >= end);
	WARN_ON(pool->max_alloc != 1);

	element += pool->size;

	return element < end ? element : pool->base;
@@ -332,7 +329,8 @@ struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
	struct gsi_trans_info *trans_info;
	struct gsi_trans *trans;

	/* assert(tre_count <= gsi_channel_trans_tre_max(gsi, channel_id)); */
	if (WARN_ON(tre_count > gsi_channel_trans_tre_max(gsi, channel_id)))
		return NULL;

	trans_info = &channel->trans_info;

@@ -408,7 +406,7 @@ void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size,
	u32 which = trans->used++;
	struct scatterlist *sg;

	/* assert(which < trans->tre_count); */
	WARN_ON(which >= trans->tre_count);

	/* Commands are quite different from data transfer requests.
	 * Their payloads come from a pool whose memory is allocated
@@ -441,8 +439,10 @@ int gsi_trans_page_add(struct gsi_trans *trans, struct page *page, u32 size,
	struct scatterlist *sg = &trans->sgl[0];
	int ret;

	/* assert(trans->tre_count == 1); */
	/* assert(!trans->used); */
	if (WARN_ON(trans->tre_count != 1))
		return -EINVAL;
	if (WARN_ON(trans->used))
		return -EINVAL;

	sg_set_page(sg, page, size, offset);
	ret = dma_map_sg(trans->gsi->dev, sg, 1, trans->direction);
@@ -461,8 +461,10 @@ int gsi_trans_skb_add(struct gsi_trans *trans, struct sk_buff *skb)
	u32 used;
	int ret;

	/* assert(trans->tre_count == 1); */
	/* assert(!trans->used); */
	if (WARN_ON(trans->tre_count != 1))
		return -EINVAL;
	if (WARN_ON(trans->used))
		return -EINVAL;

	/* skb->len will not be 0 (checked early) */
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
@@ -550,7 +552,7 @@ static void __gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
	u32 avail;
	u32 i;

	/* assert(trans->used > 0); */
	WARN_ON(!trans->used);

	/* Consume the entries.  If we cross the end of the ring while
	 * filling them we'll switch to the beginning to finish.
+30 −21
Original line number Diff line number Diff line
@@ -159,35 +159,49 @@ static void ipa_cmd_validate_build(void)
	BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK));
#undef TABLE_COUNT_MAX
#undef TABLE_SIZE
}

#ifdef IPA_VALIDATE
	/* Hashed and non-hashed fields are assumed to be the same size */
	BUILD_BUG_ON(field_max(IP_FLTRT_FLAGS_HASH_SIZE_FMASK) !=
		     field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK));
	BUILD_BUG_ON(field_max(IP_FLTRT_FLAGS_HASH_ADDR_FMASK) !=
		     field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK));

	/* Valid endpoint numbers must fit in the IP packet init command */
	BUILD_BUG_ON(field_max(IPA_PACKET_INIT_DEST_ENDPOINT_FMASK) <
		     IPA_ENDPOINT_MAX - 1);
}

/* Validate a memory region holding a table */
bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
			 bool route, bool ipv6, bool hashed)
bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem, bool route)
{
	u32 offset_max = field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
	u32 size_max = field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);
	const char *table = route ? "route" : "filter";
	struct device *dev = &ipa->pdev->dev;
	u32 offset_max;

	offset_max = hashed ? field_max(IP_FLTRT_FLAGS_HASH_ADDR_FMASK)
			    : field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
	/* Size must fit in the immediate command field that holds it */
	if (mem->size > size_max) {
		dev_err(dev, "%s table region size too large\n", table);
		dev_err(dev, "    (0x%04x > 0x%04x)\n",
			mem->size, size_max);

		return false;
	}

	/* Offset must fit in the immediate command field that holds it */
	if (mem->offset > offset_max ||
	    ipa->mem_offset > offset_max - mem->offset) {
		dev_err(dev, "IPv%c %s%s table region offset too large\n",
			ipv6 ? '6' : '4', hashed ? "hashed " : "",
			route ? "route" : "filter");
		dev_err(dev, "%s table region offset too large\n", table);
		dev_err(dev, "    (0x%04x + 0x%04x > 0x%04x)\n",
			ipa->mem_offset, mem->offset, offset_max);

		return false;
	}

	/* Entire memory range must fit within IPA-local memory */
	if (mem->offset > ipa->mem_size ||
	    mem->size > ipa->mem_size - mem->offset) {
		dev_err(dev, "IPv%c %s%s table region out of range\n",
			ipv6 ? '6' : '4', hashed ? "hashed " : "",
			route ? "route" : "filter");
		dev_err(dev, "%s table region out of range\n", table);
		dev_err(dev, "    (0x%04x + 0x%04x > 0x%04x)\n",
			mem->offset, mem->size, ipa->mem_size);

@@ -331,7 +345,6 @@ bool ipa_cmd_data_valid(struct ipa *ipa)
	return true;
}

#endif /* IPA_VALIDATE */

int ipa_cmd_pool_init(struct gsi_channel *channel, u32 tre_max)
{
@@ -522,9 +535,6 @@ static void ipa_cmd_ip_packet_init_add(struct gsi_trans *trans, u8 endpoint_id)
	union ipa_cmd_payload *cmd_payload;
	dma_addr_t payload_addr;

	/* assert(endpoint_id <
		  field_max(IPA_PACKET_INIT_DEST_ENDPOINT_FMASK)); */

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->ip_packet_init;

@@ -548,8 +558,9 @@ void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset, u16 size,
	u16 flags;

	/* size and offset must fit in 16 bit fields */
	/* assert(size > 0 && size <= U16_MAX); */
	/* assert(offset <= U16_MAX && ipa->mem_offset <= U16_MAX - offset); */
	WARN_ON(!size);
	WARN_ON(size > U16_MAX);
	WARN_ON(offset > U16_MAX || ipa->mem_offset > U16_MAX - offset);

	offset += ipa->mem_offset;

@@ -588,8 +599,6 @@ static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans)
	union ipa_cmd_payload *cmd_payload;
	dma_addr_t payload_addr;

	/* assert(tag <= field_max(IP_PACKET_TAG_STATUS_TAG_FMASK)); */

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->ip_packet_tag_status;

+1 −21
Original line number Diff line number Diff line
@@ -57,20 +57,16 @@ struct ipa_cmd_info {
	enum dma_data_direction direction;
};

#ifdef IPA_VALIDATE

/**
 * ipa_cmd_table_valid() - Validate a memory region holding a table
 * @ipa:	- IPA pointer
 * @mem:	- IPA memory region descriptor
 * @route:	- Whether the region holds a route or filter table
 * @ipv6:	- Whether the table is for IPv6 or IPv4
 * @hashed:	- Whether the table is hashed or non-hashed
 *
 * Return:	true if region is valid, false otherwise
 */
bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
			    bool route, bool ipv6, bool hashed);
			    bool route);

/**
 * ipa_cmd_data_valid() - Validate command-related configuration is valid
@@ -80,22 +76,6 @@ bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
 */
bool ipa_cmd_data_valid(struct ipa *ipa);

#else /* !IPA_VALIDATE */

static inline bool ipa_cmd_table_valid(struct ipa *ipa,
				       const struct ipa_mem *mem, bool route,
				       bool ipv6, bool hashed)
{
	return true;
}

static inline bool ipa_cmd_data_valid(struct ipa *ipa)
{
	return true;
}

#endif /* !IPA_VALIDATE */

/**
 * ipa_cmd_pool_init() - initialize command channel pools
 * @channel:	AP->IPA command TX GSI channel pointer
Loading