Commit 347e4e44 authored by Tony Battersby, committed by Andrew Morton
Browse files

dmapool: cleanup integer types

To represent the size of a single allocation, dmapool currently uses
'unsigned int' in some places and 'size_t' in other places.  Standardize
on 'unsigned int' to reduce overhead, but use 'size_t' when counting all
the blocks in the entire pool.

Link: https://lkml.kernel.org/r/20230126215125.4069751-5-kbusch@meta.com


Signed-off-by: Tony Battersby <tonyb@cybernetics.com>
Signed-off-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 65216545
Loading
Loading
Loading
Loading
+11 −8
Original line number Original line Diff line number Diff line
@@ -43,10 +43,10 @@
struct dma_pool {		/* the pool */
struct dma_pool {		/* the pool */
	struct list_head page_list;
	struct list_head page_list;
	spinlock_t lock;
	spinlock_t lock;
	size_t size;
	struct device *dev;
	struct device *dev;
	size_t allocation;
	unsigned int size;
	size_t boundary;
	unsigned int allocation;
	unsigned int boundary;
	char name[32];
	char name[32];
	struct list_head pools;
	struct list_head pools;
};
};
@@ -73,7 +73,7 @@ static ssize_t pools_show(struct device *dev, struct device_attribute *attr, cha
	mutex_lock(&pools_lock);
	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned pages = 0;
		unsigned blocks = 0;
		size_t blocks = 0;


		spin_lock_irq(&pool->lock);
		spin_lock_irq(&pool->lock);
		list_for_each_entry(page, &pool->page_list, page_list) {
		list_for_each_entry(page, &pool->page_list, page_list) {
@@ -83,9 +83,10 @@ static ssize_t pools_show(struct device *dev, struct device_attribute *attr, cha
		spin_unlock_irq(&pool->lock);
		spin_unlock_irq(&pool->lock);


		/* per-pool info, no real statistics yet */
		/* per-pool info, no real statistics yet */
		size += sysfs_emit_at(buf, size, "%-16s %4u %4zu %4zu %2u\n",
		size += sysfs_emit_at(buf, size, "%-16s %4zu %4zu %4u %2u\n",
				      pool->name, blocks,
				      pool->name, blocks,
				      pages * (pool->allocation / pool->size),
				      (size_t) pages *
				      (pool->allocation / pool->size),
				      pool->size, pages);
				      pool->size, pages);
	}
	}
	mutex_unlock(&pools_lock);
	mutex_unlock(&pools_lock);
@@ -133,7 +134,7 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
	else if (align & (align - 1))
	else if (align & (align - 1))
		return NULL;
		return NULL;


	if (size == 0)
	if (size == 0 || size > INT_MAX)
		return NULL;
		return NULL;
	else if (size < 4)
	else if (size < 4)
		size = 4;
		size = 4;
@@ -146,6 +147,8 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
	else if ((boundary < size) || (boundary & (boundary - 1)))
	else if ((boundary < size) || (boundary & (boundary - 1)))
		return NULL;
		return NULL;


	boundary = min(boundary, allocation);

	retval = kmalloc(sizeof(*retval), GFP_KERNEL);
	retval = kmalloc(sizeof(*retval), GFP_KERNEL);
	if (!retval)
	if (!retval)
		return retval;
		return retval;
@@ -306,7 +309,7 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
{
{
	unsigned long flags;
	unsigned long flags;
	struct dma_page *page;
	struct dma_page *page;
	size_t offset;
	unsigned int offset;
	void *retval;
	void *retval;


	might_alloc(mem_flags);
	might_alloc(mem_flags);