Commit 32e92d9f authored by John Garry, committed by Joerg Roedel

iommu/iova: Separate out rcache init



Currently the rcache structures are allocated for all IOVA domains, even if
they do not use the "fast" alloc+free interface. This is wasteful of memory.

In addition, failures in init_iova_rcaches() are not handled safely, which is
less than ideal.

Make "fast" users call a separate rcache init explicitly, which includes
error checking.
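
For reference, a "fast" user now initializes and tears down its domain roughly
as follows (illustrative sketch only; the variable names and surrounding error
path are placeholders, the real call sites are updated in the diff below):

	init_iova_domain(iovad, granule, start_pfn);
	ret = iova_domain_init_rcaches(iovad);
	if (ret)
		return ret;	/* rcaches are freed internally on failure */
	...
	put_iova_domain(iovad);	/* frees the rcaches only if they were set up */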

Signed-off-by: John Garry <john.garry@huawei.com>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Link: https://lore.kernel.org/r/1643882360-241739-1-git-send-email-john.garry@huawei.com


Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 114a6f50
drivers/iommu/dma-iommu.c  +4 −0
@@ -525,6 +525,7 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
 	unsigned long order, base_pfn;
 	struct iova_domain *iovad;
+	int ret;
 
 	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
 		return -EINVAL;
@@ -559,6 +560,9 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 	}
 
 	init_iova_domain(iovad, 1UL << order, base_pfn);
+	ret = iova_domain_init_rcaches(iovad);
+	if (ret)
+		return ret;
 
 	/* If the FQ fails we can simply fall back to strict mode */
 	if (domain->type == IOMMU_DOMAIN_DMA_FQ && iommu_dma_init_fq(domain))
drivers/iommu/iova.c  +59 −14
@@ -15,13 +15,14 @@
 /* The anchor node sits above the top of the usable address space */
 #define IOVA_ANCHOR	~0UL
 
+#define IOVA_RANGE_CACHE_MAX_SIZE 6	/* log of max cached IOVA range size (in pages) */
+
 static bool iova_rcache_insert(struct iova_domain *iovad,
 			       unsigned long pfn,
 			       unsigned long size);
 static unsigned long iova_rcache_get(struct iova_domain *iovad,
 				     unsigned long size,
 				     unsigned long limit_pfn);
-static void init_iova_rcaches(struct iova_domain *iovad);
 static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
 static void free_iova_rcaches(struct iova_domain *iovad);
 
@@ -64,8 +65,6 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
 	iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
 	rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
 	rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
-	cpuhp_state_add_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD, &iovad->cpuhp_dead);
-	init_iova_rcaches(iovad);
 }
 EXPORT_SYMBOL_GPL(init_iova_domain);
 
@@ -488,6 +487,13 @@ free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
 }
 EXPORT_SYMBOL_GPL(free_iova_fast);
 
+static void iova_domain_free_rcaches(struct iova_domain *iovad)
+{
+	cpuhp_state_remove_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD,
+					    &iovad->cpuhp_dead);
+	free_iova_rcaches(iovad);
+}
+
 /**
  * put_iova_domain - destroys the iova domain
  * @iovad: - iova domain in question.
@@ -497,9 +503,9 @@ void put_iova_domain(struct iova_domain *iovad)
 {
 	struct iova *iova, *tmp;
 
-	cpuhp_state_remove_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD,
-					    &iovad->cpuhp_dead);
-	free_iova_rcaches(iovad);
+	if (iovad->rcaches)
+		iova_domain_free_rcaches(iovad);
+
 	rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node)
 		free_iova_mem(iova);
 }
@@ -608,6 +614,7 @@ EXPORT_SYMBOL_GPL(reserve_iova);
  */
 
 #define IOVA_MAG_SIZE 128
+#define MAX_GLOBAL_MAGS 32	/* magazines per bin */
 
 struct iova_magazine {
 	unsigned long size;
@@ -620,6 +627,13 @@ struct iova_cpu_rcache {
 	struct iova_magazine *prev;
 };
 
+struct iova_rcache {
+	spinlock_t lock;
+	unsigned long depot_size;
+	struct iova_magazine *depot[MAX_GLOBAL_MAGS];
+	struct iova_cpu_rcache __percpu *cpu_rcaches;
+};
+
 static struct iova_magazine *iova_magazine_alloc(gfp_t flags)
 {
 	return kzalloc(sizeof(struct iova_magazine), flags);
@@ -693,29 +707,55 @@ static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn)
 	mag->pfns[mag->size++] = pfn;
 }
 
-static void init_iova_rcaches(struct iova_domain *iovad)
+int iova_domain_init_rcaches(struct iova_domain *iovad)
 {
-	struct iova_cpu_rcache *cpu_rcache;
-	struct iova_rcache *rcache;
 	unsigned int cpu;
-	int i;
+	int i, ret;
 
+	iovad->rcaches = kcalloc(IOVA_RANGE_CACHE_MAX_SIZE,
+				 sizeof(struct iova_rcache),
+				 GFP_KERNEL);
+	if (!iovad->rcaches)
+		return -ENOMEM;
+
 	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
+		struct iova_cpu_rcache *cpu_rcache;
+		struct iova_rcache *rcache;
+
 		rcache = &iovad->rcaches[i];
 		spin_lock_init(&rcache->lock);
 		rcache->depot_size = 0;
-		rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache), cache_line_size());
-		if (WARN_ON(!rcache->cpu_rcaches))
-			continue;
+		rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache),
+						     cache_line_size());
+		if (!rcache->cpu_rcaches) {
+			ret = -ENOMEM;
+			goto out_err;
+		}
 		for_each_possible_cpu(cpu) {
 			cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
+
 			spin_lock_init(&cpu_rcache->lock);
 			cpu_rcache->loaded = iova_magazine_alloc(GFP_KERNEL);
 			cpu_rcache->prev = iova_magazine_alloc(GFP_KERNEL);
+			if (!cpu_rcache->loaded || !cpu_rcache->prev) {
+				ret = -ENOMEM;
+				goto out_err;
+			}
 		}
 	}
+
+	ret = cpuhp_state_add_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD,
+					       &iovad->cpuhp_dead);
+	if (ret)
+		goto out_err;
+	return 0;
+
+out_err:
+	free_iova_rcaches(iovad);
+	return ret;
 }
+EXPORT_SYMBOL_GPL(iova_domain_init_rcaches);
 
 /*
  * Try inserting IOVA range starting with 'iova_pfn' into 'rcache', and
  * return true on success.  Can fail if rcache is full and we can't free
@@ -831,7 +871,7 @@ static unsigned long iova_rcache_get(struct iova_domain *iovad,
 {
 	unsigned int log_size = order_base_2(size);
 
-	if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
+	if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE || !iovad->rcaches)
 		return 0;
 
 	return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn - size);
@@ -849,6 +889,8 @@ static void free_iova_rcaches(struct iova_domain *iovad)
 
 	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
 		rcache = &iovad->rcaches[i];
+		if (!rcache->cpu_rcaches)
+			break;
 		for_each_possible_cpu(cpu) {
 			cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
 			iova_magazine_free(cpu_rcache->loaded);
@@ -858,6 +900,9 @@ static void free_iova_rcaches(struct iova_domain *iovad)
 		for (j = 0; j < rcache->depot_size; ++j)
 			iova_magazine_free(rcache->depot[j]);
 	}
+
+	kfree(iovad->rcaches);
+	iovad->rcaches = NULL;
 }
 
 /*
drivers/vdpa/vdpa_user/iova_domain.c  +11 −0
@@ -480,6 +480,7 @@ vduse_domain_create(unsigned long iova_limit, size_t bounce_size)
 	struct file *file;
 	struct vduse_bounce_map *map;
 	unsigned long pfn, bounce_pfns;
+	int ret;
 
 	bounce_pfns = PAGE_ALIGN(bounce_size) >> PAGE_SHIFT;
 	if (iova_limit <= bounce_size)
@@ -513,10 +514,20 @@ vduse_domain_create(unsigned long iova_limit, size_t bounce_size)
 	spin_lock_init(&domain->iotlb_lock);
 	init_iova_domain(&domain->stream_iovad,
 			PAGE_SIZE, IOVA_START_PFN);
+	ret = iova_domain_init_rcaches(&domain->stream_iovad);
+	if (ret)
+		goto err_iovad_stream;
 	init_iova_domain(&domain->consistent_iovad,
 			PAGE_SIZE, bounce_pfns);
+	ret = iova_domain_init_rcaches(&domain->consistent_iovad);
+	if (ret)
+		goto err_iovad_consistent;
 
 	return domain;
+err_iovad_consistent:
+	put_iova_domain(&domain->stream_iovad);
+err_iovad_stream:
+	fput(file);
 err_file:
 	vfree(domain->bounce_maps);
 err_map:
include/linux/iova.h  +3 −12
@@ -21,18 +21,8 @@ struct iova {
 	unsigned long	pfn_lo; /* Lowest allocated pfn */
 };
 
-struct iova_magazine;
-struct iova_cpu_rcache;
-
-#define IOVA_RANGE_CACHE_MAX_SIZE 6	/* log of max cached IOVA range size (in pages) */
-#define MAX_GLOBAL_MAGS 32	/* magazines per bin */
-
-struct iova_rcache {
-	spinlock_t lock;
-	unsigned long depot_size;
-	struct iova_magazine *depot[MAX_GLOBAL_MAGS];
-	struct iova_cpu_rcache __percpu *cpu_rcaches;
-};
+struct iova_rcache;
 
 /* holds all the iova translations for a domain */
 struct iova_domain {
@@ -46,7 +36,7 @@ struct iova_domain {
 	unsigned long	max32_alloc_size; /* Size of last failed allocation */
 	struct iova	anchor;		/* rbtree lookup anchor */
 
-	struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE];	/* IOVA range caches */
+	struct iova_rcache	*rcaches;
 	struct hlist_node	cpuhp_dead;
 };

@@ -102,6 +92,7 @@ struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
 	unsigned long pfn_hi);
 void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
 	unsigned long start_pfn);
+int iova_domain_init_rcaches(struct iova_domain *iovad);
 struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
 void put_iova_domain(struct iova_domain *iovad);
 #else