Commit 4dfc6967 authored by Zhang Zekun
Browse files

iommu/iova: Manage the depot list size

mainline inclusion
from mainline-v6.7-rc1
commit 233045378dbbc0a7346127d19a54d4f91e0bd855
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I8KS9A
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=233045378dbbc0a7346127d19a54d4f91e0bd855



----------------------------------------

Automatically scaling the depot up to suit the peak capacity of a
workload is all well and good, but it would be nice to have a way to
scale it back down again if the workload changes. To that end, add
background reclaim that will gradually free surplus magazines if the
depot size remains above a reasonable threshold for long enough.

Reviewed-by: Jerry Snitselaar <jsnitsel@redhat.com>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Link: https://lore.kernel.org/r/03170665c56d89c6ce6081246b47f68d4e483308.1694535580.git.robin.murphy@arm.com


Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Zhang Zekun <zhangzekun11@huawei.com>
parent 876b598e
Loading
Loading
Loading
Loading
+27 −0
Original line number Diff line number Diff line
@@ -11,6 +11,7 @@
#include <linux/smp.h>
#include <linux/bitops.h>
#include <linux/cpu.h>
#include <linux/workqueue.h>

/* The anchor node sits above the top of the usable address space */
#define IOVA_ANCHOR	~0UL
@@ -800,6 +801,8 @@ split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
 */
#define IOVA_MAG_SIZE 127

#define IOVA_DEPOT_DELAY msecs_to_jiffies(100)

struct iova_magazine {
	union {
		unsigned long size;
@@ -893,6 +896,7 @@ static struct iova_magazine *iova_depot_pop(struct iova_rcache *rcache)

	rcache->depot = mag->next;
	mag->size = IOVA_MAG_SIZE;
	rcache->depot_size--;
	return mag;
}

@@ -900,6 +904,25 @@ static void iova_depot_push(struct iova_rcache *rcache, struct iova_magazine *ma
{
	mag->next = rcache->depot;
	rcache->depot = mag;
	rcache->depot_size++;
}

/*
 * Delayed-work handler that gradually shrinks an oversized depot: each
 * invocation frees at most one surplus magazine, and reschedules itself
 * (after IOVA_DEPOT_DELAY) only while a magazine was actually reclaimed.
 */
static void iova_depot_work_func(struct work_struct *work)
{
	struct iova_rcache *rcache = container_of(work, typeof(*rcache), work.work);
	struct iova_magazine *surplus = NULL;
	unsigned long flags;

	/* Pop one magazine only if the depot exceeds the online-CPU threshold. */
	spin_lock_irqsave(&rcache->lock, flags);
	if (rcache->depot_size > num_online_cpus())
		surplus = iova_depot_pop(rcache);
	spin_unlock_irqrestore(&rcache->lock, flags);

	if (!surplus)
		return;

	/* Release the IOVAs held by the magazine, then the magazine itself. */
	iova_magazine_free_pfns(surplus, rcache->iovad);
	iova_magazine_free(surplus);
	schedule_delayed_work(&rcache->work, IOVA_DEPOT_DELAY);
}

static void init_iova_rcaches(struct iova_domain *iovad)
@@ -912,6 +935,8 @@ static void init_iova_rcaches(struct iova_domain *iovad)
	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
		rcache = &iovad->rcaches[i];
		spin_lock_init(&rcache->lock);
		rcache->iovad = iovad;
		INIT_DELAYED_WORK(&rcache->work, iova_depot_work_func);
		rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache), cache_line_size());
		if (WARN_ON(!rcache->cpu_rcaches))
			continue;
@@ -953,6 +978,7 @@ static bool __iova_rcache_insert(struct iova_domain *iovad,
			spin_lock(&rcache->lock);
			iova_depot_push(rcache, cpu_rcache->loaded);
			spin_unlock(&rcache->lock);
			schedule_delayed_work(&rcache->work, IOVA_DEPOT_DELAY);

			cpu_rcache->loaded = new_mag;
			can_insert = true;
@@ -1052,6 +1078,7 @@ static void free_iova_rcaches(struct iova_domain *iovad)
			iova_magazine_free(cpu_rcache->prev);
		}
		free_percpu(rcache->cpu_rcaches);
		cancel_delayed_work_sync(&rcache->work);
		while (rcache->depot)
			iova_magazine_free(iova_depot_pop(rcache));
	}
+3 −0
Original line number Diff line number Diff line
@@ -29,8 +29,11 @@ struct iova_cpu_rcache;

/* Per-size-class IOVA range cache: per-CPU magazines backed by a shared depot. */
struct iova_rcache {
	spinlock_t lock;		/* protects depot and depot_size */
	unsigned int depot_size;	/* count of magazines on the depot list */
	struct iova_magazine *depot;	/* singly-linked list of spare magazines */
	struct iova_cpu_rcache __percpu *cpu_rcaches;	/* per-CPU magazine caches */
	struct iova_domain *iovad;	/* owning domain; lets reclaim work free PFNs */
	struct delayed_work work;	/* background depot reclaim (iova_depot_work_func) */
};

struct iova_domain;