Commit f598a497 authored by John Garry, committed by Joerg Roedel
Browse files

iova: Add CPU hotplug handler to flush rcaches



Like the Intel IOMMU driver already does, flush the per-IOVA domain
CPU rcache when a CPU goes offline - there's no point in keeping it.

Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: John Garry <john.garry@huawei.com>
Link: https://lore.kernel.org/r/1616675401-151997-2-git-send-email-john.garry@huawei.com


Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 371d7955
Loading
Loading
Loading
Loading
+29 −1
Original line number Diff line number Diff line
@@ -25,6 +25,17 @@ static void init_iova_rcaches(struct iova_domain *iovad);
static void free_iova_rcaches(struct iova_domain *iovad);
static void fq_destroy_all_entries(struct iova_domain *iovad);
static void fq_flush_timeout(struct timer_list *t);

/*
 * CPU-hotplug "dead" callback: when a CPU goes offline, drop that CPU's
 * per-CPU IOVA rcache for the domain this instance node belongs to, since
 * the offlined CPU can no longer make use of it.
 */
static int iova_cpuhp_dead(unsigned int cpu, struct hlist_node *node)
{
	struct iova_domain *domain = hlist_entry_safe(node, struct iova_domain,
						      cpuhp_dead);

	free_cpu_cached_iovas(cpu, domain);
	return 0;
}

static void free_global_cached_iovas(struct iova_domain *iovad);

static struct iova *to_iova(struct rb_node *node)
@@ -56,6 +67,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
	iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
	rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
	rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
	cpuhp_state_add_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD, &iovad->cpuhp_dead);
	init_iova_rcaches(iovad);
}
EXPORT_SYMBOL_GPL(init_iova_domain);
@@ -299,10 +311,21 @@ int iova_cache_get(void)
{
	mutex_lock(&iova_cache_mutex);
	if (!iova_cache_users) {
		int ret;

		ret = cpuhp_setup_state_multi(CPUHP_IOMMU_IOVA_DEAD, "iommu/iova:dead", NULL,
					iova_cpuhp_dead);
		if (ret) {
			mutex_unlock(&iova_cache_mutex);
			pr_err("Couldn't register cpuhp handler\n");
			return ret;
		}

		iova_cache = kmem_cache_create(
			"iommu_iova", sizeof(struct iova), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!iova_cache) {
			cpuhp_remove_multi_state(CPUHP_IOMMU_IOVA_DEAD);
			mutex_unlock(&iova_cache_mutex);
			pr_err("Couldn't create iova cache\n");
			return -ENOMEM;
@@ -324,8 +347,10 @@ void iova_cache_put(void)
		return;
	}
	iova_cache_users--;
	if (!iova_cache_users)
	if (!iova_cache_users) {
		cpuhp_remove_multi_state(CPUHP_IOMMU_IOVA_DEAD);
		kmem_cache_destroy(iova_cache);
	}
	mutex_unlock(&iova_cache_mutex);
}
EXPORT_SYMBOL_GPL(iova_cache_put);
@@ -648,6 +673,9 @@ void put_iova_domain(struct iova_domain *iovad)
{
	struct iova *iova, *tmp;

	cpuhp_state_remove_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD,
					    &iovad->cpuhp_dead);

	free_iova_flush_queue(iovad);
	free_iova_rcaches(iovad);
	rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node)
+1 −0
Original line number Diff line number Diff line
@@ -58,6 +58,7 @@ enum cpuhp_state {
	CPUHP_NET_DEV_DEAD,
	CPUHP_PCI_XGENE_DEAD,
	CPUHP_IOMMU_INTEL_DEAD,
	CPUHP_IOMMU_IOVA_DEAD,
	CPUHP_LUSTRE_CFS_DEAD,
	CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
	CPUHP_PADATA_DEAD,
+1 −0
Original line number Diff line number Diff line
@@ -95,6 +95,7 @@ struct iova_domain {
						   flush-queues */
	atomic_t fq_timer_on;			/* 1 when timer is active, 0
						   when not */
	struct hlist_node	cpuhp_dead;
};

static inline unsigned long iova_size(struct iova *iova)