Commit b5ea3305 authored by Keqian Zhu, committed by Zheng Zengkai

vfio/iommu_type1: Maintain a counter for non_pinned_groups

virt inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I3ZUKK
CVE: NA

------------------------------

With this counter, we never need to traverse all groups to update
pinned_scope of vfio_iommu.
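
A minimal standalone sketch of the counting scheme, for illustration only (the sketch_* names are invented for this note; the real code in drivers/vfio/vfio_iommu_type1.c performs these updates on struct vfio_iommu under iommu->lock):

    /* Toy model: attach counts a group as non-pinned, the first
     * pin_pages call uncounts it, and the dirty scope becomes an
     * O(1) comparison instead of a walk over every group list. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct sketch_iommu {
    	/* replaces the old bool pinned_page_dirty_scope */
    	uint64_t num_non_pinned_groups;
    };

    struct sketch_group {
    	bool pinned_page_dirty_scope;
    };

    /* A newly attached group demotes the dirty scope: count it. */
    static void sketch_attach(struct sketch_iommu *iommu,
    			  struct sketch_group *group)
    {
    	group->pinned_page_dirty_scope = false;
    	iommu->num_non_pinned_groups++;
    }

    /* The first pin promotes the group in O(1); previously this
     * required re-walking all groups of all domains. */
    static void sketch_pin_pages(struct sketch_iommu *iommu,
    			     struct sketch_group *group)
    {
    	if (!group->pinned_page_dirty_scope) {
    		group->pinned_page_dirty_scope = true;
    		iommu->num_non_pinned_groups--;
    	}
    }

    /* Dirty scope is limited to pinned pages iff no group remains
     * counted as non-pinned. */
    static bool sketch_pinned_scope(struct sketch_iommu *iommu)
    {
    	return iommu->num_non_pinned_groups == 0;
    }

    int main(void)
    {
    	struct sketch_iommu iommu = { 0 };
    	struct sketch_group a, b;

    	sketch_attach(&iommu, &a);
    	sketch_attach(&iommu, &b);
    	printf("pinned scope: %d\n", sketch_pinned_scope(&iommu)); /* 0 */
    	sketch_pin_pages(&iommu, &a);
    	sketch_pin_pages(&iommu, &b);
    	printf("pinned scope: %d\n", sketch_pinned_scope(&iommu)); /* 1 */
    	return 0;
    }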

Suggested-by: Alex Williamson <alex.williamson@redhat.com>
Signed-off-by: Keqian Zhu <zhukeqian1@huawei.com>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
Signed-off-by: Kunkun Jiang <jiangkunkun@huawei.com>
Reviewed-by: Keqian Zhu <zhukeqian1@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
parent a466f5dd
drivers/vfio/vfio_iommu_type1.c +5 −35
@@ -72,10 +72,10 @@ struct vfio_iommu {
 	struct blocking_notifier_head notifier;
 	unsigned int		dma_avail;
 	uint64_t		pgsize_bitmap;
+	uint64_t		num_non_pinned_groups;
 	bool			v2;
 	bool			nesting;
 	bool			dirty_page_tracking;
-	bool			pinned_page_dirty_scope;
 };
 
 struct vfio_domain {
@@ -161,7 +161,6 @@ static int put_pfn(unsigned long pfn, int prot);
 static struct vfio_group *vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
 					       struct iommu_group *iommu_group);
 
-static void update_pinned_page_dirty_scope(struct vfio_iommu *iommu);
 /*
  * This code handles mapping and unmapping of user data buffers
  * into DMA'ble space using the IOMMU
@@ -749,7 +748,7 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
 	group = vfio_iommu_find_iommu_group(iommu, iommu_group);
 	if (!group->pinned_page_dirty_scope) {
 		group->pinned_page_dirty_scope = true;
-		update_pinned_page_dirty_scope(iommu);
+		iommu->num_non_pinned_groups--;
 	}
 
 	goto pin_done;
@@ -1027,7 +1026,7 @@ static int update_user_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,
 	 * mark all pages dirty if any IOMMU capable device is not able
	 * to report dirty pages and all pages are pinned and mapped.
	 */
-	if (!iommu->pinned_page_dirty_scope && dma->iommu_mapped)
+	if (iommu->num_non_pinned_groups && dma->iommu_mapped)
 		bitmap_set(dma->bitmap, 0, nbits);
 
 	if (shift) {
@@ -1810,33 +1809,6 @@ static struct vfio_group *vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
 	return group;
 }
 
-static void update_pinned_page_dirty_scope(struct vfio_iommu *iommu)
-{
-	struct vfio_domain *domain;
-	struct vfio_group *group;
-
-	list_for_each_entry(domain, &iommu->domain_list, next) {
-		list_for_each_entry(group, &domain->group_list, next) {
-			if (!group->pinned_page_dirty_scope) {
-				iommu->pinned_page_dirty_scope = false;
-				return;
-			}
-		}
-	}
-
-	if (iommu->external_domain) {
-		domain = iommu->external_domain;
-		list_for_each_entry(group, &domain->group_list, next) {
-			if (!group->pinned_page_dirty_scope) {
-				iommu->pinned_page_dirty_scope = false;
-				return;
-			}
-		}
-	}
-
-	iommu->pinned_page_dirty_scope = true;
-}
-
 static bool vfio_iommu_has_sw_msi(struct list_head *group_resv_regions,
 				  phys_addr_t *base)
 {
@@ -2281,8 +2253,6 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 			 * addition of a dirty tracking group.
 			 */
 			group->pinned_page_dirty_scope = true;
-			if (!iommu->pinned_page_dirty_scope)
-				update_pinned_page_dirty_scope(iommu);
 			mutex_unlock(&iommu->lock);
 
 			return 0;
@@ -2404,7 +2374,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
 	 * demotes the iommu scope until it declares itself dirty tracking
	 * capable via the page pinning interface.
	 */
-	iommu->pinned_page_dirty_scope = false;
+	iommu->num_non_pinned_groups++;
 	mutex_unlock(&iommu->lock);
 	vfio_iommu_resv_free(&group_resv_regions);
 
@@ -2623,7 +2593,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
 	 * to be promoted.
	 */
 	if (update_dirty_scope) {
-		update_pinned_page_dirty_scope(iommu);
+		iommu->num_non_pinned_groups--;
 		if (iommu->dirty_page_tracking)
 			vfio_iommu_populate_bitmap_full(iommu);
 	}