Commit a6117097 authored by Maarten Lankhorst, committed by Daniel Vetter
Browse files

drm/i915: Rework struct phys attachment handling



Instead of creating a separate object type, we make changes to
the shmem type, to clear struct page backing. This will allow us to
ensure we never run into a race when we exchange obj->ops with other
function pointers.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-9-maarten.lankhorst@linux.intel.com
parent c471748d
Loading
Loading
Loading
Loading
+8 −0
Original line number Diff line number Diff line
@@ -37,7 +37,15 @@ void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				     struct sg_table *pages,
				     bool needs_clflush);

int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
				const struct drm_i915_gem_pwrite *args);
int i915_gem_object_pread_phys(struct drm_i915_gem_object *obj,
			       const struct drm_i915_gem_pread *args);

int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);
void i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
				    struct sg_table *pages);


void i915_gem_flush_free_objects(struct drm_i915_private *i915);

+49 −53
Original line number Diff line number Diff line
@@ -76,6 +76,8 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)

	intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);

	/* We're no longer struct page backed */
	obj->flags &= ~I915_BO_ALLOC_STRUCT_PAGE;
	__i915_gem_object_set_pages(obj, st, sg->length);

	return 0;
@@ -89,7 +91,7 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
	return -ENOMEM;
}

static void
void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
@@ -134,8 +136,7 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
			  vaddr, dma);
}

static int
phys_pwrite(struct drm_i915_gem_object *obj,
int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
				const struct drm_i915_gem_pwrite *args)
{
	void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
@@ -165,8 +166,7 @@ phys_pwrite(struct drm_i915_gem_object *obj,
	return 0;
}

static int
phys_pread(struct drm_i915_gem_object *obj,
int i915_gem_object_pread_phys(struct drm_i915_gem_object *obj,
			       const struct drm_i915_gem_pread *args)
{
	void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
@@ -186,86 +186,82 @@ phys_pread(struct drm_i915_gem_object *obj,
	return 0;
}

static void phys_release(struct drm_i915_gem_object *obj)
static int i915_gem_object_shmem_to_phys(struct drm_i915_gem_object *obj)
{
	fput(obj->base.filp);
}
	struct sg_table *pages;
	int err;

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.name = "i915_gem_object_phys",
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,
	pages = __i915_gem_object_unset_pages(obj);

	err = i915_gem_object_get_pages_phys(obj);
	if (err)
		goto err_xfer;

	.pread  = phys_pread,
	.pwrite = phys_pwrite,
	/* Perma-pin (until release) the physical set of pages */
	__i915_gem_object_pin_pages(obj);

	.release = phys_release,
};
	if (!IS_ERR_OR_NULL(pages))
		i915_gem_shmem_ops.put_pages(obj, pages);

	i915_gem_object_release_memory_region(obj);
	return 0;

err_xfer:
	if (!IS_ERR_OR_NULL(pages)) {
		unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);

		__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
	}
	return err;
}

int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
{
	struct sg_table *pages;
	int err;

	if (align > obj->base.size)
		return -EINVAL;

	if (obj->ops == &i915_gem_phys_ops)
		return 0;

	if (!i915_gem_object_is_shmem(obj))
		return -EINVAL;

	if (!i915_gem_object_has_struct_page(obj))
		return 0;

	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	if (err)
		return err;

	mutex_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);

	if (unlikely(!i915_gem_object_has_struct_page(obj)))
		goto out;

	if (obj->mm.madv != I915_MADV_WILLNEED) {
		err = -EFAULT;
		goto err_unlock;
		goto out;
	}

	if (i915_gem_object_has_tiling_quirk(obj)) {
		err = -EFAULT;
		goto err_unlock;
		goto out;
	}

	if (obj->mm.mapping) {
	if (obj->mm.mapping || i915_gem_object_has_pinned_pages(obj)) {
		err = -EBUSY;
		goto err_unlock;
		goto out;
	}

	pages = __i915_gem_object_unset_pages(obj);

	obj->ops = &i915_gem_phys_ops;
	obj->flags &= ~I915_BO_ALLOC_STRUCT_PAGE;

	err = ____i915_gem_object_get_pages(obj);
	if (err)
		goto err_xfer;

	/* Perma-pin (until release) the physical set of pages */
	__i915_gem_object_pin_pages(obj);

	if (!IS_ERR_OR_NULL(pages))
		i915_gem_shmem_ops.put_pages(obj, pages);

	i915_gem_object_release_memory_region(obj);

	mutex_unlock(&obj->mm.lock);
	return 0;
	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		drm_dbg(obj->base.dev,
			"Attempting to obtain a purgeable object\n");
		err = -EFAULT;
		goto out;
	}

err_xfer:
	obj->ops = &i915_gem_shmem_ops;
	obj->flags |= I915_BO_ALLOC_STRUCT_PAGE;
	if (!IS_ERR_OR_NULL(pages)) {
		unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);
	err = i915_gem_object_shmem_to_phys(obj);

		__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
	}
err_unlock:
out:
	mutex_unlock(&obj->mm.lock);
	return err;
}
+21 −1
Original line number Diff line number Diff line
@@ -303,6 +303,11 @@ shmem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
	struct pagevec pvec;
	struct page *page;

	if (unlikely(!i915_gem_object_has_struct_page(obj))) {
		i915_gem_object_put_pages_phys(obj, pages);
		return;
	}

	__i915_gem_object_release_shmem(obj, pages, true);

	i915_gem_gtt_finish_pages(obj, pages);
@@ -343,6 +348,9 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
	/* Caller already validated user args */
	GEM_BUG_ON(!access_ok(user_data, arg->size));

	if (!i915_gem_object_has_struct_page(obj))
		return i915_gem_object_pwrite_phys(obj, arg);

	/*
	 * Before we instantiate/pin the backing store for our use, we
	 * can prepopulate the shmemfs filp efficiently using a write into
@@ -421,8 +429,19 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
	return 0;
}

/*
 * shmem_pread - pread backend for shmem objects.
 * @obj: GEM object being read
 * @arg: user pread request (offset/size/data pointer)
 *
 * Once a shmem object has been converted to phys backing it no longer has
 * struct page backing, so reads must go through the phys path.
 */
static int
shmem_pread(struct drm_i915_gem_object *obj,
	    const struct drm_i915_gem_pread *arg)
{
	/* Phys-converted object: read from the contiguous phys pages. */
	if (!i915_gem_object_has_struct_page(obj))
		return i915_gem_object_pread_phys(obj, arg);

	/*
	 * NOTE(review): -ENODEV presumably tells the caller to fall back to
	 * the generic shmem read path — confirm against i915_gem_pread_ioctl.
	 */
	return -ENODEV;
}

static void shmem_release(struct drm_i915_gem_object *obj)
{
	if (obj->flags & I915_BO_ALLOC_STRUCT_PAGE)
		i915_gem_object_release_memory_region(obj);

	fput(obj->base.filp);
@@ -438,6 +457,7 @@ const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
	.writeback = shmem_writeback,

	.pwrite = shmem_pwrite,
	.pread = shmem_pread,

	.release = shmem_release,
};
+0 −6
Original line number Diff line number Diff line
@@ -38,12 +38,6 @@ static int mock_phys_object(void *arg)
	}

	if (i915_gem_object_has_struct_page(obj)) {
		err = -EINVAL;
		pr_err("shmem has a struct page\n");
		goto out_obj;
	}

	if (obj->ops != &i915_gem_phys_ops) {
		pr_err("i915_gem_object_attach_phys did not create a phys object\n");
		err = -EINVAL;
		goto out_obj;