Commit d976521a authored by CQ Tang, committed by Matthew Auld

drm/i915: extend i915_vma_pin_iomap()



In the future display might try to call this with a normal smem object,
which doesn't require PIN_MAPPABLE underneath in order to CPU map the
pages (unlike stolen). Extend i915_vma_pin_iomap() to directly use
i915_gem_object_pin_map() for such cases.

This change was suggested by Chris P Wilson: pin the smem object with
i915_gem_object_pin_map_unlocked().

v2 (jheikkil): Change i915_gem_object_pin_map_unlocked to
               i915_gem_object_pin_map

Signed-off-by: CQ Tang <cq.tang@intel.com>
Signed-off-by: Juha-Pekka Heikkila <juhapekka.heikkila@gmail.com>
Cc: Chris Wilson <chris.p.wilson@intel.com>
Cc: Jari Tahvanainen <jari.tahvanainen@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
[mauld: tweak commit message, plus minor checkpatch fix]
Link: https://patchwork.freedesktop.org/patch/msgid/20220610121205.29645-2-juhapekka.heikkila@gmail.com
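
The patch relies on the page_pack_bits()/page_unmask_bits()/page_mask_bits() helpers to remember how a mapping was created. As a rough, self-contained illustration of that low-bit pointer tagging (plain userspace C, not i915 code; pack_bits, VIA_PIN_MAP and friends are invented names): a page-aligned mapping leaves the low bits of its address free, so a single bit can record that the pointer came from i915_gem_object_pin_map() and therefore has to be released with __i915_gem_object_release_map() rather than io_mapping_unmap().

/* Illustrative only: mimics page_pack_bits()/page_unmask_bits()/page_mask_bits(). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SZ     4096UL
#define VIA_PIN_MAP 0x1UL   /* low-bit tag: mapping came from pin_map */

static void *pack_bits(void *ptr, unsigned long bits)
{
	return (void *)((uintptr_t)ptr | bits);            /* store tag in low bits */
}

static unsigned long unmask_bits(void *ptr)
{
	return (uintptr_t)ptr & (PAGE_SZ - 1);              /* read back the tag */
}

static void *mask_bits(void *ptr)
{
	return (void *)((uintptr_t)ptr & ~(uintptr_t)(PAGE_SZ - 1)); /* real address */
}

int main(void)
{
	void *map = aligned_alloc(PAGE_SZ, PAGE_SZ);        /* stands in for a CPU mapping */
	void *tagged = pack_bits(map, VIA_PIN_MAP);

	assert(mask_bits(tagged) == map);                   /* callers only see the clean pointer */

	if (unmask_bits(tagged))                            /* teardown picks the matching path */
		puts("release via __i915_gem_object_release_map()");
	else
		puts("release via io_mapping_unmap()");

	free(mask_bits(tagged));
	return 0;
}

This is also why the pinned pointer is returned through page_mask_bits() and why both teardown paths check page_unmask_bits() first, as the diff below shows.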
parent afd5cb39
drivers/gpu/drm/i915/i915_vma.c +23 −12
@@ -551,13 +551,6 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
 	if (WARN_ON_ONCE(vma->obj->flags & I915_BO_ALLOC_GPU_ONLY))
 		return IOMEM_ERR_PTR(-EINVAL);
 
-	if (!i915_gem_object_is_lmem(vma->obj)) {
-		if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
-			err = -ENODEV;
-			goto err;
-		}
-	}
-
 	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
 	GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));
 	GEM_BUG_ON(i915_vma_verify_bind_complete(vma));
@@ -570,19 +563,32 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
 		 * of pages, that way we can also drop the
 		 * I915_BO_ALLOC_CONTIGUOUS when allocating the object.
 		 */
-		if (i915_gem_object_is_lmem(vma->obj))
+		if (i915_gem_object_is_lmem(vma->obj)) {
 			ptr = i915_gem_object_lmem_io_map(vma->obj, 0,
 							  vma->obj->base.size);
-		else
+		} else if (i915_vma_is_map_and_fenceable(vma)) {
 			ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
 						vma->node.start,
 						vma->node.size);
+		} else {
+			ptr = (void __iomem *)
+				i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
+			if (IS_ERR(ptr)) {
+				err = PTR_ERR(ptr);
+				goto err;
+			}
+			ptr = page_pack_bits(ptr, 1);
+		}
+
 		if (ptr == NULL) {
 			err = -ENOMEM;
 			goto err;
 		}
 
 		if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
-			io_mapping_unmap(ptr);
+			if (page_unmask_bits(ptr))
+				__i915_gem_object_release_map(vma->obj);
+			else
+				io_mapping_unmap(ptr);
 			ptr = vma->iomap;
 		}
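
The unlikely(cmpxchg(...)) branch above deals with two threads racing to populate vma->iomap: only the first published mapping is kept, and the loser must release its duplicate with whichever routine matches how it was created. A rough sketch of that publish-or-release pattern using C11 atomics (not the kernel's cmpxchg(); cached_map and publish_mapping are invented names):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static _Atomic(void *) cached_map;   /* plays the role of vma->iomap */

/* Publish a freshly created mapping; if another thread already won the
 * race, release our duplicate with the matching routine and use theirs. */
static void *publish_mapping(void *fresh, void (*release)(void *))
{
	void *expected = NULL;

	if (!atomic_compare_exchange_strong(&cached_map, &expected, fresh)) {
		release(fresh);      /* lost the race: drop our own mapping */
		return expected;     /* winner's pointer, loaded by the failed CAS */
	}
	return fresh;
}

int main(void)
{
	void *a = malloc(16), *b = malloc(16);

	/* first publish wins; the second loses and frees its duplicate */
	printf("kept %p\n", publish_mapping(a, free));
	printf("kept %p\n", publish_mapping(b, free));

	free(a);                     /* the published mapping outlives both callers */
	return 0;
}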
@@ -597,7 +603,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
 	i915_vma_set_ggtt_write(vma);
 
 	/* NB Access through the GTT requires the device to be awake. */
-	return ptr;
+	return page_mask_bits(ptr);
 
 err_unpin:
 	__i915_vma_unpin(vma);
@@ -615,6 +621,8 @@ void i915_vma_unpin_iomap(struct i915_vma *vma)
 {
 	GEM_BUG_ON(vma->iomap == NULL);
 
+	/* XXX We keep the mapping until __i915_vma_unbind()/evict() */
+
 	i915_vma_flush_writes(vma);
 
 	i915_vma_unpin_fence(vma);
@@ -1763,6 +1771,9 @@ static void __i915_vma_iounmap(struct i915_vma *vma)
 	if (vma->iomap == NULL)
 		return;
 
-	io_mapping_unmap(vma->iomap);
+	if (page_unmask_bits(vma->iomap))
+		__i915_gem_object_release_map(vma->obj);
+	else
+		io_mapping_unmap(vma->iomap);
 	vma->iomap = NULL;
 }