Commit bfe53be2 authored by Matthew Auld

drm/i915/ttm: handle blitter failure on DG2

If the move or clear operation somehow fails and the memory underneath
is not cleared, like when moving to lmem, then we currently fall back to
memcpy or memset. However, with small-BAR systems this fallback might no
longer be possible. For now we use the set_wedged sledgehammer if we
ever encounter such a scenario, and mark the object as borked to plug
any holes where access to the memory underneath can happen; a rough
sketch of this failure path follows below. Add some basic selftests to
exercise this.
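An illustrative sketch only, not the patch itself (the helper name is
hypothetical; locking, ordering against fence signalling, and error
propagation are elided -- see the diff below for the real pieces):

	static void sketch_handle_move_failure(struct drm_i915_private *i915,
					       struct drm_i915_gem_object *obj)
	{
		struct intel_gt *gt;
		unsigned int i;

		/*
		 * The set_wedged sledgehammer: wedge every GT so the GPU can
		 * never be used to read the unknown memory contents.
		 */
		for_each_gt(gt, i915, i)
			intel_gt_set_wedged(gt);

		/*
		 * Write-once: mark the pages as borked so the CPU access
		 * paths (fault handler, io_mem_reserve, binding) back off.
		 */
		obj->mm.unknown_state = true;
	}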

v2:
  - In the selftests make sure we grab the runtime pm around the reset.
    Also make sure we grab the reset lock before checking if the device
    is wedged, since the wedge might still be in-progress and hence the
    bit might not be set yet.
  - Don't wedge or put the object into an unknown state, if the request
    construction fails (or similar). Just returning an error and
    skipping the fallback should be safe here.
  - Make sure we wedge each gt. (Thomas)
  - Peek at the unknown_state in io_reserve, that way we don't have to
    export or hand roll the fault_wait_for_idle. (Thomas)
  - Add the missing read-side barriers for the unknown_state; the
    write/read pairing is sketched after this changelog. (Thomas)
  - Some kernel-doc fixes. (Thomas)
v3:
  - Tweak the ordering of the set_wedged, also add FIXME.
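
The barrier pairing from the v2 notes, sketched as fragments (variable
names like "work" are illustrative; dma_fence_signal() and smp_rmb() are
the real primitives involved):

	/*
	 * Write side, in the worker that failed the copy/clear: publish the
	 * flag before signalling, so the fence signal orders the store.
	 */
	obj->mm.unknown_state = true;		/* write-once */
	dma_fence_signal(&work->fence);		/* provides write-side ordering */

	/* Read side, only after all DMA_RESV_USAGE_KERNEL fences signalled: */
	smp_rmb();				/* pairs with the signal above */
	if (obj->mm.unknown_state)
		return -EIO;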

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Cc: Jon Bloomfield <jon.bloomfield@intel.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Jordan Justen <jordan.l.justen@intel.com>
Cc: Kenneth Graunke <kenneth@whitecape.org>
Cc: Akeem G Abodunrin <akeem.g.abodunrin@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220629174350.384910-11-matthew.auld@intel.com
parent 11f01dcf
drivers/gpu/drm/i915/gem/i915_gem_object.c: +21 −0
@@ -783,10 +783,31 @@ int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
				    intr, MAX_SCHEDULE_TIMEOUT);
	if (!ret)
		ret = -ETIME;
	else if (ret > 0 && i915_gem_object_has_unknown_state(obj))
		ret = -EIO;

	return ret < 0 ? ret : 0;
}

/**
 * i915_gem_object_has_unknown_state - Return true if the object backing pages are
 * in an unknown_state. This means that userspace must NEVER be allowed to touch
 * the pages, with either the GPU or CPU.
 *
 * ONLY valid to be called after ensuring that all kernel fences have signalled
 * (in particular the fence for moving/clearing the object).
 */
bool i915_gem_object_has_unknown_state(struct drm_i915_gem_object *obj)
{
	/*
	 * The below barrier pairs with the dma_fence_signal() in
	 * __memcpy_work(). We should only sample the unknown_state after all
	 * the kernel fences have signalled.
	 */
	smp_rmb();
	return obj->mm.unknown_state;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/huge_gem_object.c"
#include "selftests/huge_pages.c"
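
For context, a hypothetical caller of i915_gem_object_wait_moving_fence()
(a sketch; the helper name is made up): any path about to touch the
backing store waits on the moving fence first, and the new -EIO return
means the pages must never be touched:

	static int sketch_pin_for_access(struct drm_i915_gem_object *obj)
	{
		int ret;

		assert_object_held(obj);

		/*
		 * -ERESTARTSYS if interrupted, -ETIME on timeout, or the
		 * new -EIO when the object landed in an unknown state.
		 */
		ret = i915_gem_object_wait_moving_fence(obj, true);
		if (ret)
			return ret;

		/* Safe to map or pin the pages from here on. */
		return 0;
	}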
drivers/gpu/drm/i915/gem/i915_gem_object.h: +1 −0
@@ -524,6 +524,7 @@ int i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj,
				     struct dma_fence **fence);
int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
				      bool intr);
bool i915_gem_object_has_unknown_state(struct drm_i915_gem_object *obj);

void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level);
drivers/gpu/drm/i915/gem/i915_gem_object_types.h: +18 −0
@@ -547,6 +547,24 @@ struct drm_i915_gem_object {
		 */
		bool ttm_shrinkable;

		/**
		 * @unknown_state: Indicate that the object is effectively
		 * borked. This is write-once and set if we somehow encounter a
		 * fatal error when moving/clearing the pages, and we are not
		 * able to fallback to memcpy/memset, like on small-BAR systems.
		 * The GPU should also be wedged (or in the process) at this
		 * point.
		 *
		 * Only valid to read this after acquiring the dma-resv lock and
		 * waiting for all DMA_RESV_USAGE_KERNEL fences to be signalled,
		 * or if we otherwise know that the moving fence has signalled,
		 * and we are certain the pages underneath are valid for
		 * immediate access (under normal operation), like just prior to
		 * binding the object or when setting up the CPU fault handler.
		 * See i915_gem_object_has_unknown_state();
		 */
		bool unknown_state;

		/**
		 * Priority list of potential placements for this object.
		 */
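
A sketch of the read protocol the @unknown_state kernel-doc describes
(hypothetical helper, error handling elided): hold the dma-resv lock,
wait out all kernel fences, then sample through the accessor that issues
the read barrier:

	static bool sketch_object_is_borked(struct drm_i915_gem_object *obj)
	{
		assert_object_held(obj);

		/* Every pending move/clear must have signalled first. */
		dma_resv_wait_timeout(obj->base.resv, DMA_RESV_USAGE_KERNEL,
				      false, MAX_SCHEDULE_TIMEOUT);

		return i915_gem_object_has_unknown_state(obj);
	}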
drivers/gpu/drm/i915/gem/i915_gem_ttm.c: +25 −1
@@ -675,7 +675,15 @@ static void i915_ttm_swap_notify(struct ttm_buffer_object *bo)
		i915_ttm_purge(obj);
}

static bool i915_ttm_resource_mappable(struct ttm_resource *res)
/**
 * i915_ttm_resource_mappable - Return true if the ttm resource is CPU
 * accessible.
 * @res: The TTM resource to check.
 *
 * This is interesting on small-BAR systems where we may encounter lmem objects
 * that can't be accessed via the CPU.
 */
bool i915_ttm_resource_mappable(struct ttm_resource *res)
{
	struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);

@@ -687,6 +695,22 @@ static bool i915_ttm_resource_mappable(struct ttm_resource *res)

static int i915_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{
	struct drm_i915_gem_object *obj = i915_ttm_to_gem(mem->bo);
	bool unknown_state;

	if (!obj)
		return -EINVAL;

	if (!kref_get_unless_zero(&obj->base.refcount))
		return -EINVAL;

	assert_object_held(obj);

	unknown_state = i915_gem_object_has_unknown_state(obj);
	i915_gem_object_put(obj);
	if (unknown_state)
		return -EINVAL;

	if (!i915_ttm_cpu_maps_iomem(mem))
		return 0;

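How the -EINVAL above surfaces to userspace, sketched from the TTM fault
path (simplified; ttm_bo_vm_fault_reserved() is the real entry point, and
ttm_mem_io_reserve() is what calls the driver's io_mem_reserve hook):

	vm_fault_t sketch_fault_reserved(struct ttm_buffer_object *bo)
	{
		/*
		 * The io_mem_reserve() hook above returns -EINVAL for an
		 * unknown_state object, so the fault ends in SIGBUS rather
		 * than mapping memory with unknown contents.
		 */
		if (ttm_mem_io_reserve(bo->bdev, bo->resource))
			return VM_FAULT_SIGBUS;

		/* ... normal path: set up the PTEs ... */
		return 0;
	}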
drivers/gpu/drm/i915/gem/i915_gem_ttm.h: +3 −0
@@ -92,4 +92,7 @@ static inline bool i915_ttm_cpu_maps_iomem(struct ttm_resource *mem)
	/* Once / if we support GGTT, this is also false for cached ttm_tts */
	return mem->mem_type != I915_PL_SYSTEM;
}

bool i915_ttm_resource_mappable(struct ttm_resource *res);

#endif
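
A hedged guess at why i915_ttm_resource_mappable() is now exported (the
helper below is hypothetical; it mirrors the commit message's point that
the memcpy/memset fallback is only possible when the CPU can actually
reach the memory, which is not a given on small-BAR parts):

	static bool sketch_memcpy_fallback_allowed(struct ttm_resource *src,
						   struct ttm_resource *dst)
	{
		/*
		 * lmem outside the mappable window cannot be touched by the
		 * CPU, so no memcpy/memset fallback exists for it.
		 */
		return i915_ttm_resource_mappable(src) &&
		       i915_ttm_resource_mappable(dst);
	}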