Commit eeb52ee6 authored by Matthew Auld, committed by Chris Wilson
Browse files

drm/i915: clear the shadow batch



The shadow batch is an internal object, which doesn't have any page
clearing, and since the batch_len can be smaller than the object, we
should take care to clear it.

Testcase: igt/gen9_exec_parse/shadow-peek
Fixes: 4f7af194 ("drm/i915: Support ro ppgtt mapped cmdparser shadow buffers")
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20201224151358.401345-1-matthew.auld@intel.com
Cc: stable@vger.kernel.org
parent 177b7a52
Loading
Loading
Loading
Loading
+9 −18
Original line number Diff line number Diff line
@@ -1167,7 +1167,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
		}
	}
	if (IS_ERR(src)) {
		unsigned long x, n;
		unsigned long x, n, remain;
		void *ptr;

		/*
@@ -1178,14 +1178,15 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
		 * We don't care about copying too much here as we only
		 * validate up to the end of the batch.
		 */
		remain = length;
		if (!(dst_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
			length = round_up(length,
			remain = round_up(remain,
					  boot_cpu_data.x86_clflush_size);

		ptr = dst;
		x = offset_in_page(offset);
		for (n = offset >> PAGE_SHIFT; length; n++) {
			int len = min(length, PAGE_SIZE - x);
		for (n = offset >> PAGE_SHIFT; remain; n++) {
			int len = min(remain, PAGE_SIZE - x);

			src = kmap_atomic(i915_gem_object_get_page(src_obj, n));
			if (needs_clflush)
@@ -1194,13 +1195,15 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
			kunmap_atomic(src);

			ptr += len;
			length -= len;
			remain -= len;
			x = 0;
		}
	}

	i915_gem_object_unpin_pages(src_obj);

	memset32(dst + length, 0, (dst_obj->base.size - length) / sizeof(u32));

	/* dst_obj is returned with vmap pinned */
	return dst;
}
@@ -1393,11 +1396,6 @@ static unsigned long *alloc_whitelist(u32 batch_length)

#define LENGTH_BIAS 2

/*
 * shadow_needs_clflush() - report whether CPU writes to the shadow batch
 * object require an explicit cache flush: true when the object's mapping is
 * not cache-coherent for writes (I915_BO_CACHE_COHERENT_FOR_WRITE unset).
 *
 * NOTE(review): this helper is shown being removed by this commit; its two
 * call sites are replaced by a single i915_gem_object_flush_map() call.
 */
static bool shadow_needs_clflush(struct drm_i915_gem_object *obj)
{
	return !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
}

/**
 * intel_engine_cmd_parser() - parse a batch buffer for privilege violations
 * @engine: the engine on which the batch is to execute
@@ -1539,16 +1537,9 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
				ret = 0; /* allow execution */
			}
		}

		if (shadow_needs_clflush(shadow->obj))
			drm_clflush_virt_range(batch_end, 8);
	}

	if (shadow_needs_clflush(shadow->obj)) {
		void *ptr = page_mask_bits(shadow->obj->mm.mapping);

		drm_clflush_virt_range(ptr, (void *)(cmd + 1) - ptr);
	}
	i915_gem_object_flush_map(shadow->obj);

	if (!IS_ERR_OR_NULL(jump_whitelist))
		kfree(jump_whitelist);