Commit c8c197d4 authored by Chris Wilson

drm/i915/selftests: Use a random engine for GEM coherency tests

Select a random user-accessible engine for checking coherency results.
While we should check all engines, we use a random selection so that
over repeated runs we cover them all.
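
For reference, the selection helper this patch introduces (shown again below with brief comments) makes two passes over the uabi engine list: one to count the engines exposed to userspace, and one to walk to a uniformly random index drawn from the test's PRNG state:

static struct intel_engine_cs *
random_engine(struct drm_i915_private *i915, struct rnd_state *prng)
{
	struct intel_engine_cs *engine;
	unsigned int count;

	/* First pass: count the engines exposed to userspace. */
	count = 0;
	for_each_uabi_engine(engine, i915)
		count++;

	/* Pick an index uniformly in [0, count) from the selftest PRNG. */
	count = i915_prandom_u32_max_state(count, prng);

	/* Second pass: walk to the chosen engine. */
	for_each_uabi_engine(engine, i915)
		if (count-- == 0)
			return engine;

	return NULL;
}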

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191027225808.19437-4-chris@chris-wilson.co.uk
parent 96815f3d
+89 −79
@@ -13,9 +13,12 @@
#include "i915_selftest.h"
#include "selftests/i915_random.h"

static int cpu_set(struct drm_i915_gem_object *obj,
		   unsigned long offset,
		   u32 v)
struct context {
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
};

static int cpu_set(struct context *ctx, unsigned long offset, u32 v)
{
	unsigned int needs_clflush;
	struct page *page;
@@ -23,11 +26,11 @@ static int cpu_set(struct drm_i915_gem_object *obj,
	u32 *cpu;
	int err;

	err = i915_gem_object_prepare_write(obj, &needs_clflush);
	err = i915_gem_object_prepare_write(ctx->obj, &needs_clflush);
	if (err)
		return err;

	page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
	page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT);
	map = kmap_atomic(page);
	cpu = map + offset_in_page(offset);

@@ -40,14 +43,12 @@ static int cpu_set(struct drm_i915_gem_object *obj,
		drm_clflush_virt_range(cpu, sizeof(*cpu));

	kunmap_atomic(map);
	i915_gem_object_finish_access(obj);
	i915_gem_object_finish_access(ctx->obj);

	return 0;
}

static int cpu_get(struct drm_i915_gem_object *obj,
		   unsigned long offset,
		   u32 *v)
static int cpu_get(struct context *ctx, unsigned long offset, u32 *v)
{
	unsigned int needs_clflush;
	struct page *page;
@@ -55,11 +56,11 @@ static int cpu_get(struct drm_i915_gem_object *obj,
	u32 *cpu;
	int err;

	err = i915_gem_object_prepare_read(obj, &needs_clflush);
	err = i915_gem_object_prepare_read(ctx->obj, &needs_clflush);
	if (err)
		return err;

	page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
	page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT);
	map = kmap_atomic(page);
	cpu = map + offset_in_page(offset);

@@ -69,26 +70,24 @@ static int cpu_get(struct drm_i915_gem_object *obj,
	*v = *cpu;

	kunmap_atomic(map);
	i915_gem_object_finish_access(obj);
	i915_gem_object_finish_access(ctx->obj);

	return 0;
}

static int gtt_set(struct drm_i915_gem_object *obj,
		   unsigned long offset,
		   u32 v)
static int gtt_set(struct context *ctx, unsigned long offset, u32 v)
{
	struct i915_vma *vma;
	u32 __iomem *map;
	int err = 0;

	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	i915_gem_object_unlock(obj);
	i915_gem_object_lock(ctx->obj);
	err = i915_gem_object_set_to_gtt_domain(ctx->obj, true);
	i915_gem_object_unlock(ctx->obj);
	if (err)
		return err;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	vma = i915_gem_object_ggtt_pin(ctx->obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

@@ -109,21 +108,19 @@ static int gtt_set(struct drm_i915_gem_object *obj,
	return err;
}

static int gtt_get(struct drm_i915_gem_object *obj,
		   unsigned long offset,
		   u32 *v)
static int gtt_get(struct context *ctx, unsigned long offset, u32 *v)
{
	struct i915_vma *vma;
	u32 __iomem *map;
	int err = 0;

	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_gtt_domain(obj, false);
	i915_gem_object_unlock(obj);
	i915_gem_object_lock(ctx->obj);
	err = i915_gem_object_set_to_gtt_domain(ctx->obj, false);
	i915_gem_object_unlock(ctx->obj);
	if (err)
		return err;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	vma = i915_gem_object_ggtt_pin(ctx->obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

@@ -144,73 +141,66 @@ static int gtt_get(struct drm_i915_gem_object *obj,
	return err;
}

static int wc_set(struct drm_i915_gem_object *obj,
		  unsigned long offset,
		  u32 v)
static int wc_set(struct context *ctx, unsigned long offset, u32 v)
{
	u32 *map;
	int err;

	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_wc_domain(obj, true);
	i915_gem_object_unlock(obj);
	i915_gem_object_lock(ctx->obj);
	err = i915_gem_object_set_to_wc_domain(ctx->obj, true);
	i915_gem_object_unlock(ctx->obj);
	if (err)
		return err;

	map = i915_gem_object_pin_map(obj, I915_MAP_WC);
	map = i915_gem_object_pin_map(ctx->obj, I915_MAP_WC);
	if (IS_ERR(map))
		return PTR_ERR(map);

	map[offset / sizeof(*map)] = v;
	i915_gem_object_unpin_map(obj);
	i915_gem_object_unpin_map(ctx->obj);

	return 0;
}

static int wc_get(struct drm_i915_gem_object *obj,
		  unsigned long offset,
		  u32 *v)
static int wc_get(struct context *ctx, unsigned long offset, u32 *v)
{
	u32 *map;
	int err;

	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_wc_domain(obj, false);
	i915_gem_object_unlock(obj);
	i915_gem_object_lock(ctx->obj);
	err = i915_gem_object_set_to_wc_domain(ctx->obj, false);
	i915_gem_object_unlock(ctx->obj);
	if (err)
		return err;

	map = i915_gem_object_pin_map(obj, I915_MAP_WC);
	map = i915_gem_object_pin_map(ctx->obj, I915_MAP_WC);
	if (IS_ERR(map))
		return PTR_ERR(map);

	*v = map[offset / sizeof(*map)];
	i915_gem_object_unpin_map(obj);
	i915_gem_object_unpin_map(ctx->obj);

	return 0;
}

static int gpu_set(struct drm_i915_gem_object *obj,
		   unsigned long offset,
		   u32 v)
static int gpu_set(struct context *ctx, unsigned long offset, u32 v)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_request *rq;
	struct i915_vma *vma;
	u32 *cs;
	int err;

	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	i915_gem_object_unlock(obj);
	i915_gem_object_lock(ctx->obj);
	err = i915_gem_object_set_to_gtt_domain(ctx->obj, true);
	i915_gem_object_unlock(ctx->obj);
	if (err)
		return err;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
	vma = i915_gem_object_ggtt_pin(ctx->obj, NULL, 0, 0, 0);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	rq = i915_request_create(i915->engine[RCS0]->kernel_context);
	rq = i915_request_create(ctx->engine->kernel_context);
	if (IS_ERR(rq)) {
		i915_vma_unpin(vma);
		return PTR_ERR(rq);
@@ -223,12 +213,12 @@ static int gpu_set(struct drm_i915_gem_object *obj,
		return PTR_ERR(cs);
	}

	if (INTEL_GEN(i915) >= 8) {
	if (INTEL_GEN(ctx->engine->i915) >= 8) {
		*cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22;
		*cs++ = lower_32_bits(i915_ggtt_offset(vma) + offset);
		*cs++ = upper_32_bits(i915_ggtt_offset(vma) + offset);
		*cs++ = v;
	} else if (INTEL_GEN(i915) >= 4) {
	} else if (INTEL_GEN(ctx->engine->i915) >= 4) {
		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
		*cs++ = 0;
		*cs++ = i915_ggtt_offset(vma) + offset;
@@ -253,32 +243,29 @@ static int gpu_set(struct drm_i915_gem_object *obj,
	return err;
}

static bool always_valid(struct drm_i915_private *i915)
static bool always_valid(struct context *ctx)
{
	return true;
}

static bool needs_fence_registers(struct drm_i915_private *i915)
static bool needs_fence_registers(struct context *ctx)
{
	return !intel_gt_is_wedged(&i915->gt);
	return !intel_gt_is_wedged(ctx->engine->gt);
}

static bool needs_mi_store_dword(struct drm_i915_private *i915)
static bool needs_mi_store_dword(struct context *ctx)
{
	if (intel_gt_is_wedged(&i915->gt))
		return false;

	if (!HAS_ENGINE(i915, RCS0))
	if (intel_gt_is_wedged(ctx->engine->gt))
		return false;

	return intel_engine_can_store_dword(i915->engine[RCS0]);
	return intel_engine_can_store_dword(ctx->engine);
}

static const struct igt_coherency_mode {
	const char *name;
	int (*set)(struct drm_i915_gem_object *, unsigned long offset, u32 v);
	int (*get)(struct drm_i915_gem_object *, unsigned long offset, u32 *v);
	bool (*valid)(struct drm_i915_private *i915);
	int (*set)(struct context *ctx, unsigned long offset, u32 v);
	int (*get)(struct context *ctx, unsigned long offset, u32 *v);
	bool (*valid)(struct context *ctx);
} igt_coherency_mode[] = {
	{ "cpu", cpu_set, cpu_get, always_valid },
	{ "gtt", gtt_set, gtt_get, needs_fence_registers },
@@ -287,18 +274,37 @@ static const struct igt_coherency_mode {
	{ },
};

static struct intel_engine_cs *
random_engine(struct drm_i915_private *i915, struct rnd_state *prng)
{
	struct intel_engine_cs *engine;
	unsigned int count;

	count = 0;
	for_each_uabi_engine(engine, i915)
		count++;

	count = i915_prandom_u32_max_state(count, prng);
	for_each_uabi_engine(engine, i915)
		if (count-- == 0)
			return engine;

	return NULL;
}

static int igt_gem_coherency(void *arg)
{
	const unsigned int ncachelines = PAGE_SIZE/64;
	I915_RND_STATE(prng);
	struct drm_i915_private *i915 = arg;
	const struct igt_coherency_mode *read, *write, *over;
	struct drm_i915_gem_object *obj;
	unsigned long count, n;
	u32 *offsets, *values;
	I915_RND_STATE(prng);
	struct context ctx;
	int err = 0;

	/* We repeatedly write, overwrite and read from a sequence of
	/*
	 * We repeatedly write, overwrite and read from a sequence of
	 * cachelines in order to try and detect incoherency (unflushed writes
	 * from either the CPU or GPU). Each setter/getter uses our cache
	 * domain API which should prevent incoherency.
@@ -312,31 +318,35 @@ static int igt_gem_coherency(void *arg)

	values = offsets + ncachelines;

	ctx.engine = random_engine(i915, &prng);
	GEM_BUG_ON(!ctx.engine);
	pr_info("%s: using %s\n", __func__, ctx.engine->name);

	for (over = igt_coherency_mode; over->name; over++) {
		if (!over->set)
			continue;

		if (!over->valid(i915))
		if (!over->valid(&ctx))
			continue;

		for (write = igt_coherency_mode; write->name; write++) {
			if (!write->set)
				continue;

			if (!write->valid(i915))
			if (!write->valid(&ctx))
				continue;

			for (read = igt_coherency_mode; read->name; read++) {
				if (!read->get)
					continue;

				if (!read->valid(i915))
				if (!read->valid(&ctx))
					continue;

				for_each_prime_number_from(count, 1, ncachelines) {
					obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
					if (IS_ERR(obj)) {
						err = PTR_ERR(obj);
					ctx.obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
					if (IS_ERR(ctx.obj)) {
						err = PTR_ERR(ctx.obj);
						goto free;
					}

@@ -345,7 +355,7 @@ static int igt_gem_coherency(void *arg)
						values[n] = prandom_u32_state(&prng);

					for (n = 0; n < count; n++) {
						err = over->set(obj, offsets[n], ~values[n]);
						err = over->set(&ctx, offsets[n], ~values[n]);
						if (err) {
							pr_err("Failed to set stale value[%ld/%ld] in object using %s, err=%d\n",
							       n, count, over->name, err);
@@ -354,7 +364,7 @@ static int igt_gem_coherency(void *arg)
					}

					for (n = 0; n < count; n++) {
						err = write->set(obj, offsets[n], values[n]);
						err = write->set(&ctx, offsets[n], values[n]);
						if (err) {
							pr_err("Failed to set value[%ld/%ld] in object using %s, err=%d\n",
							       n, count, write->name, err);
@@ -365,7 +375,7 @@ static int igt_gem_coherency(void *arg)
					for (n = 0; n < count; n++) {
						u32 found;

						err = read->get(obj, offsets[n], &found);
						err = read->get(&ctx, offsets[n], &found);
						if (err) {
							pr_err("Failed to get value[%ld/%ld] in object using %s, err=%d\n",
							       n, count, read->name, err);
@@ -383,7 +393,7 @@ static int igt_gem_coherency(void *arg)
						}
					}

					i915_gem_object_put(obj);
					i915_gem_object_put(ctx.obj);
				}
			}
		}
@@ -393,7 +403,7 @@ static int igt_gem_coherency(void *arg)
	return err;

put_object:
	i915_gem_object_put(obj);
	i915_gem_object_put(ctx.obj);
	goto free;
}