Commit 6f2f7c83 authored by Dave Airlie's avatar Dave Airlie
Browse files

Merge tag 'drm-intel-gt-next-2021-10-21' of...

Merge tag 'drm-intel-gt-next-2021-10-21' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

UAPI Changes:

- Expose multi-LRC submission interface

  Similar to the bonded submission interface but simplified.
  Comes with GuC only implementation for now. See kerneldoc
  for more details.

  Userspace changes: https://github.com/intel/media-driver/pull/1252

- Expose logical engine instance to user

  Needed by the multi-LRC submission interface for GuC

  Userspace changes: https://github.com/intel/media-driver/pull/1252



Driver Changes:

- Fix blank screen booting crashes when CONFIG_CC_OPTIMIZE_FOR_SIZE=y (Hugh)
- Add support for multi-LRC submission in the GuC backend (Matt B)
- Add extra cache flushing before making pages userspace visible (Matt A, Thomas)
- Mark internal GPU object pages dirty so they will be flushed properly (Matt A)

- Move remaining debugfs interfaces i915_wedged/i915_forcewake_user into gt (Andi)
- Replace the unconditional clflushes with drm_clflush_virt_range() (Ville)
- Remove IS_ACTIVE macro completely (Lucas)
- Improve kerneldocs for cache_dirty (Matt A)

- Add missing includes (Lucas)
- Selftest improvements (Matt R, Ran, Matt A)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/YXFmLKoq8Fg9JxSd@jlahtine-mobl.ger.corp.intel.com
parents 94ff371e ab5d964c
Loading
Loading
Loading
Loading
+0 −122
Original line number Original line Diff line number Diff line
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#define I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT 2 /* see i915_context_engines_parallel_submit */

/**
 * struct drm_i915_context_engines_parallel_submit - Configure engine for
 * parallel submission.
 *
 * Setup a slot in the context engine map to allow multiple BBs to be submitted
 * in a single execbuf IOCTL. Those BBs will then be scheduled to run on the GPU
 * in parallel. Multiple hardware contexts are created internally in the i915
 * to run these BBs. Once a slot is configured for N BBs only N BBs can be
 * submitted in each execbuf IOCTL and this is implicit behavior e.g. the user
 * doesn't tell the execbuf IOCTL there are N BBs, the execbuf IOCTL knows how
 * many BBs there are based on the slot's configuration. The N BBs are the last
 * N buffer objects or first N if I915_EXEC_BATCH_FIRST is set.
 *
 * The default placement behavior is to create implicit bonds between each
 * context if each context maps to more than 1 physical engine (e.g. context is
 * a virtual engine). Also we only allow contexts of same engine class and these
 * contexts must be in logically contiguous order. Examples of the placement
 * behavior are described below. Lastly, the default is to not allow BBs to be
 * preempted mid-BB; rather, coordinated preemption is inserted on all hardware
 * contexts between each set of BBs. Flags may be added in the future to change
 * both of these default behaviors.
 *
 * Returns -EINVAL if hardware context placement configuration is invalid or if
 * the placement configuration isn't supported on the platform / submission
 * interface.
 * Returns -ENODEV if extension isn't supported on the platform / submission
 * interface.
 *
 * .. code-block:: none
 *
 *	Example 1 pseudo code:
 *	CS[X] = generic engine of same class, logical instance X
 *	INVALID = I915_ENGINE_CLASS_INVALID, I915_ENGINE_CLASS_INVALID_NONE
 *	set_engines(INVALID)
 *	set_parallel(engine_index=0, width=2, num_siblings=1,
 *		     engines=CS[0],CS[1])
 *
 *	Results in the following valid placement:
 *	CS[0], CS[1]
 *
 *	Example 2 pseudo code:
 *	CS[X] = generic engine of same class, logical instance X
 *	INVALID = I915_ENGINE_CLASS_INVALID, I915_ENGINE_CLASS_INVALID_NONE
 *	set_engines(INVALID)
 *	set_parallel(engine_index=0, width=2, num_siblings=2,
 *		     engines=CS[0],CS[2],CS[1],CS[3])
 *
 *	Results in the following valid placements:
 *	CS[0], CS[1]
 *	CS[2], CS[3]
 *
 *	This can also be thought of as 2 virtual engines described by a 2-D
 *	array in the engines field with bonds placed between each index of the
 *	virtual engines. e.g. CS[0] is bonded to CS[1], CS[2] is bonded to
 *	CS[3].
 *	VE[0] = CS[0], CS[2]
 *	VE[1] = CS[1], CS[3]
 *
 *	Example 3 pseudo code:
 *	CS[X] = generic engine of same class, logical instance X
 *	INVALID = I915_ENGINE_CLASS_INVALID, I915_ENGINE_CLASS_INVALID_NONE
 *	set_engines(INVALID)
 *	set_parallel(engine_index=0, width=2, num_siblings=2,
 *		     engines=CS[0],CS[1],CS[1],CS[3])
 *
 *	Results in the following valid and invalid placements:
 *	CS[0], CS[1]
 *	CS[1], CS[3] - Not logically contiguous, return -EINVAL
 */
struct drm_i915_context_engines_parallel_submit {
	/**
	 * @base: base user extension.
	 */
	struct i915_user_extension base;

	/**
	 * @engine_index: slot for parallel engine
	 */
	__u16 engine_index;

	/**
	 * @width: number of contexts per parallel engine
	 */
	__u16 width;

	/**
	 * @num_siblings: number of siblings per context
	 */
	__u16 num_siblings;

	/**
	 * @mbz16: reserved for future use; must be zero
	 */
	__u16 mbz16;

	/**
	 * @flags: all undefined flags must be zero; currently no flags are
	 * defined
	 */
	__u64 flags;

	/**
	 * @mbz64: reserved for future use; must be zero
	 */
	__u64 mbz64[3];

	/**
	 * @engines: 2-d array of engine instances to configure parallel engine
	 *
	 * length = width (i) * num_siblings (j)
	 * index = j + i * num_siblings
	 */
	struct i915_engine_class_instance engines[0];

} __packed;
+2 −2
Original line number Original line Diff line number Diff line
@@ -135,8 +135,8 @@ Add I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT and
drm_i915_context_engines_parallel_submit to the uAPI to implement this
drm_i915_context_engines_parallel_submit to the uAPI to implement this
extension.
extension.


.. kernel-doc:: Documentation/gpu/rfc/i915_parallel_execbuf.h
.. kernel-doc:: include/uapi/drm/i915_drm.h
        :functions: drm_i915_context_engines_parallel_submit
        :functions: i915_context_engines_parallel_submit


Extend execbuf2 IOCTL to support submitting N BBs in a single IOCTL
Extend execbuf2 IOCTL to support submitting N BBs in a single IOCTL
-------------------------------------------------------------------
-------------------------------------------------------------------
+45 −12
Original line number Original line Diff line number Diff line
@@ -4,6 +4,8 @@
 * Copyright © 2014-2016 Intel Corporation
 * Copyright © 2014-2016 Intel Corporation
 */
 */


#include <linux/dma-fence-array.h>

#include "gt/intel_engine.h"
#include "gt/intel_engine.h"


#include "i915_gem_ioctls.h"
#include "i915_gem_ioctls.h"
@@ -36,7 +38,7 @@ static __always_inline u32 __busy_write_id(u16 id)
}
}


static __always_inline unsigned int
static __always_inline unsigned int
__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u16 id))
__busy_set_if_active(struct dma_fence *fence, u32 (*flag)(u16 id))
{
{
	const struct i915_request *rq;
	const struct i915_request *rq;


@@ -46,13 +48,43 @@ __busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u16 id))
	 * to eventually flush us, but to minimise latency just ask the
	 * to eventually flush us, but to minimise latency just ask the
	 * hardware.
	 * hardware.
	 *
	 *
	 * Note we only report on the status of native fences.
	 * Note we only report on the status of native fences and we currently
	 * have two native fences:
	 *
	 * 1. A composite fence (dma_fence_array) constructed of i915 requests
	 * created during a parallel submission. In this case we deconstruct the
	 * composite fence into individual i915 requests and check the status of
	 * each request.
	 *
	 * 2. A single i915 request.
	 */
	 */
	if (dma_fence_is_array(fence)) {
		struct dma_fence_array *array = to_dma_fence_array(fence);
		struct dma_fence **child = array->fences;
		unsigned int nchild = array->num_fences;

		do {
			struct dma_fence *current_fence = *child++;

			/* Not an i915 fence, can't be busy per above */
			if (!dma_fence_is_i915(current_fence) ||
			    !test_bit(I915_FENCE_FLAG_COMPOSITE,
				      &current_fence->flags)) {
				return 0;
			}

			rq = to_request(current_fence);
			if (!i915_request_completed(rq))
				return flag(rq->engine->uabi_class);
		} while (--nchild);

		/* All requests in array complete, not busy */
		return 0;
	} else {
		if (!dma_fence_is_i915(fence))
		if (!dma_fence_is_i915(fence))
			return 0;
			return 0;


	/* opencode to_request() in order to avoid const warnings */
		rq = to_request(fence);
	rq = container_of(fence, const struct i915_request, fence);
		if (i915_request_completed(rq))
		if (i915_request_completed(rq))
			return 0;
			return 0;


@@ -60,15 +92,16 @@ __busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u16 id))
		BUILD_BUG_ON(!typecheck(u16, rq->engine->uabi_class));
		BUILD_BUG_ON(!typecheck(u16, rq->engine->uabi_class));
		return flag(rq->engine->uabi_class);
		return flag(rq->engine->uabi_class);
	}
	}
}


static __always_inline unsigned int
static __always_inline unsigned int
busy_check_reader(const struct dma_fence *fence)
busy_check_reader(struct dma_fence *fence)
{
{
	return __busy_set_if_active(fence, __busy_read_flag);
	return __busy_set_if_active(fence, __busy_read_flag);
}
}


static __always_inline unsigned int
static __always_inline unsigned int
busy_check_writer(const struct dma_fence *fence)
busy_check_writer(struct dma_fence *fence)
{
{
	if (!fence)
	if (!fence)
		return 0;
		return 0;
+225 −2
Original line number Original line Diff line number Diff line
@@ -556,9 +556,147 @@ set_proto_ctx_engines_bond(struct i915_user_extension __user *base, void *data)
	return 0;
	return 0;
}
}


/*
 * set_proto_ctx_engines_parallel_submit - configure a proto-context engine
 * slot for parallel (multi-LRC) submission.
 *
 * Validates the user supplied i915_context_engines_parallel_submit extension
 * and, on success, stores the width x num_siblings array of physical engines
 * in the chosen slot of the proto-context engine map.
 *
 * Returns: 0 on success, -ENODEV when the submission backend lacks support,
 * -EFAULT on a faulting user copy, -ENOMEM on allocation failure and -EINVAL
 * for an invalid placement configuration.
 */
static int
set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base,
				      void *data)
{
	struct i915_context_engines_parallel_submit __user *ext =
		container_of_user(base, typeof(*ext), base);
	const struct set_proto_ctx_engines *set = data;
	struct drm_i915_private *i915 = set->i915;
	u64 flags;
	int err = 0, n, i, j;
	u16 slot, width, num_siblings;
	struct intel_engine_cs **siblings = NULL;
	/* Initialized to silence maybe-uninitialized warnings; only read i > 0 */
	intel_engine_mask_t prev_mask = 0;
	/*
	 * Must persist across rows of the engines array: the class-consistency
	 * check below compares each engine against the previous one, including
	 * across row boundaries (n != 0 at i > 0, j == 0).
	 */
	struct i915_engine_class_instance prev_engine = { };

	/* FIXME: This is NIY for execlists */
	if (!(intel_uc_uses_guc_submission(&i915->gt.uc)))
		return -ENODEV;

	if (get_user(slot, &ext->engine_index))
		return -EFAULT;

	if (get_user(width, &ext->width))
		return -EFAULT;

	if (get_user(num_siblings, &ext->num_siblings))
		return -EFAULT;

	if (slot >= set->num_engines) {
		drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
			slot, set->num_engines);
		return -EINVAL;
	}

	if (set->engines[slot].type != I915_GEM_ENGINE_TYPE_INVALID) {
		drm_dbg(&i915->drm,
			"Invalid placement[%d], already occupied\n", slot);
		return -EINVAL;
	}

	if (get_user(flags, &ext->flags))
		return -EFAULT;

	/* No flags are defined yet; reject anything set for forward compat */
	if (flags) {
		drm_dbg(&i915->drm, "Unknown flags 0x%02llx", flags);
		return -EINVAL;
	}

	/* Reserved fields must be zero so they can be repurposed later */
	for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
		err = check_user_mbz(&ext->mbz64[n]);
		if (err)
			return err;
	}

	if (width < 2) {
		drm_dbg(&i915->drm, "Width (%d) < 2\n", width);
		return -EINVAL;
	}

	if (num_siblings < 1) {
		drm_dbg(&i915->drm, "Number siblings (%d) < 1\n",
			num_siblings);
		return -EINVAL;
	}

	/*
	 * Widen before multiplying: both operands are u16 and would otherwise
	 * be multiplied as (signed) int, which can overflow for maximal
	 * user-supplied values before kmalloc_array() can check anything.
	 */
	siblings = kmalloc_array((unsigned long)num_siblings * width,
				 sizeof(*siblings),
				 GFP_KERNEL);
	if (!siblings)
		return -ENOMEM;

	/* Create contexts / engines */
	for (i = 0; i < width; ++i) {
		intel_engine_mask_t current_mask = 0;

		for (j = 0; j < num_siblings; ++j) {
			struct i915_engine_class_instance ci;

			n = i * num_siblings + j;
			if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
				err = -EFAULT;
				goto out_err;
			}

			siblings[n] =
				intel_engine_lookup_user(i915, ci.engine_class,
							 ci.engine_instance);
			if (!siblings[n]) {
				drm_dbg(&i915->drm,
					"Invalid sibling[%d]: { class:%d, inst:%d }\n",
					n, ci.engine_class, ci.engine_instance);
				err = -EINVAL;
				goto out_err;
			}

			/* Every engine in the 2-D array must share one class */
			if (n) {
				if (prev_engine.engine_class !=
				    ci.engine_class) {
					drm_dbg(&i915->drm,
						"Mismatched class %d, %d\n",
						prev_engine.engine_class,
						ci.engine_class);
					err = -EINVAL;
					goto out_err;
				}
			}

			prev_engine = ci;
			current_mask |= siblings[n]->logical_mask;
		}

		/* Rows must be logically contiguous: each one shifted by 1 */
		if (i > 0) {
			if (current_mask != prev_mask << 1) {
				drm_dbg(&i915->drm,
					"Non contiguous logical mask 0x%x, 0x%x\n",
					prev_mask, current_mask);
				err = -EINVAL;
				goto out_err;
			}
		}
		prev_mask = current_mask;
	}

	set->engines[slot].type = I915_GEM_ENGINE_TYPE_PARALLEL;
	set->engines[slot].num_siblings = num_siblings;
	set->engines[slot].width = width;
	set->engines[slot].siblings = siblings;

	return 0;

out_err:
	kfree(siblings);

	return err;
}

static const i915_user_extension_fn set_proto_ctx_engines_extensions[] = {
static const i915_user_extension_fn set_proto_ctx_engines_extensions[] = {
	[I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_proto_ctx_engines_balance,
	[I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_proto_ctx_engines_balance,
	[I915_CONTEXT_ENGINES_EXT_BOND] = set_proto_ctx_engines_bond,
	[I915_CONTEXT_ENGINES_EXT_BOND] = set_proto_ctx_engines_bond,
	[I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT] =
		set_proto_ctx_engines_parallel_submit,
};
};


static int set_proto_ctx_engines(struct drm_i915_file_private *fpriv,
static int set_proto_ctx_engines(struct drm_i915_file_private *fpriv,
@@ -794,6 +932,7 @@ static int intel_context_set_gem(struct intel_context *ce,
	GEM_BUG_ON(rcu_access_pointer(ce->gem_context));
	GEM_BUG_ON(rcu_access_pointer(ce->gem_context));
	RCU_INIT_POINTER(ce->gem_context, ctx);
	RCU_INIT_POINTER(ce->gem_context, ctx);


	GEM_BUG_ON(intel_context_is_pinned(ce));
	ce->ring_size = SZ_16K;
	ce->ring_size = SZ_16K;


	i915_vm_put(ce->vm);
	i915_vm_put(ce->vm);
@@ -818,6 +957,25 @@ static int intel_context_set_gem(struct intel_context *ce,
	return ret;
	return ret;
}
}


/*
 * Walk the first @count slots of the engine map backwards and drop the
 * perma-pin (parent + all children) on every parallel context found.
 */
static void __unpin_engines(struct i915_gem_engines *e, unsigned int count)
{
	unsigned int idx = count;

	while (idx--) {
		struct intel_context *ce = e->engines[idx];
		struct intel_context *child;

		/* Only parallel contexts carry the perma-pin */
		if (!ce || !test_bit(CONTEXT_PERMA_PIN, &ce->flags))
			continue;

		for_each_child(ce, child)
			intel_context_unpin(child);
		intel_context_unpin(ce);
	}
}

/* Drop the perma-pins on every engine in the context's engine map. */
static void unpin_engines(struct i915_gem_engines *e)
{
	__unpin_engines(e, e->num_engines);
}

static void __free_engines(struct i915_gem_engines *e, unsigned int count)
static void __free_engines(struct i915_gem_engines *e, unsigned int count)
{
{
	while (count--) {
	while (count--) {
@@ -933,6 +1091,40 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx,
	return err;
	return err;
}
}


/*
 * Pin a parallel context set (parent plus all children) up front and mark the
 * parent CONTEXT_PERMA_PIN so unpin_engines() can find it later. On any
 * failure, every pin taken so far is dropped and the error is returned.
 */
static int perma_pin_contexts(struct intel_context *ce)
{
	struct intel_context *child;
	int i = 0, j = 0, ret;	/* i: children pinned so far, j: unwind cursor */

	GEM_BUG_ON(!intel_context_is_parent(ce));

	/* Parent first; nothing to unwind if this fails */
	ret = intel_context_pin(ce);
	if (unlikely(ret))
		return ret;

	for_each_child(ce, child) {
		ret = intel_context_pin(child);
		if (unlikely(ret))
			goto unwind;
		++i;
	}

	set_bit(CONTEXT_PERMA_PIN, &ce->flags);

	return 0;

unwind:
	/* Drop the parent pin, then only the i child pins already taken */
	intel_context_unpin(ce);
	for_each_child(ce, child) {
		if (j++ < i)
			intel_context_unpin(child);
		else
			break;
	}

	return ret;
}

static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx,
static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx,
					     unsigned int num_engines,
					     unsigned int num_engines,
					     struct i915_gem_proto_engine *pe)
					     struct i915_gem_proto_engine *pe)
@@ -946,7 +1138,7 @@ static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx,
	e->num_engines = num_engines;
	e->num_engines = num_engines;


	for (n = 0; n < num_engines; n++) {
	for (n = 0; n < num_engines; n++) {
		struct intel_context *ce;
		struct intel_context *ce, *child;
		int ret;
		int ret;


		switch (pe[n].type) {
		switch (pe[n].type) {
@@ -956,7 +1148,13 @@ static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx,


		case I915_GEM_ENGINE_TYPE_BALANCED:
		case I915_GEM_ENGINE_TYPE_BALANCED:
			ce = intel_engine_create_virtual(pe[n].siblings,
			ce = intel_engine_create_virtual(pe[n].siblings,
							 pe[n].num_siblings);
							 pe[n].num_siblings, 0);
			break;

		case I915_GEM_ENGINE_TYPE_PARALLEL:
			ce = intel_engine_create_parallel(pe[n].siblings,
							  pe[n].num_siblings,
							  pe[n].width);
			break;
			break;


		case I915_GEM_ENGINE_TYPE_INVALID:
		case I915_GEM_ENGINE_TYPE_INVALID:
@@ -977,6 +1175,30 @@ static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx,
			err = ERR_PTR(ret);
			err = ERR_PTR(ret);
			goto free_engines;
			goto free_engines;
		}
		}
		for_each_child(ce, child) {
			ret = intel_context_set_gem(child, ctx, pe->sseu);
			if (ret) {
				err = ERR_PTR(ret);
				goto free_engines;
			}
		}

		/*
		 * XXX: Must be done after calling intel_context_set_gem as that
		 * function changes the ring size. The ring is allocated when
		 * the context is pinned. If the ring size is changed after
		 * allocation we have a mismatch of the ring size and will cause
		 * the context to hang. Presumably with a bit of reordering we
		 * could move the perma-pin step to the backend function
		 * intel_engine_create_parallel.
		 */
		if (pe[n].type == I915_GEM_ENGINE_TYPE_PARALLEL) {
			ret = perma_pin_contexts(ce);
			if (ret) {
				err = ERR_PTR(ret);
				goto free_engines;
			}
		}
	}
	}


	return e;
	return e;
@@ -1219,6 +1441,7 @@ static void context_close(struct i915_gem_context *ctx)


	/* Flush any concurrent set_engines() */
	/* Flush any concurrent set_engines() */
	mutex_lock(&ctx->engines_mutex);
	mutex_lock(&ctx->engines_mutex);
	unpin_engines(__context_engines_static(ctx));
	engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1));
	engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1));
	i915_gem_context_set_closed(ctx);
	i915_gem_context_set_closed(ctx);
	mutex_unlock(&ctx->engines_mutex);
	mutex_unlock(&ctx->engines_mutex);
+13 −3
Original line number Original line Diff line number Diff line
@@ -78,13 +78,16 @@ enum i915_gem_engine_type {


	/** @I915_GEM_ENGINE_TYPE_BALANCED: A load-balanced engine set */
	/** @I915_GEM_ENGINE_TYPE_BALANCED: A load-balanced engine set */
	I915_GEM_ENGINE_TYPE_BALANCED,
	I915_GEM_ENGINE_TYPE_BALANCED,

	/** @I915_GEM_ENGINE_TYPE_PARALLEL: A parallel engine set */
	I915_GEM_ENGINE_TYPE_PARALLEL,
};
};


/**
/**
 * struct i915_gem_proto_engine - prototype engine
 * struct i915_gem_proto_engine - prototype engine
 *
 *
 * This struct describes an engine that a context may contain.  Engines
 * This struct describes an engine that a context may contain.  Engines
 * have three types:
 * have four types:
 *
 *
 *  - I915_GEM_ENGINE_TYPE_INVALID: Invalid engines can be created but they
 *  - I915_GEM_ENGINE_TYPE_INVALID: Invalid engines can be created but they
 *    show up as a NULL in i915_gem_engines::engines[i] and any attempt to
 *    show up as a NULL in i915_gem_engines::engines[i] and any attempt to
@@ -97,6 +100,10 @@ enum i915_gem_engine_type {
 *
 *
 *  - I915_GEM_ENGINE_TYPE_BALANCED: A load-balanced engine set, described
 *  - I915_GEM_ENGINE_TYPE_BALANCED: A load-balanced engine set, described
 *    i915_gem_proto_engine::num_siblings and i915_gem_proto_engine::siblings.
 *    i915_gem_proto_engine::num_siblings and i915_gem_proto_engine::siblings.
 *
 *  - I915_GEM_ENGINE_TYPE_PARALLEL: A parallel submission engine set, described
 *    i915_gem_proto_engine::width, i915_gem_proto_engine::num_siblings, and
 *    i915_gem_proto_engine::siblings.
 */
 */
struct i915_gem_proto_engine {
struct i915_gem_proto_engine {
	/** @type: Type of this engine */
	/** @type: Type of this engine */
@@ -105,10 +112,13 @@ struct i915_gem_proto_engine {
	/** @engine: Engine, for physical */
	/** @engine: Engine, for physical */
	struct intel_engine_cs *engine;
	struct intel_engine_cs *engine;


	/** @num_siblings: Number of balanced siblings */
	/** @num_siblings: Number of balanced or parallel siblings */
	unsigned int num_siblings;
	unsigned int num_siblings;


	/** @siblings: Balanced siblings */
	/** @width: Width of each sibling */
	unsigned int width;

	/** @siblings: Balanced siblings or num_siblings * width for parallel */
	struct intel_engine_cs **siblings;
	struct intel_engine_cs **siblings;


	/** @sseu: Client-set SSEU parameters */
	/** @sseu: Client-set SSEU parameters */
Loading