Commit 420a07b8 authored by Nirmoy Das, committed by Matthew Auld

drm/i915: Individualize fences before adding to dma_resv obj

_i915_vma_move_to_active() can receive more than one fence when
multiple batch buffers are submitted. Because dma_resv_add_fence()
can only accept one fence at a time, make _i915_vma_move_to_active()
aware of multiple fences so that it can add individual fences to the
dma_resv object.
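
For illustration only (not part of the commit message): the composite fence
produced by multi-batch submission is a struct dma_fence_array, and
dma_fence_array_for_each() from <linux/dma-fence-array.h> walks its component
fences, or yields the fence itself once when it is not an array. A minimal
sketch of the individualization pattern, where fence, obj and usage stand in
for the context of _i915_vma_move_to_active():

	struct dma_fence *curr;
	int idx;
	int err;

	/* First pass: empty loop body, only counts components into idx. */
	dma_fence_array_for_each(curr, idx, fence)
		;

	/* Reserve one dma_resv slot per component fence. */
	err = dma_resv_reserve_fences(obj->base.resv, idx);
	if (err)
		return err;

	/* Second pass: add each component fence individually. */
	dma_fence_array_for_each(curr, idx, fence)
		dma_resv_add_fence(obj->base.resv, curr, usage);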

v6: fix multi-line comment.
v5: remove double fence reservation for batch VMAs.
v4: Reserve fences for composite_fence on multi-batch contexts and
    also reserve fence slots for composite_fence for each VMA.
v3: dma_resv_reserve_fences is not cumulative so pass num_fences.
v2: make sure to reserve enough fence slots before adding.

Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/5614

Fixes: 544460c3 ("drm/i915: Multi-BB execbuf")
Cc: <stable@vger.kernel.org> # v5.16+
Signed-off-by: Nirmoy Das <nirmoy.das@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Andrzej Hajda <andrzej.hajda@intel.com>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220525095955.15371-1-nirmoy.das@intel.com
parent 69d6bf5c
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +2 −1
@@ -999,7 +999,8 @@ static int eb_validate_vmas(struct i915_execbuffer *eb)
			}
		}

-		err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
+		/* Reserve enough slots to accommodate composite fences */
+		err = dma_resv_reserve_fences(vma->obj->base.resv, eb->num_batches);
		if (err)
			return err;

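For context, a hedged sketch of why eb->num_batches is the right reservation
count: the multi-batch path (added by 544460c3) produces one request fence per
batch and combines them into a composite via dma_fence_array_create(), so each
object's reservation must leave room for that many component fences. The
fences array and fence_ctx value below are illustrative assumptions, not code
from this patch:

	/*
	 * Illustrative only: one fence per batch, wrapped in a
	 * dma_fence_array; eb->num_batches therefore bounds the number
	 * of fences later individualized into each object's dma_resv.
	 */
	struct dma_fence_array *composite;

	composite = dma_fence_array_create(eb->num_batches, fences,
					   fence_ctx, 1, false);
	if (!composite)
		return -ENOMEM;
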
drivers/gpu/drm/i915/i915_vma.c +28 −20
@@ -23,6 +23,7 @@
 */

#include <linux/sched/mm.h>
+#include <linux/dma-fence-array.h>
#include <drm/drm_gem.h>

#include "display/intel_frontbuffer.h"
@@ -1823,6 +1824,21 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
	if (unlikely(err))
		return err;

+	/*
+	 * Reserve fences slot early to prevent an allocation after preparing
+	 * the workload and associating fences with dma_resv.
+	 */
+	if (fence && !(flags & __EXEC_OBJECT_NO_RESERVE)) {
+		struct dma_fence *curr;
+		int idx;
+
+		dma_fence_array_for_each(curr, idx, fence)
+			;
+		err = dma_resv_reserve_fences(vma->obj->base.resv, idx);
+		if (unlikely(err))
+			return err;
+	}
+
	if (flags & EXEC_OBJECT_WRITE) {
		struct intel_frontbuffer *front;

@@ -1832,31 +1848,23 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
				i915_active_add_request(&front->write, rq);
			intel_frontbuffer_put(front);
		}
+	}

-		if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
-			err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
-			if (unlikely(err))
-				return err;
-		}
+	if (fence) {
+		struct dma_fence *curr;
+		enum dma_resv_usage usage;
+		int idx;

-		if (fence) {
-			dma_resv_add_fence(vma->obj->base.resv, fence,
-					   DMA_RESV_USAGE_WRITE);
+		obj->read_domains = 0;
+		if (flags & EXEC_OBJECT_WRITE) {
+			usage = DMA_RESV_USAGE_WRITE;
			obj->write_domain = I915_GEM_DOMAIN_RENDER;
-			obj->read_domains = 0;
+		} else {
+			usage = DMA_RESV_USAGE_READ;
		}
-	} else {
-		if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
-			err = dma_resv_reserve_fences(vma->obj->base.resv, 1);
-			if (unlikely(err))
-				return err;
-		}

-		if (fence) {
-			dma_resv_add_fence(vma->obj->base.resv, fence,
-					   DMA_RESV_USAGE_READ);
-			obj->write_domain = 0;
-		}
+		dma_fence_array_for_each(curr, idx, fence)
+			dma_resv_add_fence(vma->obj->base.resv, curr, usage);
	}

	if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
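
Pieced together from the hunks above, the fence path of
_i915_vma_move_to_active() after this patch reads approximately as follows (a
reconstruction for readability, not a verbatim copy of the committed file):

	if (fence) {
		struct dma_fence *curr;
		enum dma_resv_usage usage;
		int idx;

		obj->read_domains = 0;
		if (flags & EXEC_OBJECT_WRITE) {
			usage = DMA_RESV_USAGE_WRITE;
			obj->write_domain = I915_GEM_DOMAIN_RENDER;
		} else {
			usage = DMA_RESV_USAGE_READ;
		}

		/* Add each component of a composite fence individually. */
		dma_fence_array_for_each(curr, idx, fence)
			dma_resv_add_fence(vma->obj->base.resv, curr, usage);
	}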