Commit 22f0463a authored by Christian König

drm/amdgpu: unwrap fence chains in the explicit sync fence



Unwrap the explicit fence if it is a dma_fence_chain and
sync to the first fence not matching the owner rules.
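
For context on the message above: dma_fence_chain_for_each() walks every link of a
dma_fence_chain, a plain fence is visited once as itself, and to_dma_fence_chain()
tells the two cases apart. Below is a minimal sketch of that unwrapping pattern; it
is not amdgpu code. sync_first_matching_fence(), wants_fence() and sync_to() are
hypothetical placeholders (in the patch below the filter is amdgpu_sync_test_fence()
and the sink is amdgpu_sync_fence()); only the dma-fence helpers are the real kernel
API.

#include <linux/dma-fence.h>
#include <linux/dma-fence-chain.h>

/* Walk a possibly chained fence and sync to the first element that passes
 * the caller's filter. dma_fence_chain_for_each() holds a reference on the
 * current element, so that reference must be dropped when leaving the loop
 * early.
 */
static int sync_first_matching_fence(struct dma_fence *fence,
				     bool (*wants_fence)(struct dma_fence *f),
				     int (*sync_to)(struct dma_fence *f))
{
	struct dma_fence *iter;
	int r = 0;

	dma_fence_chain_for_each(iter, fence) {
		struct dma_fence_chain *chain = to_dma_fence_chain(iter);
		/* For a chain link, test the fence it wraps; a plain fence
		 * is tested directly. */
		struct dma_fence *f = chain ? chain->fence : iter;

		if (wants_fence(f)) {
			r = sync_to(iter);
			dma_fence_put(iter);	/* reference held by the iterator */
			break;
		}
	}
	return r;
}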

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210614174536.5188-1-christian.koenig@amd.com
parent 1451d0e9
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c: +68 −50
@@ -28,6 +28,8 @@
  *    Christian König <christian.koenig@amd.com>
  */
 
+#include <linux/dma-fence-chain.h>
+
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
 #include "amdgpu_amdkfd.h"
@@ -186,93 +188,109 @@ int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence)
 	return amdgpu_sync_fence(sync, fence);
 }
 
-/**
- * amdgpu_sync_resv - sync to a reservation object
- *
- * @adev: amdgpu device
- * @sync: sync object to add fences from reservation object to
- * @resv: reservation object with embedded fence
- * @mode: how owner affects which fences we sync to
- * @owner: owner of the planned job submission
- *
- * Sync to the fence
- */
-int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
-		     struct dma_resv *resv, enum amdgpu_sync_mode mode,
-		     void *owner)
+/* Determine based on the owner and mode if we should sync to a fence or not */
+static bool amdgpu_sync_test_fence(struct amdgpu_device *adev,
+				   enum amdgpu_sync_mode mode,
+				   void *owner, struct dma_fence *f)
 {
-	struct dma_resv_list *flist;
-	struct dma_fence *f;
-	unsigned i;
-	int r = 0;
-
-	if (resv == NULL)
-		return -EINVAL;
-
-	/* always sync to the exclusive fence */
-	f = dma_resv_excl_fence(resv);
-	r = amdgpu_sync_fence(sync, f);
-
-	flist = dma_resv_shared_list(resv);
-	if (!flist || r)
-		return r;
-
-	for (i = 0; i < flist->shared_count; ++i) {
-		void *fence_owner;
-
-		f = rcu_dereference_protected(flist->shared[i],
-					      dma_resv_held(resv));
-
-		fence_owner = amdgpu_sync_get_owner(f);
+	void *fence_owner = amdgpu_sync_get_owner(f);
 
-		/* Always sync to moves, no matter what */
-		if (fence_owner == AMDGPU_FENCE_OWNER_UNDEFINED) {
-			r = amdgpu_sync_fence(sync, f);
-			if (r)
-				break;
-		}
+	/* Always sync to moves, no matter what */
+	if (fence_owner == AMDGPU_FENCE_OWNER_UNDEFINED)
+		return true;
 
-		/* We only want to trigger KFD eviction fences on
-		 * evict or move jobs. Skip KFD fences otherwise.
-		 */
-		if (fence_owner == AMDGPU_FENCE_OWNER_KFD &&
-		    owner != AMDGPU_FENCE_OWNER_UNDEFINED)
-			continue;
+	/* We only want to trigger KFD eviction fences on
+	 * evict or move jobs. Skip KFD fences otherwise.
+	 */
+	if (fence_owner == AMDGPU_FENCE_OWNER_KFD &&
+	    owner != AMDGPU_FENCE_OWNER_UNDEFINED)
+		return false;
 
-		/* Never sync to VM updates either. */
-		if (fence_owner == AMDGPU_FENCE_OWNER_VM &&
-		    owner != AMDGPU_FENCE_OWNER_UNDEFINED)
-			continue;
+	/* Never sync to VM updates either. */
+	if (fence_owner == AMDGPU_FENCE_OWNER_VM &&
+	    owner != AMDGPU_FENCE_OWNER_UNDEFINED)
+		return false;
 
-		/* Ignore fences depending on the sync mode */
-		switch (mode) {
-		case AMDGPU_SYNC_ALWAYS:
-			break;
+	/* Ignore fences depending on the sync mode */
+	switch (mode) {
+	case AMDGPU_SYNC_ALWAYS:
+		return true;
 
-		case AMDGPU_SYNC_NE_OWNER:
-			if (amdgpu_sync_same_dev(adev, f) &&
-			    fence_owner == owner)
-				continue;
-			break;
+	case AMDGPU_SYNC_NE_OWNER:
+		if (amdgpu_sync_same_dev(adev, f) &&
+		    fence_owner == owner)
+			return false;
+		break;
 
-		case AMDGPU_SYNC_EQ_OWNER:
-			if (amdgpu_sync_same_dev(adev, f) &&
-			    fence_owner != owner)
-				continue;
-			break;
+	case AMDGPU_SYNC_EQ_OWNER:
+		if (amdgpu_sync_same_dev(adev, f) &&
+		    fence_owner != owner)
+			return false;
+		break;
 
-		case AMDGPU_SYNC_EXPLICIT:
-			continue;
-		}
+	case AMDGPU_SYNC_EXPLICIT:
+		return false;
+	}
 
-		WARN(debug_evictions && fence_owner == AMDGPU_FENCE_OWNER_KFD,
-		     "Adding eviction fence to sync obj");
-		r = amdgpu_sync_fence(sync, f);
-		if (r)
-			break;
+	WARN(debug_evictions && fence_owner == AMDGPU_FENCE_OWNER_KFD,
+	     "Adding eviction fence to sync obj");
+	return true;
+}
+
+/**
+ * amdgpu_sync_resv - sync to a reservation object
+ *
+ * @adev: amdgpu device
+ * @sync: sync object to add fences from reservation object to
+ * @resv: reservation object with embedded fence
+ * @mode: how owner affects which fences we sync to
+ * @owner: owner of the planned job submission
+ *
+ * Sync to the fence
+ */
+int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+		     struct dma_resv *resv, enum amdgpu_sync_mode mode,
+		     void *owner)
+{
+	struct dma_resv_list *flist;
+	struct dma_fence *f;
+	unsigned i;
+	int r = 0;
+
+	if (resv == NULL)
+		return -EINVAL;
+
+	/* always sync to the exclusive fence */
+	f = dma_resv_excl_fence(resv);
+	dma_fence_chain_for_each(f, f) {
+		struct dma_fence_chain *chain = to_dma_fence_chain(f);
+
+		if (amdgpu_sync_test_fence(adev, mode, owner, chain ?
+					   chain->fence : f)) {
+			r = amdgpu_sync_fence(sync, f);
+			dma_fence_put(f);
+			if (r)
+				return r;
+			break;
+		}
+	}
+
+	flist = dma_resv_shared_list(resv);
+	if (!flist)
+		return 0;
+
+	for (i = 0; i < flist->shared_count; ++i) {
+		f = rcu_dereference_protected(flist->shared[i],
+					      dma_resv_held(resv));
+
+		if (amdgpu_sync_test_fence(adev, mode, owner, f)) {
+			r = amdgpu_sync_fence(sync, f);
+			if (r)
+				return r;
+		}
 	}
-	return r;
+	return 0;
 }
 
 /**
  * amdgpu_sync_peek_fence - get the next fence not signaled yet