Commit e997b827 authored by Christian König, committed by Alex Deucher

drm/amdgpu: simplify VM update tracking a bit

Store the 64-bit sequence number directly. This makes it simpler to use and
saves a bit of fence reference counting overhead.

Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 184a69ca
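
At its core, the change replaces a reference-counted struct dma_fence pointer
with a plain 64-bit sequence number for tracking the last flushed page-table
update: the "does this VMID need a flush?" test becomes an integer comparison
and recording a flush becomes a plain store. The standalone C sketch below
only illustrates that idea; the struct and helper names are hypothetical
stand-ins, not the kernel code touched by this patch.

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for struct amdgpu_vmid: only the field this patch changes. */
struct vmid {
	uint64_t flushed_updates;	/* last flushed PD/PT update, as a fence seqno */
};

/* Track the newest VM update seen so far (the max()-style update used after
 * this patch): just keep the larger sequence number. */
static void track_vm_update(uint64_t *last_vm_update, uint64_t seqno)
{
	if (seqno > *last_vm_update)
		*last_vm_update = seqno;
}

/* "Does this VMID need a flush?" becomes a plain integer comparison,
 * with no dma_fence_is_later() or fence-context checks. */
static int vmid_needs_flush(const struct vmid *id, uint64_t last_vm_update)
{
	return id->flushed_updates < last_vm_update;
}

int main(void)
{
	struct vmid id = { .flushed_updates = 10 };
	uint64_t last_vm_update = 0;

	track_vm_update(&last_vm_update, 12);	/* a newer VM update completed */

	if (vmid_needs_flush(&id, last_vm_update)) {
		/* Recording the flush is a plain store instead of a
		 * dma_fence_put()/dma_fence_get() pair. */
		id.flushed_updates = last_vm_update;
		printf("flushed up to seqno %llu\n",
		       (unsigned long long)id.flushed_updates);
	}
	return 0;
}
```

Dropping the fence pointer removes the get/put pairs and the fence-context
bookkeeping from the VMID grab paths, which is the reference counting overhead
the commit message refers to.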
+11 −29
@@ -276,19 +276,15 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
 	struct amdgpu_device *adev = ring->adev;
 	unsigned vmhub = ring->funcs->vmhub;
 	uint64_t fence_context = adev->fence_context + ring->idx;
-	struct dma_fence *updates = sync->last_vm_update;
 	bool needs_flush = vm->use_cpu_for_update;
-	int r = 0;
+	uint64_t updates = sync->last_vm_update;
+	int r;
 
 	*id = vm->reserved_vmid[vmhub];
-	if (updates && (*id)->flushed_updates &&
-	    updates->context == (*id)->flushed_updates->context &&
-	    !dma_fence_is_later(updates, (*id)->flushed_updates))
-		updates = NULL;
-
 	if ((*id)->owner != vm->immediate.fence_context ||
-	    job->vm_pd_addr != (*id)->pd_gpu_addr ||
-	    updates || !(*id)->last_flush ||
+	    (*id)->pd_gpu_addr != job->vm_pd_addr ||
+	    (*id)->flushed_updates < updates ||
+	    !(*id)->last_flush ||
 	    ((*id)->last_flush->context != fence_context &&
 	     !dma_fence_is_signaled((*id)->last_flush))) {
 		struct dma_fence *tmp;
@@ -302,8 +298,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
 		tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
 		if (tmp) {
 			*id = NULL;
-			r = amdgpu_sync_fence(sync, tmp);
-			return r;
+			return amdgpu_sync_fence(sync, tmp);
 		}
 		needs_flush = true;
 	}
@@ -315,10 +310,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
 	if (r)
 		return r;
 
-	if (updates) {
-		dma_fence_put((*id)->flushed_updates);
-		(*id)->flushed_updates = dma_fence_get(updates);
-	}
+	(*id)->flushed_updates = updates;
 	job->vm_needs_flush = needs_flush;
 	return 0;
 }
@@ -346,7 +338,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
 	unsigned vmhub = ring->funcs->vmhub;
 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 	uint64_t fence_context = adev->fence_context + ring->idx;
-	struct dma_fence *updates = sync->last_vm_update;
+	uint64_t updates = sync->last_vm_update;
 	int r;
 
 	job->vm_needs_flush = vm->use_cpu_for_update;
@@ -354,7 +346,6 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
 	/* Check if we can use a VMID already assigned to this VM */
 	list_for_each_entry_reverse((*id), &id_mgr->ids_lru, list) {
 		bool needs_flush = vm->use_cpu_for_update;
-		struct dma_fence *flushed;
 
 		/* Check all the prerequisites to using this VMID */
 		if ((*id)->owner != vm->immediate.fence_context)
@@ -368,8 +359,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
 		     !dma_fence_is_signaled((*id)->last_flush)))
 			needs_flush = true;
 
-		flushed  = (*id)->flushed_updates;
-		if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
+		if ((*id)->flushed_updates < updates)
 			needs_flush = true;
 
 		if (needs_flush && !adev->vm_manager.concurrent_flush)
@@ -382,11 +372,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
 		if (r)
 			return r;
 
-		if (updates && (!flushed || dma_fence_is_later(updates, flushed))) {
-			dma_fence_put((*id)->flushed_updates);
-			(*id)->flushed_updates = dma_fence_get(updates);
-		}
-
+		(*id)->flushed_updates = updates;
 		job->vm_needs_flush |= needs_flush;
 		return 0;
 	}
@@ -432,8 +418,6 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 			goto error;
 
 		if (!id) {
-			struct dma_fence *updates = sync->last_vm_update;
-
 			/* Still no ID to use? Then use the idle one found earlier */
 			id = idle;
 
@@ -442,8 +426,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 			if (r)
 				goto error;
 
-			dma_fence_put(id->flushed_updates);
-			id->flushed_updates = dma_fence_get(updates);
+			id->flushed_updates = sync->last_vm_update;
 			job->vm_needs_flush = true;
 		}
 
@@ -610,7 +593,6 @@ void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev)
 			struct amdgpu_vmid *id = &id_mgr->ids[j];
 
 			amdgpu_sync_free(&id->active);
-			dma_fence_put(id->flushed_updates);
 			dma_fence_put(id->last_flush);
 			dma_fence_put(id->pasid_mapping);
 		}
+1 −1
@@ -47,7 +47,7 @@ struct amdgpu_vmid {
 
 	uint64_t		pd_gpu_addr;
 	/* last flushed PD/PT update */
-	struct dma_fence	*flushed_updates;
+	uint64_t		flushed_updates;
 
 	uint32_t                current_gpu_reset_count;
 
+3 −6
@@ -51,7 +51,7 @@ static struct kmem_cache *amdgpu_sync_slab;
 void amdgpu_sync_create(struct amdgpu_sync *sync)
 {
 	hash_init(sync->fences);
-	sync->last_vm_update = NULL;
+	sync->last_vm_update = 0;
 }
 
 /**
@@ -184,7 +184,7 @@ int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence)
 	if (!fence)
 		return 0;
 
-	amdgpu_sync_keep_later(&sync->last_vm_update, fence);
+	sync->last_vm_update = max(sync->last_vm_update, fence->seqno);
 	return amdgpu_sync_fence(sync, fence);
 }
 
@@ -376,8 +376,7 @@ int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone)
 		}
 	}
 
-	dma_fence_put(clone->last_vm_update);
-	clone->last_vm_update = dma_fence_get(source->last_vm_update);
+	clone->last_vm_update = source->last_vm_update;
 
 	return 0;
 }
@@ -419,8 +418,6 @@ void amdgpu_sync_free(struct amdgpu_sync *sync)
 		dma_fence_put(e->fence);
 		kmem_cache_free(amdgpu_sync_slab, e);
 	}
-
-	dma_fence_put(sync->last_vm_update);
 }
 
 /**
+1 −1
@@ -43,7 +43,7 @@ enum amdgpu_sync_mode {
  */
 struct amdgpu_sync {
 	DECLARE_HASHTABLE(fences, 4);
-	struct dma_fence	*last_vm_update;
+	uint64_t	last_vm_update;
 };
 
 void amdgpu_sync_create(struct amdgpu_sync *sync);