Commit fc2f0756 authored by Rob Clark
Browse files

drm/msm/gem: Tidy up VMA API



Stop open coding VMA construction, which will be needed in the next
commit.  And since the VMA already has a ptr to the address space, stop
passing that around everywhere.  (Also, an aspace always has an mmu so
we can drop a couple pointless NULL checks.)

Signed-off-by: Rob Clark <robdclark@chromium.org>
Patchwork: https://patchwork.freedesktop.org/patch/527833/
Link: https://lore.kernel.org/r/20230320144356.803762-4-robdclark@gmail.com
parent 769fec1e
Loading
Loading
Loading
Loading
+8 −10
Original line number Diff line number Diff line
@@ -309,12 +309,10 @@ static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,

	msm_gem_assert_locked(obj);

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	vma = msm_gem_vma_new(aspace);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
@@ -361,9 +359,9 @@ put_iova_spaces(struct drm_gem_object *obj, bool close)

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace) {
			msm_gem_purge_vma(vma->aspace, vma);
			msm_gem_vma_purge(vma);
			if (close)
				msm_gem_close_vma(vma->aspace, vma);
				msm_gem_vma_close(vma);
		}
	}
}
@@ -399,7 +397,7 @@ static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
		if (IS_ERR(vma))
			return vma;

		ret = msm_gem_init_vma(aspace, vma, obj->size,
		ret = msm_gem_vma_init(vma, obj->size,
			range_start, range_end);
		if (ret) {
			del_vma(vma);
@@ -437,7 +435,7 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = msm_gem_map_vma(vma->aspace, vma, prot, msm_obj->sgt, obj->size);
	ret = msm_gem_vma_map(vma, prot, msm_obj->sgt, obj->size);
	if (ret)
		msm_gem_unpin_locked(obj);

@@ -539,8 +537,8 @@ static int clear_iova(struct drm_gem_object *obj,
	if (msm_gem_vma_inuse(vma))
		return -EBUSY;

	msm_gem_purge_vma(vma->aspace, vma);
	msm_gem_close_vma(vma->aspace, vma);
	msm_gem_vma_purge(vma);
	msm_gem_vma_close(vma);
	del_vma(vma);

	return 0;
@@ -589,7 +587,7 @@ void msm_gem_unpin_iova(struct drm_gem_object *obj,
	msm_gem_lock(obj);
	vma = lookup_vma(obj, aspace);
	if (!GEM_WARN_ON(!vma)) {
		msm_gem_unpin_vma(vma);
		msm_gem_vma_unpin(vma);
		msm_gem_unpin_locked(obj);
	}
	msm_gem_unlock(obj);
+7 −11
Original line number Diff line number Diff line
@@ -69,19 +69,15 @@ struct msm_gem_vma {
	struct msm_fence_context *fctx[MSM_GPU_MAX_RINGS];
};

int msm_gem_init_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, int size,
struct msm_gem_vma *msm_gem_vma_new(struct msm_gem_address_space *aspace);
int msm_gem_vma_init(struct msm_gem_vma *vma, int size,
		u64 range_start, u64 range_end);
bool msm_gem_vma_inuse(struct msm_gem_vma *vma);
void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma);
void msm_gem_unpin_vma(struct msm_gem_vma *vma);
void msm_gem_unpin_vma_fenced(struct msm_gem_vma *vma, struct msm_fence_context *fctx);
int msm_gem_map_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, int prot,
		struct sg_table *sgt, int size);
void msm_gem_close_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma);
void msm_gem_vma_purge(struct msm_gem_vma *vma);
void msm_gem_vma_unpin(struct msm_gem_vma *vma);
void msm_gem_vma_unpin_fenced(struct msm_gem_vma *vma, struct msm_fence_context *fctx);
int msm_gem_vma_map(struct msm_gem_vma *vma, int prot, struct sg_table *sgt, int size);
void msm_gem_vma_close(struct msm_gem_vma *vma);

struct msm_gem_object {
	struct drm_gem_object base;
+1 −1
Original line number Diff line number Diff line
@@ -250,7 +250,7 @@ static void submit_cleanup_bo(struct msm_gem_submit *submit, int i,
	submit->bos[i].flags &= ~cleanup_flags;

	if (flags & BO_VMA_PINNED)
		msm_gem_unpin_vma(submit->bos[i].vma);
		msm_gem_vma_unpin(submit->bos[i].vma);

	if (flags & BO_OBJ_PINNED)
		msm_gem_unpin_locked(obj);
+34 −17
Original line number Diff line number Diff line
@@ -56,9 +56,9 @@ bool msm_gem_vma_inuse(struct msm_gem_vma *vma)
}

/* Actually unmap memory for the vma */
void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma)
void msm_gem_vma_purge(struct msm_gem_vma *vma)
{
	struct msm_gem_address_space *aspace = vma->aspace;
	unsigned size = vma->node.size;

	/* Print a message if we try to purge a vma in use */
@@ -68,14 +68,13 @@ void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
	if (!vma->mapped)
		return;

	if (aspace->mmu)
	aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);

	vma->mapped = false;
}

/* Remove reference counts for the mapping */
void msm_gem_unpin_vma(struct msm_gem_vma *vma)
void msm_gem_vma_unpin(struct msm_gem_vma *vma)
{
	if (GEM_WARN_ON(!vma->inuse))
		return;
@@ -84,21 +83,21 @@ void msm_gem_unpin_vma(struct msm_gem_vma *vma)
}

/* Replace pin reference with fence: */
void msm_gem_unpin_vma_fenced(struct msm_gem_vma *vma, struct msm_fence_context *fctx)
void msm_gem_vma_unpin_fenced(struct msm_gem_vma *vma, struct msm_fence_context *fctx)
{
	vma->fctx[fctx->index] = fctx;
	vma->fence[fctx->index] = fctx->last_fence;
	vma->fence_mask |= BIT(fctx->index);
	msm_gem_unpin_vma(vma);
	msm_gem_vma_unpin(vma);
}

/* Map and pin vma: */
int
msm_gem_map_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, int prot,
msm_gem_vma_map(struct msm_gem_vma *vma, int prot,
		struct sg_table *sgt, int size)
{
	int ret = 0;
	struct msm_gem_address_space *aspace = vma->aspace;
	int ret;

	if (GEM_WARN_ON(!vma->iova))
		return -EINVAL;
@@ -111,9 +110,10 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,

	vma->mapped = true;

	if (aspace && aspace->mmu)
		ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
				size, prot);
	if (!aspace)
		return 0;

	ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt, size, prot);

	if (ret) {
		vma->mapped = false;
@@ -124,9 +124,10 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
}

/* Close an iova.  Warn if it is still in use */
void msm_gem_close_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma)
void msm_gem_vma_close(struct msm_gem_vma *vma)
{
	struct msm_gem_address_space *aspace = vma->aspace;

	GEM_WARN_ON(msm_gem_vma_inuse(vma) || vma->mapped);

	spin_lock(&aspace->lock);
@@ -139,13 +140,29 @@ void msm_gem_close_vma(struct msm_gem_address_space *aspace,
	msm_gem_address_space_put(aspace);
}

/* Allocate a new vma, recording a back-pointer to its address space */
struct msm_gem_vma *msm_gem_vma_new(struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma;

	/* kzalloc so all fields (iova, mapped, fence state) start zeroed */
	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return NULL;

	/*
	 * Stash the aspace so later ops (map/purge/close/init) don't need
	 * it passed in — the point of this commit's API tidy-up.
	 */
	vma->aspace = aspace;

	return vma;
}

/* Initialize a new vma and allocate an iova for it */
int msm_gem_init_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, int size,
int msm_gem_vma_init(struct msm_gem_vma *vma, int size,
		u64 range_start, u64 range_end)
{
	struct msm_gem_address_space *aspace = vma->aspace;
	int ret;

	if (GEM_WARN_ON(!aspace))
		return -EINVAL;

	if (GEM_WARN_ON(vma->iova))
		return -EBUSY;

+1 −1
Original line number Diff line number Diff line
@@ -24,7 +24,7 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
		struct drm_gem_object *obj = &submit->bos[i].obj->base;

		msm_gem_lock(obj);
		msm_gem_unpin_vma_fenced(submit->bos[i].vma, fctx);
		msm_gem_vma_unpin_fenced(submit->bos[i].vma, fctx);
		msm_gem_unpin_locked(obj);
		msm_gem_unlock(obj);
		submit->bos[i].flags &= ~(BO_VMA_PINNED | BO_OBJ_PINNED);