Commit 73511edf authored by Christian König

dma-buf: specify usage while adding fences to dma_resv obj v7



Instead of distinguishing between shared and exclusive fences, specify
the fence usage while adding fences.

Rework all drivers to use this interface instead and deprecate the old one.

v2: some kerneldoc comments suggested by Daniel
v3: fix a missing case in radeon
v4: rebase on nouveau changes, fix lockdep issue and temporarily disable the warning
v5: more documentation updates
v6: separate internal dma_resv changes from this patch, which avoids
    temporarily disabling the warning; rebase on upstream changes
v7: fix missed case in lima driver, minimize changes to i915_gem_busy_ioctl

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20220407085946.744568-3-christian.koenig@amd.com
parent 7bc80a54
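
The driver-side conversion is mechanical: wherever a driver used to pick between dma_resv_add_shared_fence() and dma_resv_add_excl_fence(), it now calls dma_resv_add_fence() and names the usage explicitly. A minimal before/after sketch (resv, fence and shared are illustrative placeholders, not taken from any particular driver):

	/* Before: the slot was implied by which helper was called. */
	if (shared)
		dma_resv_add_shared_fence(resv, fence);
	else
		dma_resv_add_excl_fence(resv, fence);

	/* After: one entry point, the usage is stated explicitly. */
	dma_resv_add_fence(resv, fence,
			   shared ? DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE);

This is the same pattern the amdgpu_bo_fence() hunk at the end of this page applies.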
drivers/dma-buf/dma-resv.c  +37 −11
@@ -234,14 +234,14 @@ EXPORT_SYMBOL(dma_resv_reserve_fences);

#ifdef CONFIG_DEBUG_MUTEXES
/**
 * dma_resv_reset_shared_max - reset shared fences for debugging
 * dma_resv_reset_max_fences - reset shared fences for debugging
 * @obj: the dma_resv object to reset
 *
 * Reset the number of pre-reserved shared slots to test that drivers do
 * correct slot allocation using dma_resv_reserve_fences(). See also
 * &dma_resv_list.shared_max.
 */
void dma_resv_reset_shared_max(struct dma_resv *obj)
void dma_resv_reset_max_fences(struct dma_resv *obj)
{
	struct dma_resv_list *fences = dma_resv_shared_list(obj);

@@ -251,7 +251,7 @@ void dma_resv_reset_shared_max(struct dma_resv *obj)
	if (fences)
		fences->shared_max = fences->shared_count;
}
EXPORT_SYMBOL(dma_resv_reset_shared_max);
EXPORT_SYMBOL(dma_resv_reset_max_fences);
#endif

/**
@@ -264,7 +264,8 @@ EXPORT_SYMBOL(dma_resv_reset_shared_max);
 *
 * See also &dma_resv.fence for a discussion of the semantics.
 */
void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
static void dma_resv_add_shared_fence(struct dma_resv *obj,
				      struct dma_fence *fence)
{
	struct dma_resv_list *fobj;
	struct dma_fence *old;
@@ -305,13 +306,13 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
	write_seqcount_end(&obj->seq);
	dma_fence_put(old);
}
EXPORT_SYMBOL(dma_resv_add_shared_fence);

/**
 * dma_resv_replace_fences - replace fences in the dma_resv obj
 * @obj: the reservation object
 * @context: the context of the fences to replace
 * @replacement: the new fence to use instead
 * @usage: how the new fence is used, see enum dma_resv_usage
 *
 * Replace fences with a specified context with a new fence. Only valid if the
 * operation represented by the original fence has no longer access to the
@@ -321,12 +322,16 @@ EXPORT_SYMBOL(dma_resv_add_shared_fence);
 * update fence which makes the resource inaccessible.
 */
void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
			     struct dma_fence *replacement)
			     struct dma_fence *replacement,
			     enum dma_resv_usage usage)
{
	struct dma_resv_list *list;
	struct dma_fence *old;
	unsigned int i;

	/* Only readers supported for now */
	WARN_ON(usage != DMA_RESV_USAGE_READ);

	dma_resv_assert_held(obj);

	write_seqcount_begin(&obj->seq);
@@ -360,7 +365,8 @@ EXPORT_SYMBOL(dma_resv_replace_fences);
 * Add a fence to the exclusive slot. @obj must be locked with dma_resv_lock().
 * See also &dma_resv.fence_excl for a discussion of the semantics.
 */
void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
static void dma_resv_add_excl_fence(struct dma_resv *obj,
				    struct dma_fence *fence)
{
	struct dma_fence *old_fence = dma_resv_excl_fence(obj);

@@ -375,7 +381,27 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)

	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(dma_resv_add_excl_fence);

/**
 * dma_resv_add_fence - Add a fence to the dma_resv obj
 * @obj: the reservation object
 * @fence: the fence to add
 * @usage: how the fence is used, see enum dma_resv_usage
 *
 * Add a fence to a slot, @obj must be locked with dma_resv_lock(), and
 * dma_resv_reserve_fences() has been called.
 *
 * See also &dma_resv.fence for a discussion of the semantics.
 */
void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
			enum dma_resv_usage usage)
{
	if (usage == DMA_RESV_USAGE_WRITE)
		dma_resv_add_excl_fence(obj, fence);
	else
		dma_resv_add_shared_fence(obj, fence);
}
EXPORT_SYMBOL(dma_resv_add_fence);

/* Restart the iterator by initializing all the necessary fields, but not the
 * relation to the dma_resv object. */
@@ -574,7 +600,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
		}

		dma_fence_get(f);
		if (dma_resv_iter_is_exclusive(&cursor))
		if (dma_resv_iter_usage(&cursor) == DMA_RESV_USAGE_WRITE)
			excl = f;
		else
			RCU_INIT_POINTER(list->shared[list->shared_count++], f);
@@ -771,13 +797,13 @@ EXPORT_SYMBOL_GPL(dma_resv_test_signaled);
 */
void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq)
{
	static const char *usage[] = { "write", "read" };
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_for_each_fence(&cursor, obj, DMA_RESV_USAGE_READ, fence) {
		seq_printf(seq, "\t%s fence:",
			   dma_resv_iter_is_exclusive(&cursor) ?
				"Exclusive" : "Shared");
			   usage[dma_resv_iter_usage(&cursor)]);
		dma_fence_describe(fence, seq);
	}
}
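
The kernel-doc added for dma_resv_add_fence() above spells out its calling convention: the reservation object must be held via dma_resv_lock() and a slot must have been reserved with dma_resv_reserve_fences() before the fence is added. A hedged sketch of that sequence, with attach_fence() as a made-up helper name and error handling kept minimal:

	#include <linux/dma-fence.h>
	#include <linux/dma-resv.h>

	/* Illustrative only; not part of this patch. */
	static int attach_fence(struct dma_resv *obj, struct dma_fence *fence)
	{
		int ret;

		ret = dma_resv_lock(obj, NULL);
		if (ret)
			return ret;

		/* Guarantee a free fence slot before adding the fence. */
		ret = dma_resv_reserve_fences(obj, 1);
		if (!ret)
			dma_resv_add_fence(obj, fence, DMA_RESV_USAGE_WRITE);

		dma_resv_unlock(obj);
		return ret;
	}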
drivers/dma-buf/st-dma-resv.c  +27 −74
@@ -58,8 +58,9 @@ static int sanitycheck(void *arg)
	return r;
}

static int test_signaling(void *arg, enum dma_resv_usage usage)
static int test_signaling(void *arg)
{
	enum dma_resv_usage usage = (unsigned long)arg;
	struct dma_resv resv;
	struct dma_fence *f;
	int r;
@@ -81,11 +82,7 @@ static int test_signaling(void *arg, enum dma_resv_usage usage)
		goto err_unlock;
	}

	if (usage >= DMA_RESV_USAGE_READ)
		dma_resv_add_shared_fence(&resv, f);
	else
		dma_resv_add_excl_fence(&resv, f);

	dma_resv_add_fence(&resv, f, usage);
	if (dma_resv_test_signaled(&resv, usage)) {
		pr_err("Resv unexpectedly signaled\n");
		r = -EINVAL;
@@ -105,18 +102,9 @@ static int test_signaling(void *arg, enum dma_resv_usage usage)
	return r;
}

static int test_excl_signaling(void *arg)
{
	return test_signaling(arg, DMA_RESV_USAGE_WRITE);
}

static int test_shared_signaling(void *arg)
{
	return test_signaling(arg, DMA_RESV_USAGE_READ);
}

static int test_for_each(void *arg, enum dma_resv_usage usage)
static int test_for_each(void *arg)
{
	enum dma_resv_usage usage = (unsigned long)arg;
	struct dma_resv_iter cursor;
	struct dma_fence *f, *fence;
	struct dma_resv resv;
@@ -139,10 +127,7 @@ static int test_for_each(void *arg, enum dma_resv_usage usage)
		goto err_unlock;
	}

	if (usage >= DMA_RESV_USAGE_READ)
		dma_resv_add_shared_fence(&resv, f);
	else
		dma_resv_add_excl_fence(&resv, f);
	dma_resv_add_fence(&resv, f, usage);

	r = -ENOENT;
	dma_resv_for_each_fence(&cursor, &resv, usage, fence) {
@@ -156,8 +141,7 @@ static int test_for_each(void *arg, enum dma_resv_usage usage)
			r = -EINVAL;
			goto err_unlock;
		}
		if (dma_resv_iter_is_exclusive(&cursor) !=
		    (usage >= DMA_RESV_USAGE_READ)) {
		if (dma_resv_iter_usage(&cursor) != usage) {
			pr_err("Unexpected fence usage\n");
			r = -EINVAL;
			goto err_unlock;
@@ -177,18 +161,9 @@ static int test_for_each(void *arg, enum dma_resv_usage usage)
	return r;
}

static int test_excl_for_each(void *arg)
{
	return test_for_each(arg, DMA_RESV_USAGE_WRITE);
}

static int test_shared_for_each(void *arg)
{
	return test_for_each(arg, DMA_RESV_USAGE_READ);
}

static int test_for_each_unlocked(void *arg, enum dma_resv_usage usage)
static int test_for_each_unlocked(void *arg)
{
	enum dma_resv_usage usage = (unsigned long)arg;
	struct dma_resv_iter cursor;
	struct dma_fence *f, *fence;
	struct dma_resv resv;
@@ -212,10 +187,7 @@ static int test_for_each_unlocked(void *arg, enum dma_resv_usage usage)
		goto err_free;
	}

	if (usage >= DMA_RESV_USAGE_READ)
		dma_resv_add_shared_fence(&resv, f);
	else
		dma_resv_add_excl_fence(&resv, f);
	dma_resv_add_fence(&resv, f, usage);
	dma_resv_unlock(&resv);

	r = -ENOENT;
@@ -235,8 +207,7 @@ static int test_for_each_unlocked(void *arg, enum dma_resv_usage usage)
			r = -EINVAL;
			goto err_iter_end;
		}
		if (dma_resv_iter_is_exclusive(&cursor) !=
		    (usage >= DMA_RESV_USAGE_READ)) {
		if (dma_resv_iter_usage(&cursor) != usage) {
			pr_err("Unexpected fence usage\n");
			r = -EINVAL;
			goto err_iter_end;
@@ -262,18 +233,9 @@ static int test_for_each_unlocked(void *arg, enum dma_resv_usage usage)
	return r;
}

static int test_excl_for_each_unlocked(void *arg)
{
	return test_for_each_unlocked(arg, DMA_RESV_USAGE_WRITE);
}

static int test_shared_for_each_unlocked(void *arg)
{
	return test_for_each_unlocked(arg, DMA_RESV_USAGE_READ);
}

static int test_get_fences(void *arg, enum dma_resv_usage usage)
static int test_get_fences(void *arg)
{
	enum dma_resv_usage usage = (unsigned long)arg;
	struct dma_fence *f, **fences = NULL;
	struct dma_resv resv;
	int r, i;
@@ -296,10 +258,7 @@ static int test_get_fences(void *arg, enum dma_resv_usage usage)
		goto err_resv;
	}

	if (usage >= DMA_RESV_USAGE_READ)
		dma_resv_add_shared_fence(&resv, f);
	else
		dma_resv_add_excl_fence(&resv, f);
	dma_resv_add_fence(&resv, f, usage);
	dma_resv_unlock(&resv);

	r = dma_resv_get_fences(&resv, usage, &i, &fences);
@@ -324,30 +283,24 @@ static int test_get_fences(void *arg, enum dma_resv_usage usage)
	return r;
}

static int test_excl_get_fences(void *arg)
{
	return test_get_fences(arg, DMA_RESV_USAGE_WRITE);
}

static int test_shared_get_fences(void *arg)
{
	return test_get_fences(arg, DMA_RESV_USAGE_READ);
}

int dma_resv(void)
{
	static const struct subtest tests[] = {
		SUBTEST(sanitycheck),
		SUBTEST(test_excl_signaling),
		SUBTEST(test_shared_signaling),
		SUBTEST(test_excl_for_each),
		SUBTEST(test_shared_for_each),
		SUBTEST(test_excl_for_each_unlocked),
		SUBTEST(test_shared_for_each_unlocked),
		SUBTEST(test_excl_get_fences),
		SUBTEST(test_shared_get_fences),
		SUBTEST(test_signaling),
		SUBTEST(test_for_each),
		SUBTEST(test_for_each_unlocked),
		SUBTEST(test_get_fences),
	};
	enum dma_resv_usage usage;
	int r;

	spin_lock_init(&fence_lock);
	return subtests(tests, NULL);
	for (usage = DMA_RESV_USAGE_WRITE; usage <= DMA_RESV_USAGE_READ;
	     ++usage) {
		r = subtests(tests, (void *)(unsigned long)usage);
		if (r)
			return r;
	}
	return 0;
}
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c  +1 −1
@@ -263,7 +263,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
	 */
	replacement = dma_fence_get_stub();
	dma_resv_replace_fences(bo->tbo.base.resv, ef->base.context,
				replacement);
				replacement, DMA_RESV_USAGE_READ);
	dma_fence_put(replacement);
	return 0;
}
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c  +2 −2
@@ -55,8 +55,8 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
	bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
	p->uf_entry.priority = 0;
	p->uf_entry.tv.bo = &bo->tbo;
	/* One for TTM and one for the CS job */
	p->uf_entry.tv.num_shared = 2;
	/* One for TTM and two for the CS job */
	p->uf_entry.tv.num_shared = 3;

	drm_gem_object_put(gobj);

drivers/gpu/drm/amd/amdgpu/amdgpu_object.c  +2 −4
@@ -1397,10 +1397,8 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		return;
	}

	if (shared)
		dma_resv_add_shared_fence(resv, fence);
	else
		dma_resv_add_excl_fence(resv, fence);
	dma_resv_add_fence(resv, fence, shared ? DMA_RESV_USAGE_READ :
			   DMA_RESV_USAGE_WRITE);
}

/**