Commit d3fae3b3 authored by Christian König's avatar Christian König
Browse files

dma-buf: drop the _rcu postfix on function names v3



The functions can be called both in _rcu context as well
as while holding the lock.

v2: add some kerneldoc as suggested by Daniel
v3: fix indentation

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210602111714.212426-7-christian.koenig@amd.com
parent 6b41323a
Loading
Loading
Loading
Loading
+1 −2
Original line number Diff line number Diff line
@@ -1147,8 +1147,7 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
	long ret;

	/* Wait on any implicit rendering fences */
	ret = dma_resv_wait_timeout_rcu(resv, write, true,
						  MAX_SCHEDULE_TIMEOUT);
	ret = dma_resv_wait_timeout(resv, write, true, MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;

+17 −15
Original line number Diff line number Diff line
@@ -396,7 +396,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
EXPORT_SYMBOL(dma_resv_copy_fences);

/**
 * dma_resv_get_fences_rcu - Get an object's shared and exclusive
 * dma_resv_get_fences - Get an object's shared and exclusive
 * fences without update side lock held
 * @obj: the reservation object
 * @pfence_excl: the returned exclusive fence (or NULL)
@@ -408,8 +408,7 @@ EXPORT_SYMBOL(dma_resv_copy_fences);
 * exclusive fence is not specified the fence is put into the array of the
 * shared fences as well. Returns either zero or -ENOMEM.
 */
int dma_resv_get_fences_rcu(struct dma_resv *obj,
			    struct dma_fence **pfence_excl,
int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
			unsigned int *pshared_count,
			struct dma_fence ***pshared)
{
@@ -494,22 +493,23 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
	*pshared = shared;
	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu);
EXPORT_SYMBOL_GPL(dma_resv_get_fences);

/**
 * dma_resv_wait_timeout_rcu - Wait on reservation's objects
 * dma_resv_wait_timeout - Wait on reservation's objects
 * shared and/or exclusive fences.
 * @obj: the reservation object
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @intr: if true, do interruptible wait
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Callers are not required to hold specific locks, but maybe hold
 * dma_resv_lock() already
 * RETURNS
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zer on success.
 */
long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
			       bool wait_all, bool intr,
long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
			   unsigned long timeout)
{
	long ret = timeout ? timeout : 1;
@@ -582,7 +582,7 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
	rcu_read_unlock();
	goto retry;
}
EXPORT_SYMBOL_GPL(dma_resv_wait_timeout_rcu);
EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);


static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
@@ -602,16 +602,18 @@ static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
}

/**
 * dma_resv_test_signaled_rcu - Test if a reservation object's
 * fences have been signaled.
 * dma_resv_test_signaled - Test if a reservation object's fences have been
 * signaled.
 * @obj: the reservation object
 * @test_all: if true, test all fences, otherwise only test the exclusive
 * fence
 *
 * Callers are not required to hold specific locks, but maybe hold
 * dma_resv_lock() already
 * RETURNS
 * true if all fences signaled, else false
 */
bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all)
{
	unsigned int seq, shared_count;
	int ret;
@@ -660,7 +662,7 @@ bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu);
EXPORT_SYMBOL_GPL(dma_resv_test_signaled);

#if IS_ENABLED(CONFIG_LOCKDEP)
static int __init dma_resv_lockdep(void)
+2 −3
Original line number Diff line number Diff line
@@ -203,9 +203,8 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
		goto unpin;
	}

	r = dma_resv_get_fences_rcu(new_abo->tbo.base.resv, &work->excl,
					      &work->shared_count,
					      &work->shared);
	r = dma_resv_get_fences(new_abo->tbo.base.resv, &work->excl,
				&work->shared_count, &work->shared);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to get fences for buffer\n");
		goto unpin;
+1 −1
Original line number Diff line number Diff line
@@ -52,7 +52,7 @@ __dma_resv_make_exclusive(struct dma_resv *obj)
	if (!dma_resv_shared_list(obj)) /* no shared fences to convert */
		return 0;

	r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences);
	r = dma_resv_get_fences(obj, NULL, &count, &fences);
	if (r)
		return r;

+1 −2
Original line number Diff line number Diff line
@@ -526,8 +526,7 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true,
						  timeout);
	ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, timeout);

	/* ret == 0 means not signaled,
	 * ret > 0 means signaled
Loading