Commit a6ae74c9 authored by Rob Clark

drm/msm/gem: Add obj->lock wrappers

This will make it easier to transition over to obj->resv locking for
everything that is per-bo locking.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Reviewed-by: Kristian H. Kristensen <hoegsberg@google.com>
Signed-off-by: Rob Clark <robdclark@chromium.org>
parent 96c876f1
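
The wrappers keep every call site agnostic to which lock actually backs them, so the transition to obj->resv locking mentioned in the commit message only has to touch the helpers, not the callers converted in the diff below. A minimal, hypothetical sketch of that later step (not part of this commit; it assumes the helpers are simply re-pointed at the GEM object's reservation lock):

```c
/* Hypothetical follow-up, not in this commit: reimplement the wrappers
 * on the DRM GEM object's reservation lock (obj->resv) so every call
 * site converted below picks up dma_resv locking without changes.
 */
#include <drm/drm_gem.h>
#include <linux/dma-resv.h>

static inline void
msm_gem_lock(struct drm_gem_object *obj)
{
	dma_resv_lock(obj->resv, NULL);	/* uninterruptible, no ww ctx */
}

static inline int
msm_gem_lock_interruptible(struct drm_gem_object *obj)
{
	return dma_resv_lock_interruptible(obj->resv, NULL);
}

static inline void
msm_gem_unlock(struct drm_gem_object *obj)
{
	dma_resv_unlock(obj->resv);
}

static inline bool
msm_gem_is_locked(struct drm_gem_object *obj)
{
	return dma_resv_is_locked(obj->resv);
}
```

With a swap like that, callers such as msm_gem_get_pages() and msm_gem_fault() below would stay exactly as this commit leaves them.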
drivers/gpu/drm/msm/msm_gem.c +46 −53
@@ -177,15 +177,15 @@ struct page **msm_gem_get_pages(struct drm_gem_object *obj)
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct page **p;

-	mutex_lock(&msm_obj->lock);
+	msm_gem_lock(obj);

 	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
-		mutex_unlock(&msm_obj->lock);
+		msm_gem_unlock(obj);
 		return ERR_PTR(-EBUSY);
 	}

 	p = get_pages(obj);
-	mutex_unlock(&msm_obj->lock);
+	msm_gem_unlock(obj);
 	return p;
 }

@@ -251,14 +251,14 @@ vm_fault_t msm_gem_fault(struct vm_fault *vmf)
 	 * vm_ops.open/drm_gem_mmap_obj and close get and put
 	 * a reference on obj. So, we dont need to hold one here.
 	 */
-	err = mutex_lock_interruptible(&msm_obj->lock);
+	err = msm_gem_lock_interruptible(obj);
 	if (err) {
 		ret = VM_FAULT_NOPAGE;
 		goto out;
 	}

 	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
-		mutex_unlock(&msm_obj->lock);
+		msm_gem_unlock(obj);
 		return VM_FAULT_SIGBUS;
 	}

@@ -279,7 +279,7 @@ vm_fault_t msm_gem_fault(struct vm_fault *vmf)

 	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
 out_unlock:
-	mutex_unlock(&msm_obj->lock);
+	msm_gem_unlock(obj);
 out:
 	return ret;
 }
@@ -288,10 +288,9 @@ vm_fault_t msm_gem_fault(struct vm_fault *vmf)
 static uint64_t mmap_offset(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	int ret;

-	WARN_ON(!mutex_is_locked(&msm_obj->lock));
+	WARN_ON(!msm_gem_is_locked(obj));

 	/* Make it mmapable */
 	ret = drm_gem_create_mmap_offset(obj);
@@ -307,11 +306,10 @@ static uint64_t mmap_offset(struct drm_gem_object *obj)
 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
 {
 	uint64_t offset;
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);

-	mutex_lock(&msm_obj->lock);
+	msm_gem_lock(obj);
 	offset = mmap_offset(obj);
-	mutex_unlock(&msm_obj->lock);
+	msm_gem_unlock(obj);
 	return offset;
 }

@@ -321,7 +319,7 @@ static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_gem_vma *vma;

-	WARN_ON(!mutex_is_locked(&msm_obj->lock));
+	WARN_ON(!msm_gem_is_locked(obj));

 	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
 	if (!vma)
@@ -340,7 +338,7 @@ static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_gem_vma *vma;

-	WARN_ON(!mutex_is_locked(&msm_obj->lock));
+	WARN_ON(!msm_gem_is_locked(obj));

 	list_for_each_entry(vma, &msm_obj->vmas, list) {
 		if (vma->aspace == aspace)
@@ -359,14 +357,14 @@ static void del_vma(struct msm_gem_vma *vma)
 	kfree(vma);
 }

-/* Called with msm_obj->lock locked */
+/* Called with msm_obj locked */
 static void
 put_iova(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_gem_vma *vma, *tmp;

-	WARN_ON(!mutex_is_locked(&msm_obj->lock));
+	WARN_ON(!msm_gem_is_locked(obj));

 	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
 		if (vma->aspace) {
@@ -381,11 +379,10 @@ static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace, uint64_t *iova,
 		u64 range_start, u64 range_end)
 {
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_gem_vma *vma;
 	int ret = 0;

-	WARN_ON(!mutex_is_locked(&msm_obj->lock));
+	WARN_ON(!msm_gem_is_locked(obj));

 	vma = lookup_vma(obj, aspace);

@@ -420,7 +417,7 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj,
 	if (msm_obj->flags & MSM_BO_MAP_PRIV)
 		prot |= IOMMU_PRIV;

-	WARN_ON(!mutex_is_locked(&msm_obj->lock));
+	WARN_ON(!msm_gem_is_locked(obj));

 	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
 		return -EBUSY;
@@ -445,11 +442,10 @@ int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace, uint64_t *iova,
 		u64 range_start, u64 range_end)
 {
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	u64 local;
 	int ret;

-	mutex_lock(&msm_obj->lock);
+	msm_gem_lock(obj);

 	ret = msm_gem_get_iova_locked(obj, aspace, &local,
 		range_start, range_end);
@@ -460,7 +456,7 @@ int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
 	if (!ret)
 		*iova = local;

-	mutex_unlock(&msm_obj->lock);
+	msm_gem_unlock(obj);
 	return ret;
 }

@@ -478,12 +474,11 @@ int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
 int msm_gem_get_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace, uint64_t *iova)
 {
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	int ret;

-	mutex_lock(&msm_obj->lock);
+	msm_gem_lock(obj);
 	ret = msm_gem_get_iova_locked(obj, aspace, iova, 0, U64_MAX);
-	mutex_unlock(&msm_obj->lock);
+	msm_gem_unlock(obj);

 	return ret;
 }
@@ -494,12 +489,11 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
 uint64_t msm_gem_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace)
 {
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_gem_vma *vma;

-	mutex_lock(&msm_obj->lock);
+	msm_gem_lock(obj);
 	vma = lookup_vma(obj, aspace);
-	mutex_unlock(&msm_obj->lock);
+	msm_gem_unlock(obj);
 	WARN_ON(!vma);

 	return vma ? vma->iova : 0;
@@ -513,16 +507,15 @@ uint64_t msm_gem_iova(struct drm_gem_object *obj,
 void msm_gem_unpin_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace)
 {
-	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	struct msm_gem_vma *vma;

-	mutex_lock(&msm_obj->lock);
+	msm_gem_lock(obj);
 	vma = lookup_vma(obj, aspace);

 	if (!WARN_ON(!vma))
 		msm_gem_unmap_vma(aspace, vma);

-	mutex_unlock(&msm_obj->lock);
+	msm_gem_unlock(obj);
 }

 int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
@@ -563,20 +556,20 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
 	if (obj->import_attach)
 		return ERR_PTR(-ENODEV);

-	mutex_lock(&msm_obj->lock);
+	msm_gem_lock(obj);

 	if (WARN_ON(msm_obj->madv > madv)) {
 		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
 			msm_obj->madv, madv);
-		mutex_unlock(&msm_obj->lock);
+		msm_gem_unlock(obj);
 		return ERR_PTR(-EBUSY);
 	}

 	/* increment vmap_count *before* vmap() call, so shrinker can
-	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
+	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
 	 * This guarantees that we won't try to msm_gem_vunmap() this
 	 * same object from within the vmap() call (while we already
-	 * hold msm_obj->lock)
+	 * hold msm_obj lock)
 	 */
 	msm_obj->vmap_count++;

@@ -594,12 +587,12 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
 		}
 	}

-	mutex_unlock(&msm_obj->lock);
+	msm_gem_unlock(obj);
 	return msm_obj->vaddr;

 fail:
 	msm_obj->vmap_count--;
-	mutex_unlock(&msm_obj->lock);
+	msm_gem_unlock(obj);
 	return ERR_PTR(ret);
 }

@@ -623,10 +616,10 @@ void msm_gem_put_vaddr(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);

-	mutex_lock(&msm_obj->lock);
+	msm_gem_lock(obj);
 	WARN_ON(msm_obj->vmap_count < 1);
 	msm_obj->vmap_count--;
-	mutex_unlock(&msm_obj->lock);
+	msm_gem_unlock(obj);
 }

 /* Update madvise status, returns true if not purged, else
@@ -636,7 +629,7 @@ int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);

-	mutex_lock(&msm_obj->lock);
+	msm_gem_lock(obj);

 	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

@@ -645,7 +638,7 @@ int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)

 	madv = msm_obj->madv;

-	mutex_unlock(&msm_obj->lock);
+	msm_gem_unlock(obj);

 	return (madv != __MSM_MADV_PURGED);
 }
@@ -682,14 +675,14 @@ void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
 	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
 			0, (loff_t)-1);

-	mutex_unlock(&msm_obj->lock);
+	msm_gem_unlock(obj);
 }

 static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);

-	WARN_ON(!mutex_is_locked(&msm_obj->lock));
+	WARN_ON(!msm_gem_is_locked(obj));

 	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
 		return;
@@ -704,7 +697,7 @@ void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)

 	mutex_lock_nested(&msm_obj->lock, subclass);
 	msm_gem_vunmap_locked(obj);
-	mutex_unlock(&msm_obj->lock);
+	msm_gem_unlock(obj);
 }

 /* must be called before _move_to_active().. */
@@ -815,7 +808,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 	uint64_t off = drm_vma_node_start(&obj->vma_node);
 	const char *madv;

-	mutex_lock(&msm_obj->lock);
+	msm_gem_lock(obj);

 	switch (msm_obj->madv) {
 	case __MSM_MADV_PURGED:
@@ -883,7 +876,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 		describe_fence(fence, "Exclusive", m);
 	rcu_read_unlock();

-	mutex_unlock(&msm_obj->lock);
+	msm_gem_unlock(obj);
 }

 void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
@@ -928,7 +921,7 @@ static void free_object(struct msm_gem_object *msm_obj)

 	list_del(&msm_obj->mm_list);

-	mutex_lock(&msm_obj->lock);
+	msm_gem_lock(obj);

 	put_iova(obj);

@@ -949,7 +942,7 @@ static void free_object(struct msm_gem_object *msm_obj)

 	drm_gem_object_release(obj);

-	mutex_unlock(&msm_obj->lock);
+	msm_gem_unlock(obj);
 	kfree(msm_obj);
 }

@@ -1069,10 +1062,10 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
 		struct msm_gem_vma *vma;
 		struct page **pages;

-		mutex_lock(&msm_obj->lock);
+		msm_gem_lock(obj);

 		vma = add_vma(obj, NULL);
-		mutex_unlock(&msm_obj->lock);
+		msm_gem_unlock(obj);
 		if (IS_ERR(vma)) {
 			ret = PTR_ERR(vma);
 			goto fail;
@@ -1156,22 +1149,22 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 	npages = size / PAGE_SIZE;

 	msm_obj = to_msm_bo(obj);
-	mutex_lock(&msm_obj->lock);
+	msm_gem_lock(obj);
 	msm_obj->sgt = sgt;
 	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
 	if (!msm_obj->pages) {
-		mutex_unlock(&msm_obj->lock);
+		msm_gem_unlock(obj);
 		ret = -ENOMEM;
 		goto fail;
 	}

 	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
 	if (ret) {
-		mutex_unlock(&msm_obj->lock);
+		msm_gem_unlock(obj);
 		goto fail;
 	}

-	mutex_unlock(&msm_obj->lock);
+	msm_gem_unlock(obj);

 	mutex_lock(&dev->struct_mutex);
 	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
drivers/gpu/drm/msm/msm_gem.h +28 −0
@@ -93,6 +93,34 @@ struct msm_gem_object {
 };
 #define to_msm_bo(x) container_of(x, struct msm_gem_object, base)

+static inline void
+msm_gem_lock(struct drm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	mutex_lock(&msm_obj->lock);
+}
+
+static inline int
+msm_gem_lock_interruptible(struct drm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	return mutex_lock_interruptible(&msm_obj->lock);
+}
+
+static inline void
+msm_gem_unlock(struct drm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	mutex_unlock(&msm_obj->lock);
+}
+
+static inline bool
+msm_gem_is_locked(struct drm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	return mutex_is_locked(&msm_obj->lock);
+}
+
 static inline bool is_active(struct msm_gem_object *msm_obj)
 {
 	return atomic_read(&msm_obj->active_count);