Commit 0cc5fb4e authored by Daniel Vetter
Browse files

drm/shmem-helpers: Don't call get/put_pages on imported dma-buf in vmap



There's no direct harm, because for the shmem helpers these are noops
on imported buffers. The trouble is in the locks these take - I want
to change dma_buf_vmap locking, and so need to make sure that we only
ever take certain locks on one side of the dma-buf interface: Either
for exporters, or for importers.

v2: Change the control flow less compared to what's there (Thomas)

Tested-by: Boris Brezillon <boris.brezillon@collabora.com>
Cc: Thomas Zimmermann <tzimmermann@suse.de>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Cc: Rob Herring <robh@kernel.org>
Cc: Noralf Trønnes <noralf@tronnes.org>
Acked-by: Thomas Zimmermann <tzimmermann@suse.de>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200514202256.490926-1-daniel.vetter@ffwll.ch
parent 3a5a5971
Loading
Loading
Loading
Loading
+6 −5
Original line number Diff line number Diff line
@@ -252,15 +252,15 @@ static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
	if (shmem->vmap_use_count++ > 0)
		return shmem->vaddr;

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		goto err_zero_use;

	if (obj->import_attach) {
		shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
	} else {
		pgprot_t prot = PAGE_KERNEL;

		ret = drm_gem_shmem_get_pages(shmem);
		if (ret)
			goto err_zero_use;

		if (!shmem->map_cached)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
@@ -276,6 +276,7 @@ static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
	return shmem->vaddr;

err_put_pages:
	if (!obj->import_attach)
		drm_gem_shmem_put_pages(shmem);
err_zero_use:
	shmem->vmap_use_count = 0;