Commit 71df0368 authored by Thomas Zimmermann

drm/amdgpu: Implement mmap as GEM object function



Moving the driver-specific mmap code into a GEM object function allows
for using DRM helpers for various mmap callbacks.

This change resolves several inconsistencies between regular mmap and
prime-based mmap. The vm_ops field in the vma is now set for all mmap'ed
areas. Previously, it was only set for regular mmap calls; prime-based
mmap used TTM's default vm_ops. The function amdgpu_verify_access() is
no longer called and is therefore removed by this patch.

As a side effect, amdgpu_ttm_vm_ops and amdgpu_ttm_fault() are now
implemented in amdgpu's GEM code.
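
For reference, the pattern looks roughly like this (a minimal sketch using
hypothetical foo_* names, not amdgpu's actual code; the hunks below are the
real implementation). The per-object mmap callback and vm_ops live in
struct drm_gem_object_funcs, and the generic helpers drm_gem_mmap() and
drm_gem_prime_mmap() both dispatch to them, so regular and prime-based
mmap share a single path:

#include <linux/fs.h>
#include <linux/module.h>

#include <drm/drm_drv.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_prime.h>
#include <drm/ttm/ttm_bo_api.h>

/* Generic TTM helpers back the per-mapping operations. */
static const struct vm_operations_struct foo_gem_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access,
};

/* Called for both regular and prime-based mmap. */
static int foo_gem_object_mmap(struct drm_gem_object *obj,
			       struct vm_area_struct *vma)
{
	/* driver-specific access checks would go here */
	return drm_gem_ttm_mmap(obj, vma);
}

static const struct drm_gem_object_funcs foo_gem_object_funcs = {
	.mmap = foo_gem_object_mmap,
	.vm_ops = &foo_gem_vm_ops,	/* installed by the DRM core */
};

static const struct file_operations foo_fops = {
	.owner = THIS_MODULE,
	/* resolves the fake offset to a BO, calls obj->funcs->mmap */
	.mmap = drm_gem_mmap,
};

static const struct drm_driver foo_driver = {
	/* dma-buf mmap ends up in the same obj->funcs->mmap */
	.gem_prime_mmap = drm_gem_prime_mmap,
};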

v4:
	* rebased
v3:
	* rename mmap function to amdgpu_gem_object_mmap() (Christian)
	* remove unnecessary checks from mmap (Christian)
v2:
	* rename amdgpu_ttm_vm_ops and amdgpu_ttm_fault() to
	  amdgpu_gem_vm_ops and amdgpu_gem_fault() (Christian)
	* the check for kfd_bo has meanwhile been removed

Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
Reviewed-by: Christian König <christian.koenig@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210525151055.8174-3-tzimmermann@suse.de
parent ccd9fe97
+0 −46
@@ -42,52 +42,6 @@
#include <linux/pci-p2pdma.h>
#include <linux/pm_runtime.h>

-/**
- * amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation
- * @obj: GEM BO
- * @vma: Virtual memory area
- *
- * Sets up a userspace mapping of the BO's memory in the given
- * virtual memory area.
- *
- * Returns:
- * 0 on success or a negative error code on failure.
- */
-int amdgpu_gem_prime_mmap(struct drm_gem_object *obj,
-			  struct vm_area_struct *vma)
-{
-	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
-	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-	unsigned asize = amdgpu_bo_size(bo);
-	int ret;
-
-	if (!vma->vm_file)
-		return -ENODEV;
-
-	if (adev == NULL)
-		return -ENODEV;
-
-	/* Check for valid size. */
-	if (asize < vma->vm_end - vma->vm_start)
-		return -EINVAL;
-
-	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
-	    (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
-		return -EPERM;
-	}
-	vma->vm_pgoff += amdgpu_bo_mmap_offset(bo) >> PAGE_SHIFT;
-
-	/* prime mmap does not need to check access, so allow here */
-	ret = drm_vma_node_allow(&obj->vma_node, vma->vm_file->private_data);
-	if (ret)
-		return ret;
-
-	ret = ttm_bo_mmap(vma->vm_file, vma, &adev->mman.bdev);
-	drm_vma_node_revoke(&obj->vma_node, vma->vm_file->private_data);
-
-	return ret;
-}
-
static int
__dma_resv_make_exclusive(struct dma_resv *obj)
{
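
Viewed from userspace, nothing changes on the importer side. As a
hypothetical illustration (dmabuf_fd and bo_size are assumed to exist):
after this patch, mapping an exported buffer goes through the same
amdgpu_gem_object_mmap()/amdgpu_gem_vm_ops path as a regular mmap, instead
of ttm_bo_mmap() with TTM's default vm_ops:

#include <sys/mman.h>

/* dmabuf_fd was obtained via DRM_IOCTL_PRIME_HANDLE_TO_FD;
 * dma-buf mappings always start at offset 0. */
void *ptr = mmap(NULL, bo_size, PROT_READ | PROT_WRITE,
		 MAP_SHARED, dmabuf_fd, 0);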
+0 −2
@@ -31,8 +31,6 @@ struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf);
bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev,
				      struct amdgpu_bo *bo);
-int amdgpu_gem_prime_mmap(struct drm_gem_object *obj,
-			  struct vm_area_struct *vma);

extern const struct dma_buf_ops amdgpu_dmabuf_ops;

+2 −2
@@ -1695,7 +1695,7 @@ static const struct file_operations amdgpu_driver_kms_fops = {
	.flush = amdgpu_flush,
	.release = drm_release,
	.unlocked_ioctl = amdgpu_drm_ioctl,
-	.mmap = amdgpu_mmap,
+	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
#ifdef CONFIG_COMPAT
@@ -1762,7 +1762,7 @@ static const struct drm_driver amdgpu_kms_driver = {
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_import = amdgpu_gem_prime_import,
-	.gem_prime_mmap = amdgpu_gem_prime_mmap,
+	.gem_prime_mmap = drm_gem_prime_mmap,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
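
The regular path is also unchanged for userspace: it still asks the kernel
for the BO's fake mmap offset and then maps the DRM fd; the request is now
served by drm_gem_mmap() and amdgpu_gem_object_mmap() instead of
amdgpu_mmap()/ttm_bo_mmap(). A sketch (hypothetical helper, minimal error
handling):

#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/amdgpu_drm.h>

static void *map_bo(int drm_fd, uint32_t handle, size_t size)
{
	union drm_amdgpu_gem_mmap args = { .in = { .handle = handle } };

	/* Look up the BO's fake offset in the VMA offset manager. */
	if (ioctl(drm_fd, DRM_IOCTL_AMDGPU_GEM_MMAP, &args))
		return MAP_FAILED;

	/* This mmap now enters drm_gem_mmap(), which calls
	 * amdgpu_gem_object_mmap() and installs amdgpu_gem_vm_ops. */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    drm_fd, args.out.addr_ptr);
}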
+55 −0
@@ -32,6 +32,7 @@
#include <linux/dma-buf.h>

#include <drm/amdgpu_drm.h>
+#include <drm/drm_drv.h>
#include <drm/drm_gem_ttm_helper.h>

#include "amdgpu.h"
@@ -41,6 +42,46 @@

static const struct drm_gem_object_funcs amdgpu_gem_object_funcs;

+static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)
+{
+	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
+	struct drm_device *ddev = bo->base.dev;
+	vm_fault_t ret;
+	int idx;
+
+	ret = ttm_bo_vm_reserve(bo, vmf);
+	if (ret)
+		return ret;
+
+	if (drm_dev_enter(ddev, &idx)) {
+		ret = amdgpu_bo_fault_reserve_notify(bo);
+		if (ret) {
+			drm_dev_exit(idx);
+			goto unlock;
+		}
+
+		 ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
+						TTM_BO_VM_NUM_PREFAULT, 1);
+
+		 drm_dev_exit(idx);
+	} else {
+		ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
+	}
+	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+		return ret;
+
+unlock:
+	dma_resv_unlock(bo->base.resv);
+	return ret;
+}
+
+static const struct vm_operations_struct amdgpu_gem_vm_ops = {
+	.fault = amdgpu_gem_fault,
+	.open = ttm_bo_vm_open,
+	.close = ttm_bo_vm_close,
+	.access = ttm_bo_vm_access
+};
+
static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);
@@ -205,6 +246,18 @@ static void amdgpu_gem_object_close(struct drm_gem_object *obj,
	ttm_eu_backoff_reservation(&ticket, &list);
}

+static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
+	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
+		return -EPERM;
+	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
+		return -EPERM;
+
+	return drm_gem_ttm_mmap(obj, vma);
+}
+
static const struct drm_gem_object_funcs amdgpu_gem_object_funcs = {
	.free = amdgpu_gem_object_free,
	.open = amdgpu_gem_object_open,
@@ -212,6 +265,8 @@ static const struct drm_gem_object_funcs amdgpu_gem_object_funcs = {
	.export = amdgpu_gem_prime_export,
	.vmap = drm_gem_ttm_vmap,
	.vunmap = drm_gem_ttm_vunmap,
+	.mmap = amdgpu_gem_object_mmap,
+	.vm_ops = &amdgpu_gem_vm_ops,
};

/*
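
The two -EPERM checks preserve the old behavior: userptr BOs and BOs
allocated with AMDGPU_GEM_CREATE_NO_CPU_ACCESS cannot be CPU-mapped. As a
hypothetical userspace illustration (error handling elided), a BO created
like this remains usable by the GPU, but any later mmap of it is rejected
in amdgpu_gem_object_mmap():

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

static uint32_t create_gpu_only_bo(int drm_fd)
{
	union drm_amdgpu_gem_create args = {
		.in = {
			.bo_size      = 4096,
			.alignment    = 4096,
			.domains      = AMDGPU_GEM_DOMAIN_VRAM,
			.domain_flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
		},
	};

	ioctl(drm_fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &args);
	/* mmap'ing this BO's fake offset now fails with EPERM */
	return args.out.handle;
}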
+0 −75
@@ -47,7 +47,6 @@
#include <drm/ttm/ttm_placement.h>

#include <drm/amdgpu_drm.h>
-#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_object.h"
@@ -166,25 +165,6 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
	*placement = abo->placement;
}

-/**
- * amdgpu_verify_access - Verify access for a mmap call
- *
- * @bo:	The buffer object to map
- * @filp: The file pointer from the process performing the mmap
- *
- * This is called by ttm_bo_mmap() to verify whether a process
- * has the right to mmap a BO to their process space.
- */
-static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
-{
-	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
-
-	if (amdgpu_ttm_tt_get_usermm(bo->ttm))
-		return -EPERM;
-	return drm_vma_node_verify_access(&abo->tbo.base.vma_node,
-					  filp->private_data);
-}
-
/**
 * amdgpu_ttm_map_buffer - Map memory into the GART windows
 * @bo: buffer object to map
@@ -1476,7 +1456,6 @@ static struct ttm_device_funcs amdgpu_bo_driver = {
	.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
	.evict_flags = &amdgpu_evict_flags,
	.move = &amdgpu_bo_move,
-	.verify_access = &amdgpu_verify_access,
	.delete_mem_notify = &amdgpu_bo_delete_mem_notify,
	.release_notify = &amdgpu_bo_release_notify,
	.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
@@ -1844,60 +1823,6 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
	adev->mman.buffer_funcs_enabled = enable;
}

-static vm_fault_t amdgpu_ttm_fault(struct vm_fault *vmf)
-{
-	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
-	struct drm_device *ddev = bo->base.dev;
-	vm_fault_t ret;
-	int idx;
-
-	ret = ttm_bo_vm_reserve(bo, vmf);
-	if (ret)
-		return ret;
-
-	if (drm_dev_enter(ddev, &idx)) {
-		ret = amdgpu_bo_fault_reserve_notify(bo);
-		if (ret) {
-			drm_dev_exit(idx);
-			goto unlock;
-		}
-
-		 ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
-						TTM_BO_VM_NUM_PREFAULT, 1);
-
-		 drm_dev_exit(idx);
-	} else {
-		ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
-	}
-	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
-		return ret;
-
-unlock:
-	dma_resv_unlock(bo->base.resv);
-	return ret;
-}
-
-static const struct vm_operations_struct amdgpu_ttm_vm_ops = {
-	.fault = amdgpu_ttm_fault,
-	.open = ttm_bo_vm_open,
-	.close = ttm_bo_vm_close,
-	.access = ttm_bo_vm_access
-};
-
-int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
-{
-	struct drm_file *file_priv = filp->private_data;
-	struct amdgpu_device *adev = drm_to_adev(file_priv->minor->dev);
-	int r;
-
-	r = ttm_bo_mmap(filp, vma, &adev->mman.bdev);
-	if (unlikely(r != 0))
-		return r;
-
-	vma->vm_ops = &amdgpu_ttm_vm_ops;
-	return 0;
-}
-
int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
		       uint64_t dst_offset, uint32_t byte_count,
		       struct dma_resv *resv,
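
With amdgpu_verify_access() and ttm_bo_mmap() gone, the per-file access
check they performed now happens in the DRM core before the driver is
called. Roughly (a sketch of the check in drm_gem_mmap(), not the verbatim
source):

struct drm_vma_offset_node *node;

/* done under the offset-manager lookup lock in the real code */
node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
					  vma->vm_pgoff,
					  vma_pages(vma));
if (!node || !drm_vma_node_is_allowed(node, priv))
	return -EACCES;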