Commit 668b2066 authored by Zack Rusin
Browse files

drm/vmwgfx: Stop using raw ttm_buffer_object's



Various bits of the driver used raw ttm_buffer_object instead of the
driver specific vmw_bo object. All those places used to duplicate
the mapped bo caching policy of vmw_bo.

Instead of duplicating all of that code and special casing various
functions to work both with vmw_bo and raw ttm_buffer_object's unify
the buffer object handling code.

As part of that work fix the naming of bo's, e.g. instead of generic
backup use 'guest_memory' because that's what it really is.

All of it makes the driver easier to maintain and the code easier to
read. Saves 100+ loc as well.

Signed-off-by: Zack Rusin <zackr@vmware.com>
Reviewed-by: Martin Krastev <krastevm@vmware.com>
Reviewed-by: Maaz Mombasawala <mombasawalam@vmware.com>
Acked-by: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patchwork.freedesktop.org/patch/msgid/20230131033542.953249-9-zack@kde.org
parent 39985eea
Loading
Loading
Loading
Loading
+57 −141
Original line number Diff line number Diff line
@@ -32,6 +32,12 @@

#include <drm/ttm/ttm_placement.h>

static void vmw_bo_release(struct vmw_bo *vbo)
{
	vmw_bo_unmap(vbo);
	drm_gem_object_release(&vbo->tbo.base);
}

/**
 * vmw_bo_free - vmw_bo destructor
 *
@@ -43,26 +49,10 @@ static void vmw_bo_free(struct ttm_buffer_object *bo)

	WARN_ON(vbo->dirty);
	WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
	vmw_bo_unmap(vbo);
	drm_gem_object_release(&bo->base);
	vmw_bo_release(vbo);
	kfree(vbo);
}

/**
 * bo_is_vmw - check if the buffer object is a &vmw_bo
 * @bo: ttm buffer object to be checked
 *
 * Uses destroy function associated with the object to determine if this is
 * a &vmw_bo.
 *
 * Returns:
 * true if the object is of &vmw_bo type, false if not.
 */
static bool bo_is_vmw(struct ttm_buffer_object *bo)
{
	return bo->destroy == &vmw_bo_free;
}

/**
 * vmw_bo_pin_in_placement - Validate a buffer to placement.
 *
@@ -79,7 +69,7 @@ static int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
				   bool interruptible)
{
	struct ttm_operation_ctx ctx = {interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	struct ttm_buffer_object *bo = &buf->tbo;
	int ret;

	vmw_execbuf_release_pinned_bo(dev_priv);
@@ -115,7 +105,7 @@ int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
			      bool interruptible)
{
	struct ttm_operation_ctx ctx = {interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	struct ttm_buffer_object *bo = &buf->tbo;
	int ret;

	vmw_execbuf_release_pinned_bo(dev_priv);
@@ -184,7 +174,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
				bool interruptible)
{
	struct ttm_operation_ctx ctx = {interruptible, false };
	struct ttm_buffer_object *bo = &buf->base;
	struct ttm_buffer_object *bo = &buf->tbo;
	int ret = 0;

	vmw_execbuf_release_pinned_bo(dev_priv);
@@ -200,7 +190,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
	if (bo->resource->mem_type == TTM_PL_VRAM &&
	    bo->resource->start < PFN_UP(bo->resource->size) &&
	    bo->resource->start > 0 &&
	    buf->base.pin_count == 0) {
	    buf->tbo.pin_count == 0) {
		ctx.interruptible = false;
		vmw_bo_placement_set(buf,
				     VMW_BO_DOMAIN_SYS,
@@ -241,7 +231,7 @@ int vmw_bo_unpin(struct vmw_private *dev_priv,
		 struct vmw_bo *buf,
		 bool interruptible)
{
	struct ttm_buffer_object *bo = &buf->base;
	struct ttm_buffer_object *bo = &buf->tbo;
	int ret;

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
@@ -288,7 +278,7 @@ void vmw_bo_pin_reserved(struct vmw_bo *vbo, bool pin)
	struct ttm_operation_ctx ctx = { false, true };
	struct ttm_place pl;
	struct ttm_placement placement;
	struct ttm_buffer_object *bo = &vbo->base;
	struct ttm_buffer_object *bo = &vbo->tbo;
	uint32_t old_mem_type = bo->resource->mem_type;
	int ret;

@@ -333,7 +323,7 @@ void vmw_bo_pin_reserved(struct vmw_bo *vbo, bool pin)
 */
void *vmw_bo_map_and_cache(struct vmw_bo *vbo)
{
	struct ttm_buffer_object *bo = &vbo->base;
	struct ttm_buffer_object *bo = &vbo->tbo;
	bool not_used;
	void *virtual;
	int ret;
@@ -364,64 +354,58 @@ void vmw_bo_unmap(struct vmw_bo *vbo)
		return;

	ttm_bo_kunmap(&vbo->map);
	vbo->map.bo = NULL;
}

/* default destructor */
static void vmw_bo_default_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * vmw_bo_create_kernel - Create a pinned BO for internal kernel use.
 * vmw_bo_init - Initialize a vmw buffer object
 *
 * @dev_priv: Pointer to the device private struct
 * @size: size of the BO we need
 * @placement: where to put it
 * @p_bo: resulting BO
 * @vmw_bo: Buffer object to initialize
 * @params: Parameters used to initialize the buffer object
 * @destroy: The function used to delete the buffer object
 * Returns: Zero on success, negative error code on error.
 *
 * Creates and pin a simple BO for in kernel use.
 */
int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
			 struct ttm_placement *placement,
			 struct ttm_buffer_object **p_bo)
static int vmw_bo_init(struct vmw_private *dev_priv,
		       struct vmw_bo *vmw_bo,
		       struct vmw_bo_params *params,
		       void (*destroy)(struct ttm_buffer_object *))
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.interruptible = params->bo_type != ttm_bo_type_kernel,
		.no_wait_gpu = false
	};
	struct ttm_buffer_object *bo;
	struct ttm_device *bdev = &dev_priv->bdev;
	struct drm_device *vdev = &dev_priv->drm;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (unlikely(!bo))
		return -ENOMEM;
	memset(vmw_bo, 0, sizeof(*vmw_bo));

	size = ALIGN(size, PAGE_SIZE);
	BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
	vmw_bo->tbo.priority = 3;
	vmw_bo->res_tree = RB_ROOT;

	drm_gem_private_object_init(vdev, &bo->base, size);
	params->size = ALIGN(params->size, PAGE_SIZE);
	drm_gem_private_object_init(vdev, &vmw_bo->tbo.base, params->size);

	ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, ttm_bo_type_kernel,
				   placement, 0, &ctx, NULL, NULL,
				   vmw_bo_default_destroy);
	vmw_bo_placement_set(vmw_bo, params->domain, params->busy_domain);
	ret = ttm_bo_init_reserved(bdev, &vmw_bo->tbo, params->bo_type,
				   &vmw_bo->placement, 0, &ctx, NULL,
				   NULL, destroy);
	if (unlikely(ret))
		goto error_free;
		return ret;

	ttm_bo_pin(bo);
	ttm_bo_unreserve(bo);
	*p_bo = bo;
	if (params->pin)
		ttm_bo_pin(&vmw_bo->tbo);
	ttm_bo_unreserve(&vmw_bo->tbo);

	return 0;

error_free:
	kfree(bo);
	return ret;
}

int vmw_bo_create(struct vmw_private *vmw,
		  size_t size, u32 domain, u32 busy_domain,
		  bool interruptible, bool pin,
		  struct vmw_bo_params *params,
		  struct vmw_bo **p_bo)
{
	int ret;
@@ -432,9 +416,7 @@ int vmw_bo_create(struct vmw_private *vmw,
		return -ENOMEM;
	}

	ret = vmw_bo_init(vmw, *p_bo, size,
			  domain, busy_domain,
			  interruptible, pin);
	ret = vmw_bo_init(vmw, *p_bo, params, vmw_bo_free);
	if (unlikely(ret != 0))
		goto out_error;

@@ -445,57 +427,6 @@ int vmw_bo_create(struct vmw_private *vmw,
	return ret;
}

/**
 * vmw_bo_init - Initialize a vmw buffer object
 *
 * @dev_priv: Pointer to the device private struct
 * @vmw_bo: Pointer to the struct vmw_bo to initialize.
 * @size: Buffer object size in bytes.
 * @domain: Domain to put the bo in.
 * @busy_domain: Domain to put the bo if busy.
 * @interruptible: Whether waits should be performed interruptible.
 * @pin: If the BO should be created pinned at a fixed location.
 * Returns: Zero on success, negative error code on error.
 *
 * Note that on error, the code will free the buffer object.
 */
int vmw_bo_init(struct vmw_private *dev_priv,
		struct vmw_bo *vmw_bo,
		size_t size,
		u32 domain,
		u32 busy_domain,
		bool interruptible, bool pin)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = interruptible,
		.no_wait_gpu = false
	};
	struct ttm_device *bdev = &dev_priv->bdev;
	struct drm_device *vdev = &dev_priv->drm;
	int ret;

	memset(vmw_bo, 0, sizeof(*vmw_bo));
	BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
	vmw_bo->base.priority = 3;
	vmw_bo->res_tree = RB_ROOT;

	size = ALIGN(size, PAGE_SIZE);
	drm_gem_private_object_init(vdev, &vmw_bo->base.base, size);

	vmw_bo_placement_set(vmw_bo, domain, busy_domain);
	ret = ttm_bo_init_reserved(bdev, &vmw_bo->base, ttm_bo_type_device,
				   &vmw_bo->placement, 0, &ctx, NULL, NULL, vmw_bo_free);
	if (unlikely(ret)) {
		return ret;
	}

	if (pin)
		ttm_bo_pin(&vmw_bo->base);
	ttm_bo_unreserve(&vmw_bo->base);

	return 0;
}

/**
 * vmw_user_bo_synccpu_grab - Grab a struct vmw_bo for cpu
 * access, idling previous GPU operations on the buffer and optionally
@@ -514,7 +445,7 @@ static int vmw_user_bo_synccpu_grab(struct vmw_bo *vmw_bo,
				    uint32_t flags)
{
	bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
	struct ttm_buffer_object *bo = &vmw_bo->base;
	struct ttm_buffer_object *bo = &vmw_bo->tbo;
	int ret;

	if (flags & drm_vmw_synccpu_allow_cs) {
@@ -564,7 +495,7 @@ static int vmw_user_bo_synccpu_release(struct drm_file *filp,
		if (!(flags & drm_vmw_synccpu_allow_cs)) {
			atomic_dec(&vmw_bo->cpu_writers);
		}
		ttm_bo_put(&vmw_bo->base);
		ttm_bo_put(&vmw_bo->tbo);
	}

	return ret;
@@ -650,8 +581,7 @@ int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	drm_gem_handle_delete(file_priv, arg->handle);
	return 0;
	return drm_gem_handle_delete(file_priv, arg->handle);
}


@@ -667,7 +597,7 @@ int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
 * The vmw buffer object pointer will be refcounted.
 */
int vmw_user_bo_lookup(struct drm_file *filp,
		       uint32_t handle,
		       u32 handle,
		       struct vmw_bo **out)
{
	struct drm_gem_object *gobj;
@@ -680,7 +610,7 @@ int vmw_user_bo_lookup(struct drm_file *filp,
	}

	*out = to_vmw_bo(gobj);
	ttm_bo_get(&(*out)->base);
	ttm_bo_get(&(*out)->tbo);
	drm_gem_object_put(gobj);

	return 0;
@@ -702,8 +632,7 @@ void vmw_bo_fence_single(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence)
{
	struct ttm_device *bdev = bo->bdev;
	struct vmw_private *dev_priv =
		container_of(bdev, struct vmw_private, bdev);
	struct vmw_private *dev_priv = vmw_priv_from_ttm(bdev);
	int ret;

	if (fence == NULL)
@@ -773,10 +702,6 @@ int vmw_dumb_create(struct drm_file *file_priv,
 */
void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
{
	/* Is @bo embedded in a struct vmw_bo? */
	if (!bo_is_vmw(bo))
		return;

	/* Kill any cached kernel maps before swapout */
	vmw_bo_unmap(to_vmw_bo(&bo->base));
}
@@ -795,13 +720,7 @@ void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
void vmw_bo_move_notify(struct ttm_buffer_object *bo,
			struct ttm_resource *mem)
{
	struct vmw_bo *vbo;

	/* Make sure @bo is embedded in a struct vmw_bo? */
	if (!bo_is_vmw(bo))
		return;

	vbo = container_of(bo, struct vmw_bo, base);
	struct vmw_bo *vbo = to_vmw_bo(&bo->base);

	/*
	 * Kill any cached kernel maps before move to or from VRAM.
@@ -849,7 +768,6 @@ set_placement_list(struct ttm_place *pl, u32 domain)
		pl[n].lpfn = 0;
		n++;
	}
	WARN_ON((domain & VMW_BO_DOMAIN_WAITABLE_SYS) != 0);
	if (domain & VMW_BO_DOMAIN_WAITABLE_SYS) {
		pl[n].mem_type = VMW_PL_SYSTEM;
		pl[n].flags = 0;
@@ -878,9 +796,8 @@ set_placement_list(struct ttm_place *pl, u32 domain)

void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain)
{
	struct ttm_device *bdev = bo->base.bdev;
	struct vmw_private *vmw =
		container_of(bdev, struct vmw_private, bdev);
	struct ttm_device *bdev = bo->tbo.bdev;
	struct vmw_private *vmw = vmw_priv_from_ttm(bdev);
	struct ttm_placement *pl = &bo->placement;
	bool mem_compatible = false;
	u32 i;
@@ -888,17 +805,17 @@ void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain)
	pl->placement = bo->places;
	pl->num_placement = set_placement_list(bo->places, domain);

	if (drm_debug_enabled(DRM_UT_DRIVER) && bo->base.resource) {
	if (drm_debug_enabled(DRM_UT_DRIVER) && bo->tbo.resource) {
		for (i = 0; i < pl->num_placement; ++i) {
			if (bo->base.resource->mem_type == TTM_PL_SYSTEM ||
			    bo->base.resource->mem_type == pl->placement[i].mem_type)
			if (bo->tbo.resource->mem_type == TTM_PL_SYSTEM ||
			    bo->tbo.resource->mem_type == pl->placement[i].mem_type)
				mem_compatible = true;
		}
		if (!mem_compatible)
			drm_warn(&vmw->drm,
				 "%s: Incompatible transition from "
				 "bo->base.resource->mem_type = %u to domain = %u\n",
				 __func__, bo->base.resource->mem_type, domain);
				 __func__, bo->tbo.resource->mem_type, domain);
	}

	pl->busy_placement = bo->busy_places;
@@ -907,9 +824,8 @@ void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain)

void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo)
{
	struct ttm_device *bdev = bo->base.bdev;
	struct vmw_private *vmw =
		container_of(bdev, struct vmw_private, bdev);
	struct ttm_device *bdev = bo->tbo.bdev;
	struct vmw_private *vmw = vmw_priv_from_ttm(bdev);
	u32 domain = VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM;

	if (vmw->has_mob)
+30 −30
Original line number Diff line number Diff line
@@ -49,54 +49,54 @@ enum vmw_bo_domain {
	VMW_BO_DOMAIN_MOB           = BIT(4),
};

struct vmw_bo_params {
	u32 domain;
	u32 busy_domain;
	enum ttm_bo_type bo_type;
	size_t size;
	bool pin;
};

/**
 * struct vmw_bo - TTM buffer object with vmwgfx additions
 * @base: The TTM buffer object
 * @tbo: The TTM buffer object
 * @placement: The preferred placement for this buffer object
 * @places: The chosen places for the preferred placement.
 * @busy_places: Chosen busy places for the preferred placement
 * @map: Kmap object for semi-persistent mappings
 * @res_tree: RB tree of resources using this buffer object as a backing MOB
 * @res_prios: Eviction priority counts for attached resources
 * @cpu_writers: Number of synccpu write grabs. Protected by reservation when
 * increased. May be decreased without reservation.
 * @dx_query_ctx: DX context if this buffer object is used as a DX query MOB
 * @map: Kmap object for semi-persistent mappings
 * @res_prios: Eviction priority counts for attached resources
 * @dirty: structure for user-space dirty-tracking
 */
struct vmw_bo {
	struct ttm_buffer_object base;
	struct ttm_buffer_object tbo;

	struct ttm_placement placement;
	struct ttm_place places[5];
	struct ttm_place busy_places[5];

	/* Protected by reservation */
	struct ttm_bo_kmap_obj map;

	struct rb_root res_tree;
	u32 res_prios[TTM_MAX_BO_PRIORITY];

	atomic_t cpu_writers;
	/* Not ref-counted.  Protected by binding_mutex */
	struct vmw_resource *dx_query_ctx;
	/* Protected by reservation */
	struct ttm_bo_kmap_obj map;
	u32 res_prios[TTM_MAX_BO_PRIORITY];
	struct vmw_bo_dirty *dirty;
};

void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain);
void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo);

int vmw_bo_create_kernel(struct vmw_private *dev_priv,
			 unsigned long size,
			 struct ttm_placement *placement,
			 struct ttm_buffer_object **p_bo);
int vmw_bo_create(struct vmw_private *dev_priv,
		  size_t size,
		  u32 domain,
		  u32 busy_domain,
		  bool interruptible, bool pin,
		  struct vmw_bo_params *params,
		  struct vmw_bo **p_bo);
int vmw_bo_init(struct vmw_private *dev_priv,
		struct vmw_bo *vmw_bo,
		size_t size,
		u32 domain,
		u32 busy_domain,
		bool interruptible, bool pin);

int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv);

@@ -118,9 +118,6 @@ void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
			  SVGAGuestPtr *ptr);
int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
int vmw_user_bo_lookup(struct drm_file *filp,
		       uint32_t handle,
		       struct vmw_bo **out);
void vmw_bo_fence_single(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence);

@@ -131,6 +128,9 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
			struct ttm_resource *mem);
void vmw_bo_swap_notify(struct ttm_buffer_object *bo);

int vmw_user_bo_lookup(struct drm_file *filp,
		       u32 handle,
		       struct vmw_bo **out);
/**
 * vmw_bo_adjust_prio - Adjust the buffer object eviction priority
 * according to attached resources
@@ -142,12 +142,12 @@ static inline void vmw_bo_prio_adjust(struct vmw_bo *vbo)

	while (i--) {
		if (vbo->res_prios[i]) {
			vbo->base.priority = i;
			vbo->tbo.priority = i;
			return;
		}
	}

	vbo->base.priority = 3;
	vbo->tbo.priority = 3;
}

/**
@@ -166,7 +166,7 @@ static inline void vmw_bo_prio_add(struct vmw_bo *vbo, int prio)
}

/**
 * vmw_bo_prio_del - Notify a buffer object of a resource with a certain
 * vmw_bo_used_prio_del - Notify a buffer object of a resource with a certain
 * priority being removed
 * @vbo: The struct vmw_bo
 * @prio: The resource priority
@@ -186,18 +186,18 @@ static inline void vmw_bo_unreference(struct vmw_bo **buf)

	*buf = NULL;
	if (tmp_buf)
		ttm_bo_put(&tmp_buf->base);
		ttm_bo_put(&tmp_buf->tbo);
}

static inline struct vmw_bo *vmw_bo_reference(struct vmw_bo *buf)
{
	ttm_bo_get(&buf->base);
	ttm_bo_get(&buf->tbo);
	return buf;
}

static inline struct vmw_bo *to_vmw_bo(struct drm_gem_object *gobj)
{
	return container_of((gobj), struct vmw_bo, base.base);
	return container_of((gobj), struct vmw_bo, tbo.base);
}

#endif // VMWGFX_BO_H
+2 −2
Original line number Diff line number Diff line
@@ -567,7 +567,7 @@ static int vmw_cmd_emit_dummy_legacy_query(struct vmw_private *dev_priv,
	 * without writing to the query result structure.
	 */

	struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
	struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->tbo;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery body;
@@ -613,7 +613,7 @@ static int vmw_cmd_emit_dummy_gb_query(struct vmw_private *dev_priv,
	 * without writing to the query result structure.
	 */

	struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
	struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->tbo;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForGBQuery body;
+15 −29
Original line number Diff line number Diff line
@@ -80,7 +80,6 @@ struct vmw_cmdbuf_context {
 * frees are protected by @lock.
 * @cmd_space: Buffer object for the command buffer space, unless we were
 * able to make a contigous coherent DMA memory allocation, @handle. Immutable.
 * @map_obj: Mapping state for @cmd_space. Immutable.
 * @map: Pointer to command buffer space. May be a mapped buffer object or
 * a contigous coherent DMA memory allocation. Immutable.
 * @cur: Command buffer for small kernel command submissions. Protected by
@@ -117,8 +116,7 @@ struct vmw_cmdbuf_man {
	struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
	struct list_head error;
	struct drm_mm mm;
	struct ttm_buffer_object *cmd_space;
	struct ttm_bo_kmap_obj map_obj;
	struct vmw_bo *cmd_space;
	u8 *map;
	struct vmw_cmdbuf_header *cur;
	size_t cur_pos;
@@ -889,7 +887,7 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
	header->cmd = man->map + offset;
	if (man->using_mob) {
		cb_hdr->flags = SVGA_CB_FLAG_MOB;
		cb_hdr->ptr.mob.mobid = man->cmd_space->resource->start;
		cb_hdr->ptr.mob.mobid = man->cmd_space->tbo.resource->start;
		cb_hdr->ptr.mob.mobOffset = offset;
	} else {
		cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
@@ -1222,7 +1220,6 @@ static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size)
{
	struct vmw_private *dev_priv = man->dev_priv;
	bool dummy;
	int ret;

	if (man->has_pool)
@@ -1235,6 +1232,13 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size)
	if (man->map) {
		man->using_mob = false;
	} else {
		struct vmw_bo_params bo_params = {
			.domain = VMW_BO_DOMAIN_MOB,
			.busy_domain = VMW_BO_DOMAIN_MOB,
			.bo_type = ttm_bo_type_kernel,
			.size = size,
			.pin = true
		};
		/*
		 * DMA memory failed. If we can have command buffers in a
		 * MOB, try to use that instead. Note that this will
@@ -1245,19 +1249,12 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size)
		    !dev_priv->has_mob)
			return -ENOMEM;

		ret = vmw_bo_create_kernel(dev_priv, size,
					   &vmw_mob_placement,
					   &man->cmd_space);
		ret = vmw_bo_create(dev_priv, &bo_params, &man->cmd_space);
		if (ret)
			return ret;

		man->using_mob = true;
		ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT,
				  &man->map_obj);
		if (ret)
			goto out_no_map;

		man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
		man->map = vmw_bo_map_and_cache(man->cmd_space);
		man->using_mob = man->map;
	}

	man->size = size;
@@ -1277,14 +1274,6 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size)
		 (man->using_mob) ? "MOB" : "DMA");

	return 0;

out_no_map:
	if (man->using_mob) {
		ttm_bo_put(man->cmd_space);
		man->cmd_space = NULL;
	}

	return ret;
}

/**
@@ -1383,15 +1372,12 @@ void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
	man->has_pool = false;
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
	if (man->using_mob) {
		(void) ttm_bo_kunmap(&man->map_obj);
		ttm_bo_put(man->cmd_space);
		man->cmd_space = NULL;
	} else {
	if (man->using_mob)
		vmw_bo_unreference(&man->cmd_space);
	else
		dma_free_coherent(man->dev_priv->drm.dev,
				  man->size, man->map, man->handle);
}
}

/**
 * vmw_cmdbuf_man_destroy - Take down a command buffer manager.
+8 −8
Original line number Diff line number Diff line
@@ -73,7 +73,7 @@ const struct vmw_user_resource_conv *user_context_converter =

static const struct vmw_res_func vmw_legacy_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = false,
	.needs_guest_memory = false,
	.may_evict = false,
	.type_name = "legacy contexts",
	.domain = VMW_BO_DOMAIN_SYS,
@@ -86,7 +86,7 @@ static const struct vmw_res_func vmw_legacy_context_func = {

static const struct vmw_res_func vmw_gb_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = true,
	.needs_guest_memory = true,
	.may_evict = true,
	.prio = 3,
	.dirty_prio = 3,
@@ -101,7 +101,7 @@ static const struct vmw_res_func vmw_gb_context_func = {

static const struct vmw_res_func vmw_dx_context_func = {
	.res_type = vmw_res_dx_context,
	.needs_backup = true,
	.needs_guest_memory = true,
	.may_evict = true,
	.prio = 3,
	.dirty_prio = 3,
@@ -186,7 +186,7 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) :
	res->guest_memory_size = (dx ? sizeof(SVGADXContextMobFormat) :
				 sizeof(SVGAGBContextData));
	ret = vmw_resource_init(dev_priv, res, true,
				res_free,
@@ -358,8 +358,8 @@ static int vmw_gb_context_bind(struct vmw_resource *res,
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	cmd->body.mobid = bo->resource->start;
	cmd->body.validContents = res->backup_dirty;
	res->backup_dirty = false;
	cmd->body.validContents = res->guest_memory_dirty;
	res->guest_memory_dirty = false;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	return 0;
@@ -525,8 +525,8 @@ static int vmw_dx_context_bind(struct vmw_resource *res,
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	cmd->body.mobid = bo->resource->start;
	cmd->body.validContents = res->backup_dirty;
	res->backup_dirty = false;
	cmd->body.validContents = res->guest_memory_dirty;
	res->guest_memory_dirty = false;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));


Loading