Commit 2cd80dbd authored by Zack Rusin
Browse files

drm/vmwgfx: Add basic support for SVGA3



SVGA3 is the next version of our PCI device. Some of the changes
include using MMIO for register accesses instead of ioports,
deprecating the FIFO MMIO and removing a lot of the old and
legacy functionality. SVGA3 doesn't support guest backed
objects right now so everything except 3D is working.

v2: Fixes all the static analyzer warnings

Signed-off-by: Zack Rusin <zackr@vmware.com>
Cc: Martin Krastev <krastevm@vmware.com>
Reviewed-by: Roland Scheidegger <sroland@vmware.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210505191007.305872-1-zackr@vmware.com
parent 8211783f
Loading
Loading
Loading
Loading
+48 −7
Original line number Diff line number Diff line
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
 * Copyright 1998-2015 VMware, Inc.
 * Copyright 1998-2021 VMware, Inc.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
@@ -98,6 +98,10 @@ typedef uint32 SVGAMobId;
#define SVGA_MAGIC         0x900000UL
#define SVGA_MAKE_ID(ver)  (SVGA_MAGIC << 8 | (ver))

/* Version 3 has the control bar instead of the FIFO */
#define SVGA_VERSION_3     3
#define SVGA_ID_3          SVGA_MAKE_ID(SVGA_VERSION_3)

/* Version 2 let the address of the frame buffer be unsigned on Win32 */
#define SVGA_VERSION_2     2
#define SVGA_ID_2          SVGA_MAKE_ID(SVGA_VERSION_2)
@@ -129,11 +133,12 @@ typedef uint32 SVGAMobId;
 * Interrupts are only supported when the
 * SVGA_CAP_IRQMASK capability is present.
 */
#define SVGA_IRQFLAG_ANY_FENCE            0x1    /* Any fence was passed */
#define SVGA_IRQFLAG_FIFO_PROGRESS        0x2    /* Made forward progress in the FIFO */
#define SVGA_IRQFLAG_FENCE_GOAL           0x4    /* SVGA_FIFO_FENCE_GOAL reached */
#define SVGA_IRQFLAG_COMMAND_BUFFER       0x8    /* Command buffer completed */
#define SVGA_IRQFLAG_ERROR                0x10   /* Error while processing commands */
#define SVGA_IRQFLAG_ANY_FENCE            (1 << 0) /* Any fence was passed */
#define SVGA_IRQFLAG_FIFO_PROGRESS        (1 << 1) /* Made forward progress in the FIFO */
#define SVGA_IRQFLAG_FENCE_GOAL           (1 << 2) /* SVGA_FIFO_FENCE_GOAL reached */
#define SVGA_IRQFLAG_COMMAND_BUFFER       (1 << 3) /* Command buffer completed */
#define SVGA_IRQFLAG_ERROR                (1 << 4) /* Error while processing commands */
#define SVGA_IRQFLAG_MAX                  (1 << 5)

/*
 * The byte-size is the size of the actual cursor data,
@@ -286,7 +291,32 @@ enum {
    */
   SVGA_REG_GBOBJECT_MEM_SIZE_KB = 76,

   SVGA_REG_TOP = 77,               /* Must be 1 more than the last register */
   /*
    * These registers are for the addresses of the memory BARs for SVGA3
    */
   SVGA_REG_REGS_START_HIGH32 = 77,
   SVGA_REG_REGS_START_LOW32 = 78,
   SVGA_REG_FB_START_HIGH32 = 79,
   SVGA_REG_FB_START_LOW32 = 80,

   /*
    * A hint register that recommends which quality level the guest should
    * currently use to define multisample surfaces.
    *
    * If the register is SVGA_REG_MSHINT_DISABLED,
    * the guest is only allowed to use SVGA3D_MS_QUALITY_FULL.
    *
    * Otherwise, this is a live value that can change while the VM is
    * powered on with the hint suggestion for which quality level the guest
    * should be using.  Guests are free to ignore the hint and use either
    * RESOLVE or FULL quality.
    */
   SVGA_REG_MSHINT = 81,

   SVGA_REG_IRQ_STATUS = 82,
   SVGA_REG_DIRTY_TRACKING = 83,

   SVGA_REG_TOP = 84,               /* Must be 1 more than the last register */

   SVGA_PALETTE_BASE = 1024,        /* Base of SVGA color map */
   /* Next 768 (== 256*3) registers exist for colormap */
@@ -310,6 +340,17 @@ typedef enum SVGARegGuestDriverId {
   SVGA_REG_GUEST_DRIVER_ID_SUBMIT  = MAX_UINT32,
} SVGARegGuestDriverId;

/*
 * Values for the SVGA_REG_MSHINT register — the host's live hint for which
 * multisample quality level the guest should use. When the register reads
 * DISABLED, the guest may only use full-quality multisample surfaces;
 * otherwise the hint suggests FULL or RESOLVED quality, and guests are free
 * to ignore it.
 */
typedef enum SVGARegMSHint {
   SVGA_REG_MSHINT_DISABLED = 0,
   SVGA_REG_MSHINT_FULL     = 1,
   SVGA_REG_MSHINT_RESOLVED = 2,
} SVGARegMSHint;

/*
 * Values for the SVGA_REG_DIRTY_TRACKING register.
 * NOTE(review): the names suggest this selects the granularity of dirty
 * tracking (per image vs. per surface); the exact semantics are not shown
 * here — confirm against the SVGA device documentation.
 */
typedef enum SVGARegDirtyTracking {
   SVGA_REG_DIRTY_TRACKING_PER_IMAGE = 0,
   SVGA_REG_DIRTY_TRACKING_PER_SURFACE = 1,
} SVGARegDirtyTracking;


/*
 * Guest memory regions (GMRs):
+4 −4
Original line number Diff line number Diff line
@@ -788,7 +788,7 @@ static void vmw_collect_dirty_view_ids(struct vmw_ctx_binding_state *cbs,
}

/**
 * vmw_binding_emit_set_sr - Issue delayed DX shader resource binding commands
 * vmw_emit_set_sr - Issue delayed DX shader resource binding commands
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 * @shader_slot: The shader slot of the binding.
@@ -832,7 +832,7 @@ static int vmw_emit_set_sr(struct vmw_ctx_binding_state *cbs,
}

/**
 * vmw_binding_emit_set_rt - Issue delayed DX rendertarget binding commands
 * vmw_emit_set_rt - Issue delayed DX rendertarget binding commands
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 */
@@ -1024,7 +1024,7 @@ static void vmw_collect_dirty_vbs(struct vmw_ctx_binding_state *cbs,
}

/**
 * vmw_binding_emit_set_vb - Issue delayed vertex buffer binding commands
 * vmw_emit_set_vb - Issue delayed vertex buffer binding commands
 *
 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 *
@@ -1394,7 +1394,7 @@ struct list_head *vmw_binding_state_list(struct vmw_ctx_binding_state *cbs)
}

/**
 * vmwgfx_binding_state_reset - clear a struct vmw_ctx_binding_state
 * vmw_binding_state_reset - clear a struct vmw_ctx_binding_state
 *
 * @cbs: Pointer to the struct vmw_ctx_binding_state to be cleared
 *
+1 −1
Original line number Diff line number Diff line
@@ -421,7 +421,7 @@ static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d,
}

/**
 * ttm_bo_cpu_blit - in-kernel cpu blit.
 * vmw_bo_cpu_blit - in-kernel cpu blit.
 *
 * @dst: Destination buffer object.
 * @dst_offset: Destination offset of blit start in bytes.
+1 −1
Original line number Diff line number Diff line
@@ -600,7 +600,7 @@ static void vmw_user_bo_release(struct ttm_base_object **p_base)


/**
 * vmw_user_bo_ref_obj-release - TTM synccpu reference object release callback
 * vmw_user_bo_ref_obj_release - TTM synccpu reference object release callback
 * for vmw user buffer objects
 *
 * @base: Pointer to the TTM base object
+55 −63
Original line number Diff line number Diff line
@@ -31,15 +31,10 @@

#include "vmwgfx_drv.h"

/*
 * Wire layout of a DX "temp set context" FIFO command: the generic SVGA3D
 * command header followed by the SVGA3dCmdDXTempSetContext body. Its size
 * is used to reserve extra FIFO space when a DX context command must be
 * emitted alongside a commit.
 */
struct vmw_temp_set_context {
	SVGA3dCmdHeader header;
	SVGA3dCmdDXTempSetContext body;
};

bool vmw_supports_3d(struct vmw_private *dev_priv)
{
	uint32_t fifo_min, hwversion;
	const struct vmw_fifo_state *fifo = &dev_priv->fifo;
	const struct vmw_fifo_state *fifo = dev_priv->fifo;

	if (!(dev_priv->capabilities & SVGA_CAP_3D))
		return false;
@@ -61,6 +56,8 @@ bool vmw_supports_3d(struct vmw_private *dev_priv)
	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
		return false;

	BUG_ON(vmw_is_svga_v3(dev_priv));

	fifo_min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
	if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
		return false;
@@ -98,16 +95,20 @@ bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
	return false;
}

int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
struct vmw_fifo_state *vmw_fifo_create(struct vmw_private *dev_priv)
{
	struct vmw_fifo_state *fifo;
	uint32_t max;
	uint32_t min;

	fifo->dx = false;
	if (!dev_priv->fifo_mem)
		return NULL;

	fifo = kzalloc(sizeof(*fifo), GFP_KERNEL);
	fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
	fifo->static_buffer = vmalloc(fifo->static_buffer_size);
	if (unlikely(fifo->static_buffer == NULL))
		return -ENOMEM;
		return ERR_PTR(-ENOMEM);

	fifo->dynamic_buffer = NULL;
	fifo->reserved_size = 0;
@@ -115,20 +116,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)

	mutex_init(&fifo->fifo_mutex);
	init_rwsem(&fifo->rwsem);

	DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
	DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
	DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));

	dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
	dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
	dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);

	vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE |
		  SVGA_REG_ENABLE_HIDE);

	vmw_write(dev_priv, SVGA_REG_TRACES, 0);

	min = 4;
	if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
		min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
@@ -155,35 +142,23 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
		 (unsigned int) max,
		 (unsigned int) min,
		 (unsigned int) fifo->capabilities);

	atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_FENCE, dev_priv->last_read_seqno);

	return 0;
	return fifo;
}

void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
	u32 *fifo_mem = dev_priv->fifo_mem;

	if (cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
	if (fifo_mem && cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
		vmw_write(dev_priv, SVGA_REG_SYNC, reason);

}

void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
void vmw_fifo_destroy(struct vmw_private *dev_priv)
{
	vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
	while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
		;

	dev_priv->last_read_seqno = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_FENCE);
	struct vmw_fifo_state *fifo = dev_priv->fifo;

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
		  dev_priv->config_done_state);
	vmw_write(dev_priv, SVGA_REG_ENABLE,
		  dev_priv->enable_state);
	vmw_write(dev_priv, SVGA_REG_TRACES,
		  dev_priv->traces_state);
	if (!fifo)
		return;

	if (likely(fifo->static_buffer != NULL)) {
		vfree(fifo->static_buffer);
@@ -194,6 +169,8 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
		vfree(fifo->dynamic_buffer);
		fifo->dynamic_buffer = NULL;
	}
	kfree(fifo);
	dev_priv->fifo = NULL;
}

static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
@@ -289,7 +266,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
				    uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	struct vmw_fifo_state *fifo_state = dev_priv->fifo;
	u32  *fifo_mem = dev_priv->fifo_mem;
	uint32_t max;
	uint32_t min;
@@ -438,16 +415,12 @@ static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,

static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	struct vmw_fifo_state *fifo_state = dev_priv->fifo;
	uint32_t next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);
	uint32_t max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
	uint32_t min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
	bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;

	if (fifo_state->dx)
		bytes += sizeof(struct vmw_temp_set_context);

	fifo_state->dx = false;
	BUG_ON((bytes & 3) != 0);
	BUG_ON(bytes > fifo_state->reserved_size);

@@ -495,7 +468,7 @@ void vmw_cmd_commit(struct vmw_private *dev_priv, uint32_t bytes)


/**
 * vmw_fifo_commit_flush - Commit fifo space and flush any buffered commands.
 * vmw_cmd_commit_flush - Commit fifo space and flush any buffered commands.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes to commit.
@@ -509,7 +482,7 @@ void vmw_cmd_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
}

/**
 * vmw_fifo_flush - Flush any buffered commands and make sure command processing
 * vmw_cmd_flush - Flush any buffered commands and make sure command processing
 * starts.
 *
 * @dev_priv: Pointer to device private structure.
@@ -527,7 +500,6 @@ int vmw_cmd_flush(struct vmw_private *dev_priv, bool interruptible)

int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	struct svga_fifo_cmd_fence *cmd_fence;
	u32 *fm;
	int ret = 0;
@@ -546,7 +518,7 @@ int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
		*seqno = atomic_add_return(1, &dev_priv->marker_seq);
	} while (*seqno == 0);

	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
	if (!(vmw_fifo_caps(dev_priv) & SVGA_FIFO_CAP_FENCE)) {

		/*
		 * Don't request hardware to send a fence. The
@@ -561,22 +533,22 @@ int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
	cmd_fence = (struct svga_fifo_cmd_fence *) fm;
	cmd_fence->fence = *seqno;
	vmw_cmd_commit_flush(dev_priv, bytes);
	vmw_update_seqno(dev_priv, fifo_state);
	vmw_update_seqno(dev_priv);

out_err:
	return ret;
}

/**
 * vmw_fifo_emit_dummy_legacy_query - emits a dummy query to the fifo using
 * vmw_cmd_emit_dummy_legacy_query - emits a dummy query to the fifo using
 * legacy query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * See the vmw_fifo_emit_dummy_query documentation.
 * See the vmw_cmd_emit_dummy_query documentation.
 */
static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
static int vmw_cmd_emit_dummy_legacy_query(struct vmw_private *dev_priv,
					    uint32_t cid)
{
	/*
@@ -614,15 +586,15 @@ static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
}

/**
 * vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using
 * vmw_cmd_emit_dummy_gb_query - emits a dummy query to the fifo using
 * guest-backed resource query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * See the vmw_fifo_emit_dummy_query documentation.
 * See the vmw_cmd_emit_dummy_query documentation.
 */
static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
static int vmw_cmd_emit_dummy_gb_query(struct vmw_private *dev_priv,
				       uint32_t cid)
{
	/*
@@ -656,7 +628,7 @@ static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,


/**
 * vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using
 * vmw_cmd_emit_dummy_query - emits a dummy query to the fifo using
 * appropriate resource query commands.
 *
 * @dev_priv: The device private structure.
@@ -677,7 +649,27 @@ int vmw_cmd_emit_dummy_query(struct vmw_private *dev_priv,
			      uint32_t cid)
{
	if (dev_priv->has_mob)
		return vmw_fifo_emit_dummy_gb_query(dev_priv, cid);
		return vmw_cmd_emit_dummy_gb_query(dev_priv, cid);

	return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid);
	return vmw_cmd_emit_dummy_legacy_query(dev_priv, cid);
}


/**
 * vmw_cmd_supported - returns true if the given device supports
 * command queues.
 *
 * @vmw: The device private structure.
 *
 * Returns true if we can issue commands.
 */
bool vmw_cmd_supported(struct vmw_private *vmw)
{
	const uint32_t cmdbuf_caps = SVGA_CAP_COMMAND_BUFFERS |
				     SVGA_CAP_CMD_BUFFERS_2;

	/* Either command-buffer capability means commands can be issued. */
	if (vmw->capabilities & cmdbuf_caps)
		return true;

	/*
	 * No command buffers; we can still submit commands through the
	 * FIFO, provided the FIFO memory is mapped.
	 */
	return vmw->fifo_mem != NULL;
}
Loading