Commit e573a3cd authored by Dave Airlie's avatar Dave Airlie
Browse files

Merge tag 'amd-drm-next-5.19-2022-05-26' of https://gitlab.freedesktop.org/agd5f/linux into drm-next

amd-drm-next-5.19-2022-05-26:

amdgpu:
- Link training fixes
- DPIA fixes
- Misc code cleanups
- Aux fixes
- Hotplug fixes
- More FP clean up
- Misc GFX9/10 fixes
- Fix a possible memory leak in SMU shutdown
- SMU 13 updates
- RAS fixes
- TMZ fixes
- GC 11 updates
- SMU 11 metrics fixes
- Fix coverage blend mode for overlay plane
- Note DDR vs LPDDR memory
- Fuzz fix for CS IOCTL
- Add new PCI DID

amdkfd:
- Clean up hive setup
- Misc fixes

radeon:
- Fix a possible NULL pointer dereference

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220526200641.64097-1-alexander.deucher@amd.com
parents c4955d9c 62e9bd20
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -1621,7 +1621,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(

	mutex_lock(&mem->lock);

	/* Unpin MMIO/DOORBELL BO's that were pinnned during allocation */
	/* Unpin MMIO/DOORBELL BO's that were pinned during allocation */
	if (mem->alloc_flags &
	    (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
	     KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
+6 −2
Original line number Diff line number Diff line
@@ -188,13 +188,17 @@ static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
			vram_type = AMDGPU_VRAM_TYPE_DDR3;
			break;
		case Ddr4MemType:
		case LpDdr4MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR4;
			break;
		case LpDdr4MemType:
			vram_type = AMDGPU_VRAM_TYPE_LPDDR4;
			break;
		case Ddr5MemType:
		case LpDdr5MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR5;
			break;
		case LpDdr5MemType:
			vram_type = AMDGPU_VRAM_TYPE_LPDDR5;
			break;
		default:
			vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
			break;
+2 −2
Original line number Diff line number Diff line
@@ -116,7 +116,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
	int ret;

	if (cs->in.num_chunks == 0)
		return 0;
		return -EINVAL;

	chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (!chunk_array)
@@ -1252,7 +1252,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,

	p->fence = dma_fence_get(&job->base.s_fence->finished);

	amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq);
	seq = amdgpu_ctx_add_fence(p->ctx, entity, p->fence);
	amdgpu_cs_post_dependencies(p);

	if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
+22 −24
Original line number Diff line number Diff line
@@ -135,9 +135,9 @@ static enum amdgpu_ring_priority_level amdgpu_ctx_sched_prio_to_ring_prio(int32_

static unsigned int amdgpu_ctx_get_hw_prio(struct amdgpu_ctx *ctx, u32 hw_ip)
{
	struct amdgpu_device *adev = ctx->adev;
	int32_t ctx_prio;
	struct amdgpu_device *adev = ctx->mgr->adev;
	unsigned int hw_prio;
	int32_t ctx_prio;

	ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;
@@ -166,7 +166,7 @@ static unsigned int amdgpu_ctx_get_hw_prio(struct amdgpu_ctx *ctx, u32 hw_ip)
static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
				  const u32 ring)
{
	struct amdgpu_device *adev = ctx->adev;
	struct amdgpu_device *adev = ctx->mgr->adev;
	struct amdgpu_ctx_entity *entity;
	struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
	unsigned num_scheds = 0;
@@ -220,10 +220,8 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
	return r;
}

static int amdgpu_ctx_init(struct amdgpu_device *adev,
			   int32_t priority,
			   struct drm_file *filp,
			   struct amdgpu_ctx *ctx)
static int amdgpu_ctx_init(struct amdgpu_ctx_mgr *mgr, int32_t priority,
			   struct drm_file *filp, struct amdgpu_ctx *ctx)
{
	int r;

@@ -233,15 +231,14 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,

	memset(ctx, 0, sizeof(*ctx));

	ctx->adev = adev;

	kref_init(&ctx->refcount);
	ctx->mgr = mgr;
	spin_lock_init(&ctx->ring_lock);
	mutex_init(&ctx->lock);

	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
	ctx->reset_counter = atomic_read(&mgr->adev->gpu_reset_counter);
	ctx->reset_counter_query = ctx->reset_counter;
	ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
	ctx->vram_lost_counter = atomic_read(&mgr->adev->vram_lost_counter);
	ctx->init_priority = priority;
	ctx->override_priority = AMDGPU_CTX_PRIORITY_UNSET;
	ctx->stable_pstate = AMDGPU_CTX_STABLE_PSTATE_NONE;
@@ -266,7 +263,7 @@ static void amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity)
static int amdgpu_ctx_get_stable_pstate(struct amdgpu_ctx *ctx,
					u32 *stable_pstate)
{
	struct amdgpu_device *adev = ctx->adev;
	struct amdgpu_device *adev = ctx->mgr->adev;
	enum amd_dpm_forced_level current_level;

	current_level = amdgpu_dpm_get_performance_level(adev);
@@ -294,7 +291,7 @@ static int amdgpu_ctx_get_stable_pstate(struct amdgpu_ctx *ctx,
static int amdgpu_ctx_set_stable_pstate(struct amdgpu_ctx *ctx,
					u32 stable_pstate)
{
	struct amdgpu_device *adev = ctx->adev;
	struct amdgpu_device *adev = ctx->mgr->adev;
	enum amd_dpm_forced_level level;
	u32 current_stable_pstate;
	int r;
@@ -345,7 +342,8 @@ static int amdgpu_ctx_set_stable_pstate(struct amdgpu_ctx *ctx,
static void amdgpu_ctx_fini(struct kref *ref)
{
	struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
	struct amdgpu_device *adev = ctx->adev;
	struct amdgpu_ctx_mgr *mgr = ctx->mgr;
	struct amdgpu_device *adev = mgr->adev;
	unsigned i, j, idx;

	if (!adev)
@@ -421,7 +419,7 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
	}

	*id = (uint32_t)r;
	r = amdgpu_ctx_init(adev, priority, filp, ctx);
	r = amdgpu_ctx_init(mgr, priority, filp, ctx);
	if (r) {
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
@@ -671,9 +669,9 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
	return 0;
}

void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
			      struct drm_sched_entity *entity,
			  struct dma_fence *fence, uint64_t *handle)
			      struct dma_fence *fence)
{
	struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
	uint64_t seq = centity->sequence;
@@ -682,8 +680,7 @@ void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,

	idx = seq & (amdgpu_sched_jobs - 1);
	other = centity->fences[idx];
	if (other)
		BUG_ON(!dma_fence_is_signaled(other));
	WARN_ON(other && !dma_fence_is_signaled(other));

	dma_fence_get(fence);

@@ -693,8 +690,7 @@ void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
	spin_unlock(&ctx->ring_lock);

	dma_fence_put(other);
	if (handle)
		*handle = seq;
	return seq;
}

struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
@@ -731,7 +727,7 @@ static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
					   int hw_ip,
					   int32_t priority)
{
	struct amdgpu_device *adev = ctx->adev;
	struct amdgpu_device *adev = ctx->mgr->adev;
	unsigned int hw_prio;
	struct drm_gpu_scheduler **scheds = NULL;
	unsigned num_scheds;
@@ -796,8 +792,10 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
	return r;
}

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr,
			 struct amdgpu_device *adev)
{
	mgr->adev = adev;
	mutex_init(&mgr->lock);
	idr_init(&mgr->ctx_handles);
}
+6 −5
Original line number Diff line number Diff line
@@ -40,7 +40,7 @@ struct amdgpu_ctx_entity {

struct amdgpu_ctx {
	struct kref			refcount;
	struct amdgpu_device		*adev;
	struct amdgpu_ctx_mgr		*mgr;
	unsigned			reset_counter;
	unsigned			reset_counter_query;
	uint32_t			vram_lost_counter;
@@ -70,9 +70,9 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx);

int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
			  u32 ring, struct drm_sched_entity **entity);
void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
			      struct drm_sched_entity *entity,
			  struct dma_fence *fence, uint64_t *seq);
			      struct dma_fence *fence);
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct drm_sched_entity *entity,
				       uint64_t seq);
@@ -85,7 +85,8 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
			       struct drm_sched_entity *entity);

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr,
			 struct amdgpu_device *adev);
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout);
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
Loading