Commit b5bf1d8a authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge tag 'drm-fixes-2022-11-19' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "I guess the main question is are things settling down, and I'd say
  kinda, these are all pretty small fixes, nothing big stands out
  really, just seems to be quite a few of them.

  Mostly amdgpu and core fixes, with some i915, tegra, vc4, panel bits.

  core:
   - Fix potential memory leak in drm_dev_init()
   - Fix potential null-ptr-deref in drm_vblank_destroy_worker()
   - Revert hiding unregistered connectors from userspace, as it breaks
     on DP-MST
   - Add workaround for DP++ dual mode adaptors that don't support i2c
     subaddressing

  i915:
   - Fix uaf with lmem_userfault_list handling

  amdgpu:
   - gang submit fixes
   - Fix a possible memory leak in gang submit error path
   - DP tunneling fixes
   - DCN 3.1 page flip fix
   - DCN 3.2.x fixes
   - DCN 3.1.4 fixes
   - Don't expose degamma on hardware that doesn't support it
   - BACO fixes for SMU 11.x
   - BACO fixes for SMU 13.x
   - Virtual display fix for devices with no display hardware

  amdkfd:
   - Memory limit regression fix

  tegra:
   - tegra20 GART fix

  vc4:
   - Fix error handling in vc4_atomic_commit_tail()

  lima:
   - Set lima's clkname correctly when regulator is missing

  panel:
   - Set bpc for logictechno panels"

* tag 'drm-fixes-2022-11-19' of git://anongit.freedesktop.org/drm/drm: (28 commits)
  gpu: host1x: Avoid trying to use GART on Tegra20
  drm/display: Don't assume dual mode adaptors support i2c sub-addressing
  drm/amd/pm: fix SMU13 runpm hang due to unintentional workaround
  drm/amd/pm: enable runpm support over BACO for SMU13.0.7
  drm/amd/pm: enable runpm support over BACO for SMU13.0.0
  drm/amdgpu: there is no vbios fb on devices with no display hw (v2)
  drm/amdkfd: Fix a memory limit issue
  drm/amdgpu: disable BACO support on more cards
  drm/amd/display: don't enable DRM CRTC degamma property for DCE
  drm/amd/display: Set max for prefetch lines on dcn32
  drm/amd/display: use uclk pstate latency for fw assisted mclk validation dcn32
  drm/amd/display: Fix prefetch calculations for dcn32
  drm/amd/display: Fix optc2_configure warning on dcn314
  drm/amd/display: Fix calculation for cursor CAB allocation
  Revert "drm: hide unregistered connectors from GETCONNECTOR IOCTL"
  drm/amd/display: Support parsing VRAM info v3.0 from VBIOS
  drm/amd/display: Fix invalid DPIA AUX reply causing system hang
  drm/amdgpu: Add psp_13_0_10_ta firmware to modinfo
  drm/amd/display: Add HUBP surface flip interrupt handler
  drm/amd/display: Fix access timeout to DPIA AUX at boot time
  ...
parents ab290ead b1010b93
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -1293,6 +1293,7 @@ void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
				u32 reg, u32 v);
struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
					    struct dma_fence *gang);
bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev);

/* atpx handler */
#if defined(CONFIG_VGA_SWITCHEROO)
+1 −3
Original line number Diff line number Diff line
@@ -171,9 +171,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
	    (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
	     kfd_mem_limit.max_ttm_mem_limit) ||
	    (adev && adev->kfd.vram_used + vram_needed >
	     adev->gmc.real_vram_size -
	     atomic64_read(&adev->vram_pin_size) -
	     reserved_for_pt)) {
	     adev->gmc.real_vram_size - reserved_for_pt)) {
		ret = -ENOMEM;
		goto release;
	}
+20 −9
Original line number Diff line number Diff line
@@ -109,6 +109,7 @@ static int amdgpu_cs_p1_ib(struct amdgpu_cs_parser *p,
		return r;

	++(num_ibs[r]);
	p->gang_leader_idx = r;
	return 0;
}

@@ -287,8 +288,10 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
		}
	}

	if (!p->gang_size)
		return -EINVAL;
	if (!p->gang_size) {
		ret = -EINVAL;
		goto free_partial_kdata;
	}

	for (i = 0; i < p->gang_size; ++i) {
		ret = amdgpu_job_alloc(p->adev, num_ibs[i], &p->jobs[i], vm);
@@ -300,7 +303,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
		if (ret)
			goto free_all_kdata;
	}
	p->gang_leader = p->jobs[p->gang_size - 1];
	p->gang_leader = p->jobs[p->gang_leader_idx];

	if (p->ctx->vram_lost_counter != p->gang_leader->vram_lost_counter) {
		ret = -ECANCELED;
@@ -1195,16 +1198,18 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
			return r;
	}

	for (i = 0; i < p->gang_size - 1; ++i) {
	for (i = 0; i < p->gang_size; ++i) {
		if (p->jobs[i] == leader)
			continue;

		r = amdgpu_sync_clone(&leader->sync, &p->jobs[i]->sync);
		if (r)
			return r;
	}

	r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_size - 1]);
	r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]);
	if (r && r != -ERESTARTSYS)
		DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");

	return r;
}

@@ -1238,9 +1243,12 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
	for (i = 0; i < p->gang_size; ++i)
		drm_sched_job_arm(&p->jobs[i]->base);

	for (i = 0; i < (p->gang_size - 1); ++i) {
	for (i = 0; i < p->gang_size; ++i) {
		struct dma_fence *fence;

		if (p->jobs[i] == leader)
			continue;

		fence = &p->jobs[i]->base.s_fence->scheduled;
		r = amdgpu_sync_fence(&leader->sync, fence);
		if (r)
@@ -1276,7 +1284,10 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
	list_for_each_entry(e, &p->validated, tv.head) {

		/* Everybody except for the gang leader uses READ */
		for (i = 0; i < (p->gang_size - 1); ++i) {
		for (i = 0; i < p->gang_size; ++i) {
			if (p->jobs[i] == leader)
				continue;

			dma_resv_add_fence(e->tv.bo->base.resv,
					   &p->jobs[i]->base.s_fence->finished,
					   DMA_RESV_USAGE_READ);
@@ -1286,7 +1297,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
		e->tv.num_shared = 0;
	}

	seq = amdgpu_ctx_add_fence(p->ctx, p->entities[p->gang_size - 1],
	seq = amdgpu_ctx_add_fence(p->ctx, p->entities[p->gang_leader_idx],
				   p->fence);
	amdgpu_cs_post_dependencies(p);

+1 −0
Original line number Diff line number Diff line
@@ -54,6 +54,7 @@ struct amdgpu_cs_parser {

	/* scheduler job objects */
	unsigned int		gang_size;
	unsigned int		gang_leader_idx;
	struct drm_sched_entity	*entities[AMDGPU_CS_GANG_SIZE];
	struct amdgpu_job	*jobs[AMDGPU_CS_GANG_SIZE];
	struct amdgpu_job	*gang_leader;
+41 −0
Original line number Diff line number Diff line
@@ -6044,3 +6044,44 @@ struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
	dma_fence_put(old);
	return NULL;
}

/**
 * amdgpu_device_has_display_hardware - check whether the ASIC has display hw
 * @adev: pointer to the amdgpu device
 *
 * Returns false for ASICs explicitly known to lack display hardware
 * (Hainan, Topaz), true for the listed legacy ASICs known to have it.
 * For anything else the decision falls through to IP discovery: no DCE
 * HW IP version recorded, or a harvested DMU block, means no display.
 *
 * NOTE(review): the CHIP_* case groups rely on switch fallthrough — the
 * SI/CIK cases are only compiled in when the corresponding Kconfig
 * option is set, so a SI/CIK part built without that option falls into
 * the default (IP discovery) branch.
 */
bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_HAINAN:
#endif
	case CHIP_TOPAZ:
		/* chips with no display hardware */
		return false;
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
#endif
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		/* chips with display hardware */
		return true;
	default:
		/* IP discovery: no DCE IP version or DMU harvested => no display */
		if (!adev->ip_versions[DCE_HWIP][0] ||
		    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
			return false;
		return true;
	}
}
Loading