Commit 6f5a5e86 authored by Dave Airlie's avatar Dave Airlie
Browse files

Merge tag 'amd-drm-fixes-6.4-2023-04-26' of https://gitlab.freedesktop.org/agd5f/linux into drm-next

amd-drm-fixes-6.4-2023-04-26:

amdgpu:
- SR-IOV fixes
- DCN 3.2 fixes
- DC mclk handling fixes
- eDP fixes
- SubVP fixes
- HDCP regression fix
- DSC fixes
- DC FP fixes
- DCN 3.x fixes
- Display flickering fix when switching between vram and gtt
- Z8 power saving fix
- Fix hang when skipping modeset

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230427033012.7668-1-alexander.deucher@amd.com
parents cf03e295 d893f393
Loading
Loading
Loading
Loading
+17 −15
Original line number Diff line number Diff line
@@ -2539,8 +2539,6 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
	amdgpu_fru_get_product_info(adev);

init_failed:
	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_release_full_gpu(adev, true);

	return r;
}
@@ -3859,18 +3857,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,

	r = amdgpu_device_ip_init(adev);
	if (r) {
		/* failed in exclusive mode due to timeout */
		if (amdgpu_sriov_vf(adev) &&
		    !amdgpu_sriov_runtime(adev) &&
		    amdgpu_virt_mmio_blocked(adev) &&
		    !amdgpu_virt_wait_reset(adev)) {
			dev_err(adev->dev, "VF exclusive mode timeout\n");
			/* Don't send request since VF is inactive. */
			adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
			adev->virt.ops = NULL;
			r = -EAGAIN;
			goto release_ras_con;
		}
		dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
		goto release_ras_con;
@@ -3939,8 +3925,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
				   msecs_to_jiffies(AMDGPU_RESUME_MS));
	}

	if (amdgpu_sriov_vf(adev))
	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_release_full_gpu(adev, true);
		flush_delayed_work(&adev->delayed_init_work);
	}

	r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
	if (r)
@@ -3980,6 +3968,20 @@ int amdgpu_device_init(struct amdgpu_device *adev,
	return 0;

release_ras_con:
	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_release_full_gpu(adev, true);

	/* failed in exclusive mode due to timeout */
	if (amdgpu_sriov_vf(adev) &&
		!amdgpu_sriov_runtime(adev) &&
		amdgpu_virt_mmio_blocked(adev) &&
		!amdgpu_virt_wait_reset(adev)) {
		dev_err(adev->dev, "VF exclusive mode timeout\n");
		/* Don't send request since VF is inactive. */
		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
		adev->virt.ops = NULL;
		r = -EAGAIN;
	}
	amdgpu_release_ras_context(adev);

failed:
+1 −1
Original line number Diff line number Diff line
@@ -430,7 +430,7 @@ static int jpeg_v4_0_start_sriov(struct amdgpu_device *adev)
		MMSCH_COMMAND__END;

	header.version = MMSCH_VERSION;
	header.total_size = sizeof(struct mmsch_v4_0_init_header) >> 2;
	header.total_size = RREG32_SOC15(VCN, 0, regMMSCH_VF_CTX_SIZE);

	header.jpegdec.init_status = 0;
	header.jpegdec.table_offset = 0;
+28 −6
Original line number Diff line number Diff line
@@ -3128,9 +3128,12 @@ void amdgpu_dm_update_connector_after_detect(
						    aconnector->edid);
		}

		aconnector->timing_requested = kzalloc(sizeof(struct dc_crtc_timing), GFP_KERNEL);
		if (!aconnector->timing_requested) {
			aconnector->timing_requested =
				kzalloc(sizeof(struct dc_crtc_timing), GFP_KERNEL);
			if (!aconnector->timing_requested)
			dm_error("%s: failed to create aconnector->requested_timing\n", __func__);
				dm_error("failed to create aconnector->requested_timing\n");
		}

		drm_connector_update_edid_property(connector, aconnector->edid);
		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
@@ -7898,6 +7901,13 @@ static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
			amdgpu_dm_plane_handle_cursor_update(plane, old_plane_state);
}

static inline uint32_t get_mem_type(struct drm_framebuffer *fb)
{
	struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]);

	return abo->tbo.resource ? abo->tbo.resource->mem_type : 0;
}

static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
				    struct dc_state *dc_state,
				    struct drm_device *dev,
@@ -7972,6 +7982,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
			continue;

		dc_plane = dm_new_plane_state->dc_state;
		if (!dc_plane)
			continue;

		bundle->surface_updates[planes_count].surface = dc_plane;
		if (new_pcrtc_state->color_mgmt_changed) {
@@ -8038,11 +8050,13 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,

		/*
		 * Only allow immediate flips for fast updates that don't
		 * change FB pitch, DCC state, rotation or mirroing.
		 * change memory domain, FB pitch, DCC state, rotation or
		 * mirroring.
		 */
		bundle->flip_addrs[planes_count].flip_immediate =
			crtc->state->async_flip &&
			acrtc_state->update_type == UPDATE_TYPE_FAST;
			acrtc_state->update_type == UPDATE_TYPE_FAST &&
			get_mem_type(old_plane_state->fb) == get_mem_type(fb);

		timestamp_ns = ktime_get_ns();
		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
@@ -8554,6 +8568,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

		if (!adev->dm.hdcp_workqueue)
			continue;

		pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i);

		if (!connector)
@@ -8602,6 +8619,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

		if (!adev->dm.hdcp_workqueue)
			continue;

		new_crtc_state = NULL;
		old_crtc_state = NULL;

@@ -9620,8 +9640,9 @@ static int dm_update_plane_state(struct dc *dc,
			return -EINVAL;
		}


		if (dm_old_plane_state->dc_state)
			dc_plane_state_release(dm_old_plane_state->dc_state);

		dm_new_plane_state->dc_state = NULL;

		*lock_and_validation_needed = true;
@@ -10158,6 +10179,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
		ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
		if (ret) {
			DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
			ret = -EINVAL;
			goto fail;
		}

+0 −1
Original line number Diff line number Diff line
@@ -687,7 +687,6 @@ static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux)
		return;

	data[0] |= (1 << 1); // set bit 1 to 1
		return;

	if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x221198, data))
		return;
+11 −6
Original line number Diff line number Diff line
@@ -379,14 +379,18 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
		if (aconnector->dc_sink && connector->state) {
			struct drm_device *dev = connector->dev;
			struct amdgpu_device *adev = drm_to_adev(dev);

			if (adev->dm.hdcp_workqueue) {
				struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue;
			struct hdcp_workqueue *hdcp_w = &hdcp_work[aconnector->dc_link->link_index];
				struct hdcp_workqueue *hdcp_w =
					&hdcp_work[aconnector->dc_link->link_index];

				connector->state->hdcp_content_type =
				hdcp_w->hdcp_content_type[connector->index];
				connector->state->content_protection =
				hdcp_w->content_protection[connector->index];
			}
		}

		if (aconnector->dc_sink) {
			amdgpu_dm_update_freesync_caps(
@@ -1406,6 +1410,7 @@ int pre_validate_dsc(struct drm_atomic_state *state,
	ret = pre_compute_mst_dsc_configs_for_state(state, local_dc_state, vars);
	if (ret != 0) {
		DRM_INFO_ONCE("pre_compute_mst_dsc_configs_for_state() failed\n");
		ret = -EINVAL;
		goto clean_exit;
	}

Loading